OSDN Git Service

netpoll: Respect NETIF_F_LLTX
authorEric W. Biederman <ebiederm@xmission.com>
Thu, 27 Mar 2014 22:42:20 +0000 (15:42 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sat, 29 Mar 2014 21:58:37 +0000 (17:58 -0400)
Stop taking the transmit lock when a network device has specified
NETIF_F_LLTX.

If no locks are needed to transmit a packet this is the ideal scenario for
netpoll as all packets can be transmitted immediately.

Even if some locks are needed in ndo_start_xmit skipping any unnecessary
serialization is desirable for netpoll as it makes it more likely a
debugging packet may be transmitted immediately instead of being
deferred until later.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
net/core/netpoll.c

index 0d8c871..4cd5e9e 100644 (file)
@@ -2909,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
        }                                               \
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)                      \
+       (((dev->features & NETIF_F_LLTX) == 0) ?        \
+               __netif_tx_trylock(txq) :               \
+               true )
+
 #define HARD_TX_UNLOCK(dev, txq) {                     \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
index d44af23..ed7740f 100644 (file)
@@ -119,17 +119,17 @@ static void queue_process(struct work_struct *work)
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
                local_irq_save(flags);
-               __netif_tx_lock(txq, smp_processor_id());
+               HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
-                       __netif_tx_unlock(txq);
+                       HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);
 
                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
-               __netif_tx_unlock(txq);
+               HARD_TX_UNLOCK(dev, txq);
                local_irq_restore(flags);
        }
 }
@@ -345,11 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
-                       if (__netif_tx_trylock(txq)) {
+                       if (HARD_TX_TRYLOCK(dev, txq)) {
                                if (!netif_xmit_stopped(txq))
                                        status = netpoll_start_xmit(skb, dev, txq);
 
-                               __netif_tx_unlock(txq);
+                               HARD_TX_UNLOCK(dev, txq);
 
                                if (status == NETDEV_TX_OK)
                                        break;