author     Eric Dumazet <eric.dumazet@gmail.com>      2009-09-30 13:03:33 +0000
committer  David S. Miller <davem@davemloft.net>      2009-10-05 00:21:54 -0700
commit     0835acfe72e43b2f9bd46ec8c0d219e94c3525e0
tree       fba23c6f67d2b6c42e70c02bf4b7a4bf25dbc103 /net
parent     b3a5b6cc7cab89dcc3301add750f88019d910a2b
pktgen: Avoid dirtying skb->users when txq is full
We can avoid two atomic ops on skb->users if the packet is not going to be
sent to the device (because the hardware tx queue is full).
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--   net/core/pktgen.c   11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b69455217ed..e856ab0d074 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3441,12 +3441,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	atomic_inc(&(pkt_dev->skb->users));
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
 		ret = NETDEV_TX_BUSY;
-	else
-		ret = (*xmit)(pkt_dev->skb, odev);
+		pkt_dev->last_ok = 0;
+		goto unlock;
+	}
+	atomic_inc(&(pkt_dev->skb->users));
+	ret = (*xmit)(pkt_dev->skb, odev);
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -3468,6 +3470,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		atomic_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
+unlock:
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
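
The saving is easiest to see with the two control flows laid out side by side. The sketch below is a simplified excerpt of pktgen_xmit(), reconstructed from the hunks above with the surrounding loop and error handling omitted; it is an illustration of the pattern, not the full source.

	/* Before: skb->users is bumped before the queue state is checked,
	 * so a stopped or frozen queue still costs an atomic_inc() here and
	 * a matching atomic_dec() in the NETDEV_TX_BUSY error path. */
	__netif_tx_lock_bh(txq);
	atomic_inc(&(pkt_dev->skb->users));
	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
		ret = NETDEV_TX_BUSY;
	else
		ret = (*xmit)(pkt_dev->skb, odev);

	/* After: the full-queue case bails out before touching skb->users,
	 * so neither atomic op is performed when the packet cannot be sent. */
	__netif_tx_lock_bh(txq);
	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
		ret = NETDEV_TX_BUSY;
		pkt_dev->last_ok = 0;
		goto unlock;
	}
	atomic_inc(&(pkt_dev->skb->users));
	ret = (*xmit)(pkt_dev->skb, odev);

Taking the reference only once the packet is actually handed to the driver keeps the refcount bookkeeping on the success path and avoids dirtying the skb's cache line on every busy poll.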