From c773e847ea8f6812804e40f52399c6921a00eab1 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 8 Jul 2008 23:13:53 -0700
Subject: netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.

Accesses are mostly structured such that when there are multiple TX
queues the code transformations will be a little bit simpler.

Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'net/sched')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fcc7533f0bc..b6a36d39466 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -92,10 +92,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
-	struct net_device *dev = dev_queue->dev;
 	int ret;
 
-	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -105,7 +104,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		kfree_skb(skb);
 		if (net_ratelimit())
 			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev->name);
+			       "fix it urgently!\n", dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -155,10 +154,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 	dev = txq->dev;
 
-	HARD_TX_LOCK(dev, smp_processor_id());
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
-	HARD_TX_UNLOCK(dev);
+	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
-- 
cgit v1.2.3
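
Editorial note: the hunks above only touch the call sites in net/sched; the
struct fields and locking wrappers the patch relies on live outside this diff
(in include/linux/netdevice.h). Below is a minimal sketch of how the
per-queue lock state and wrappers could look after this change, assuming the
_xmit_lock and xmit_lock_owner fields named in the subject now sit in struct
netdev_queue, and that HARD_TX_LOCK() keeps skipping the lock for
lockless-TX (NETIF_F_LLTX) drivers. The __netif_tx_lock()/__netif_tx_unlock()
helper names are illustrative and are not taken from this diff.

/* Sketch, not part of this patch: per-queue TX lock state. */
struct netdev_queue {
	/* ... existing fields (lock, qdisc, dev, ...) ... */
	spinlock_t	_xmit_lock;
	int		xmit_lock_owner;	/* CPU id holding the lock, -1 if none */
};

/* Illustrative helpers: take the queue's lock and record the owner CPU,
 * so handle_dev_cpu_collision() can detect hard_start_xmit() recursion. */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

/* Assumed wrapper shape: the feature check stays on the device, while the
 * lock itself now belongs to the queue, hence both dev and txq are passed. */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if (((dev)->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if (((dev)->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

With something like this in place, handle_dev_cpu_collision() can test
dev_queue->xmit_lock_owner directly, and qdisc_restart() simply threads txq
through to the wrappers, as the hunks above show.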