author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-08-07 09:55:03 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-08-07 09:55:03 +0100
commit     4fb8af10d0fd09372d52966b76922b9e82bbc950 (patch)
tree       d240e4d40357583e3f3eb228dccf20122a5b31ed /net/sched
parent     f44f82e8a20b98558486eb14497b2f71c78fa325 (diff)
parent     64a99d2a8c3ed5c4e39f3ae1cc682aa8fd3977fc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_api.c      57
-rw-r--r--  net/sched/sch_atm.c      14
-rw-r--r--  net/sched/sch_cbq.c      27
-rw-r--r--  net/sched/sch_dsmark.c   10
-rw-r--r--  net/sched/sch_generic.c  26
-rw-r--r--  net/sched/sch_hfsc.c     12
-rw-r--r--  net/sched/sch_htb.c      24
-rw-r--r--  net/sched/sch_netem.c     5
-rw-r--r--  net/sched/sch_prio.c     14
-rw-r--r--  net/sched/sch_red.c       2
-rw-r--r--  net/sched/sch_sfq.c       8
-rw-r--r--  net/sched/sch_tbf.c       3
-rw-r--r--  net/sched/sch_teql.c      9
13 files changed, 106 insertions, 105 deletions
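
The common thread through most of these hunks is the qdisc return-code rework: the standalone NET_XMIT_BYPASS value is replaced by NET_XMIT_SUCCESS combined with internal flag bits (__NET_XMIT_BYPASS, __NET_XMIT_STOLEN), and per-qdisc drop counters are only bumped when net_xmit_drop_count() says the verdict counts as a real drop. Below is a minimal user-space sketch of that convention; the constants mirror this era's include/net/sch_generic.h and include/linux/netdevice.h, but treat the exact values as assumptions rather than authoritative.

/* Sketch of the return-code convention the hunks below converge on. */
#include <stdio.h>

#define NET_XMIT_SUCCESS   0x00
#define NET_XMIT_DROP      0x01
/* Internal flags live in the upper half and are stripped before the
 * verdict is reported outside the qdisc layer: */
#define __NET_XMIT_STOLEN  0x00010000
#define __NET_XMIT_BYPASS  0x00020000

/* A "stolen" packet was consumed by a classifier action, so the parent
 * qdisc must not count it as a drop: */
#define net_xmit_drop_count(e) (((e) & __NET_XMIT_STOLEN) ? 0 : 1)

int main(void)
{
	int drops = 0;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

	/* The parent-qdisc pattern used throughout the diff: */
	if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
		drops++;

	printf("drops = %d\n", drops);	/* prints 0: stolen is not a drop */
	return 0;
}
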
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b0601642e22..4840aff4725 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -572,44 +572,21 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 				     struct Qdisc *qdisc)
 {
+	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
 	spinlock_t *root_lock;
-	struct Qdisc *oqdisc;
-	int ingress;
-
-	ingress = 0;
-	if (qdisc && qdisc->flags&TCQ_F_INGRESS)
-		ingress = 1;
-
-	if (ingress) {
-		oqdisc = dev_queue->qdisc;
-	} else {
-		oqdisc = dev_queue->qdisc_sleeping;
-	}
 
 	root_lock = qdisc_root_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	if (ingress) {
-		/* Prune old scheduler */
-		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
-			/* delete */
-			qdisc_reset(oqdisc);
-			dev_queue->qdisc = NULL;
-		} else {  /* new */
-			dev_queue->qdisc = qdisc;
-		}
+	/* Prune old scheduler */
+	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+		qdisc_reset(oqdisc);
 
-	} else {
-		/* Prune old scheduler */
-		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
-			qdisc_reset(oqdisc);
-
-		/* ... and graft new one */
-		if (qdisc == NULL)
-			qdisc = &noop_qdisc;
-		dev_queue->qdisc_sleeping = qdisc;
-		dev_queue->qdisc = &noop_qdisc;
-	}
+	/* ... and graft new one */
+	if (qdisc == NULL)
+		qdisc = &noop_qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	dev_queue->qdisc = &noop_qdisc;
 
 	spin_unlock_bh(root_lock);
 
@@ -678,7 +655,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 	ingress = 0;
 	num_q = dev->num_tx_queues;
-	if (q && q->flags & TCQ_F_INGRESS) {
+	if ((q && q->flags & TCQ_F_INGRESS) ||
+	    (new && new->flags & TCQ_F_INGRESS)) {
 		num_q = 1;
 		ingress = 1;
 	}
@@ -692,13 +670,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (!ingress)
 			dev_queue = netdev_get_tx_queue(dev, i);
 
-		if (ingress) {
-			old = dev_graft_qdisc(dev_queue, q);
-		} else {
-			old = dev_graft_qdisc(dev_queue, new);
-			if (new && i > 0)
-				atomic_inc(&new->refcnt);
-		}
+		old = dev_graft_qdisc(dev_queue, new);
+		if (new && i > 0)
+			atomic_inc(&new->refcnt);
+
 		notify_and_destroy(skb, n, classid, old, new);
 	}
 
@@ -817,7 +792,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			goto err_out3;
 		}
 	}
-	if (parent)
+	if (parent && !(sch->flags & TCQ_F_INGRESS))
 		list_add_tail(&sch->list, &dev_queue->qdisc->list);
 
 	return sch;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5..43d37256c15 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
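
The classify-side half of the same convention appears next in sch_cbq.c and recurs in sch_hfsc.c, sch_htb.c, sch_prio.c and sch_sfq.c: *qerr defaults to success-plus-BYPASS, is upgraded to success-plus-STOLEN when a filter action returns TC_ACT_QUEUED or TC_ACT_STOLEN, and enqueue paths now test the verdict with a bit-and rather than comparing against the old NET_XMIT_BYPASS value. A hypothetical condensed version of those classify helpers; the function and verdict names here are stand-ins, not the kernel's:

#include <stdio.h>

#define NET_XMIT_SUCCESS   0x00
#define __NET_XMIT_STOLEN  0x00010000
#define __NET_XMIT_BYPASS  0x00020000

enum tc_action { TC_ACT_OK, TC_ACT_SHOT, TC_ACT_STOLEN, TC_ACT_QUEUED };

/* Returns 1 if a class was found, 0 otherwise; *qerr carries the
 * verdict the enqueue path should propagate on failure. */
static int classify(enum tc_action verdict, int *qerr)
{
	/* Until a filter matches, failure means "bypassed": */
	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	switch (verdict) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		/* An action consumed the skb: success + stolen, so no
		 * ancestor bumps its drop counter. */
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through, like the kernel switches below */
	case TC_ACT_SHOT:
		return 0;	/* no class; caller checks *qerr */
	default:
		return 1;
	}
}

int main(void)
{
	int qerr;

	if (!classify(TC_ACT_STOLEN, &qerr))
		printf("stolen flag set: %d\n",
		       !!(qerr & __NET_XMIT_STOLEN));	/* prints 1 */
	return 0;
}

On the caller side the test becomes `ret & __NET_XMIT_BYPASS` instead of `ret == NET_XMIT_BYPASS`, since the verdict is now a flag OR-ed into NET_XMIT_SUCCESS.
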
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a68..4e261ce62f4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		    (cl = cbq_class_lookup(q, prio)) != NULL)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e..edd1298f85f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fd2a6cadb11..7cf83b37459 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 		break;
 	}
 
-	if (ret && netif_tx_queue_stopped(txq))
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
 		ret = 0;
 
 	return ret;
@@ -505,7 +507,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -541,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -596,7 +598,7 @@ static void transition_one_qdisc(struct net_device *dev,
 	int *need_watchdog_p = _need_watchdog;
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (new_qdisc != &noqueue_qdisc)
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
 		*need_watchdog_p = 1;
 }
 
@@ -619,6 +621,7 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+	transition_one_qdisc(dev, &dev->rx_queue, NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -656,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
@@ -677,6 +680,7 @@ void dev_deactivate(struct net_device *dev)
 	bool running;
 
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -718,7 +722,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -731,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+		spinlock_t *root_lock = qdisc_lock(qdisc);
 
 		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
@@ -745,6 +749,6 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
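
sch_generic.c (above) and sch_teql.c (at the end of this diff) share a second theme: per-queue transmit state. The device-level subqueue test is replaced by checks on the netdev_queue itself, and a frozen queue (one whose tx lock is held, e.g. during reconfiguration) now blocks transmission just like a stopped one. A stand-alone sketch of that guard follows; struct txq and both predicates are stand-ins for the kernel's netdev_queue helpers, not the real API.

#include <stdbool.h>
#include <stdio.h>

struct txq {
	bool stopped;	/* driver backpressure: tx ring full */
	bool frozen;	/* queue locked out, e.g. during reconfig */
};

static bool tx_queue_stopped(const struct txq *q) { return q->stopped; }
static bool tx_queue_frozen(const struct txq *q) { return q->frozen; }

/* Mirrors the qdisc_restart() change above: transmit only when the
 * queue is neither stopped nor frozen, otherwise leave the packet
 * queued and retry later. */
static const char *try_xmit(const struct txq *q)
{
	if (!tx_queue_stopped(q) && !tx_queue_frozen(q))
		return "transmit";
	return "requeue";
}

int main(void)
{
	struct txq q = { .stopped = false, .frozen = true };
	printf("%s\n", try_xmit(&q));	/* frozen -> requeue */
	return 0;
}
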
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba..c2b8d9cce3d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f..be35422711a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a5908570067..fb0294d0b55 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb09..eac197610ed 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bb..5da05839e22 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da66656..6e041d10dbd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f763..7d3b7ff3bf0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6..2c35c678563 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
-			if (netif_tx_trylock(slave)) {
-				if (!__netif_subqueue_stopped(slave, subq) &&
+			if (__netif_tx_trylock(slave_txq)) {
+				if (!netif_tx_queue_stopped(slave_txq) &&
+				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
-					netif_tx_unlock(slave);
+					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
 					    qdisc_pkt_len(skb);
 					return 0;
 				}
-				netif_tx_unlock(slave);
+				__netif_tx_unlock(slave_txq);
 			}
 			if (netif_queue_stopped(dev))
 				busy = 1;