author    Thomas Graf <tgraf@suug.ch>    2005-06-18 22:57:42 -0700
committer David S. Miller <davem@davemloft.net>    2005-06-18 22:57:42 -0700
commit    aaae3013d186d71a01e1059c9633c4ec8729d891
tree      533e5512f5f8497ae293db9bd828a771e957fd8a
parent    9972b25d0c6e7f8f893eb3444dea37b42b1201de
[PKT_SCHED]: Transform fifo qdisc to use generic queue management interface
The simplicity of the fifo qdisc allows several qdisc operations to be
redirected to the relevant queue management function directly. Saves a
lot of code lines and gives the pfifo a byte based backlog.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--    net/sched/sch_fifo.c    102
1 file changed, 14 insertions, 88 deletions
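For context (not part of the commit itself): the rewritten enqueue paths in the first two hunks below lean on two helpers from the generic queue management interface added by the parent commit 9972b25d0c6e. The sketch that follows is inferred solely from the open-coded logic this patch deletes, not copied from that interface, so the real definitions may differ in detail.

/*
 * Illustrative sketch only. qdisc_enqueue_tail() and qdisc_reshape_fail()
 * are provided by the generic queue management interface (parent commit
 * 9972b25d0c6e); the bodies below merely mirror the open-coded logic that
 * bfifo_enqueue()/pfifo_enqueue() used before this patch.
 */
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);
	sch->qstats.backlog += skb->len;	/* byte backlog, now kept for pfifo too */
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return NET_XMIT_SUCCESS;		/* == 0, what the old code returned */
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
	/* let an attached policer try to reshape before the skb is freed */
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
		kfree_skb(skb);
	return NET_XMIT_DROP;
}

Because the shared enqueue helper charges every packet to qstats.backlog, switching pfifo over to it is what gives the pfifo the byte based backlog mentioned in the changelog.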
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4888305c96d..83a4db4d3cd 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -47,61 +47,10 @@ bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (sch->qstats.backlog + skb->len <= q->limit) {
-		__skb_queue_tail(&sch->q, skb);
-		sch->qstats.backlog += skb->len;
-		sch->bstats.bytes += skb->len;
-		sch->bstats.packets++;
-		return 0;
-	}
-	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
-	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
-		kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
-static int
-bfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.backlog += skb->len;
-	sch->qstats.requeues++;
-	return 0;
-}
-
-static struct sk_buff *
-bfifo_dequeue(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-
-	skb = __skb_dequeue(&sch->q);
-	if (skb)
-		sch->qstats.backlog -= skb->len;
-	return skb;
-}
-
-static unsigned int
-fifo_drop(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-
-	skb = __skb_dequeue_tail(&sch->q);
-	if (skb) {
-		unsigned int len = skb->len;
-		sch->qstats.backlog -= len;
-		kfree_skb(skb);
-		return len;
-	}
-	return 0;
-}
+	if (likely(sch->qstats.backlog + skb->len <= q->limit))
+		return qdisc_enqueue_tail(skb, sch);
 
-static void
-fifo_reset(struct Qdisc* sch)
-{
-	skb_queue_purge(&sch->q);
-	sch->qstats.backlog = 0;
+	return qdisc_reshape_fail(skb, sch);
 }
 
 static int
@@ -109,33 +58,10 @@ pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (sch->q.qlen < q->limit) {
-		__skb_queue_tail(&sch->q, skb);
-		sch->bstats.bytes += skb->len;
-		sch->bstats.packets++;
-		return 0;
-	}
-	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
-	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
-		kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
+	if (likely(skb_queue_len(&sch->q) < q->limit))
+		return qdisc_enqueue_tail(skb, sch);
 
-static int
-pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.requeues++;
-	return 0;
-}
-
-
-static struct sk_buff *
-pfifo_dequeue(struct Qdisc* sch)
-{
-	return __skb_dequeue(&sch->q);
+	return qdisc_reshape_fail(skb, sch);
 }
 
 static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
@@ -180,11 +106,11 @@ struct Qdisc_ops pfifo_qdisc_ops = {
 	.id		=	"pfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	pfifo_enqueue,
-	.dequeue	=	pfifo_dequeue,
-	.requeue	=	pfifo_requeue,
-	.drop		=	fifo_drop,
+	.dequeue	=	qdisc_dequeue_head,
+	.requeue	=	qdisc_requeue,
+	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
-	.reset		=	fifo_reset,
+	.reset		=	qdisc_reset_queue,
 	.destroy	=	NULL,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,
@@ -197,11 +123,11 @@ struct Qdisc_ops bfifo_qdisc_ops = {
 	.id		=	"bfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	bfifo_enqueue,
-	.dequeue	=	bfifo_dequeue,
-	.requeue	=	bfifo_requeue,
-	.drop		=	fifo_drop,
+	.dequeue	=	qdisc_dequeue_head,
+	.requeue	=	qdisc_requeue,
+	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
-	.reset		=	fifo_reset,
+	.reset		=	qdisc_reset_queue,
 	.destroy	=	NULL,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,