author	Patrick McHardy <kaber@trash.net>	2007-03-23 11:28:30 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-25 22:27:51 -0700
commit	a084980dcbf56c896e4b6c19aff2b082d5db7006 (patch)
tree	2bdecde658ff928eff6e0f12ccba63217692d8de /net
parent	104e0878984bb467e3f54d61105d8903babb4ec1 (diff)
[NET_SCHED]: kill PSCHED_SET_PASTPERFECT/PSCHED_IS_PASTPERFECT
Use direct assignment and comparison instead.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
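For context, a minimal sketch of what the conversion amounts to at a call site. The typedef, the constant value 0, and the wrapper functions below are illustrative assumptions, not code quoted from the tree:

/* Sketch only: approximate shape of the old helpers and their replacement.
 * The typedef, the value 0 for PSCHED_PASTPERFECT and the two functions are
 * assumptions made for illustration, not quoted from include/net/pkt_sched.h.
 */
#include <stdint.h>

typedef uint64_t psched_time_t;			/* assumed: scheduler clock ticks */

#define PSCHED_PASTPERFECT		0	/* "never set" / infinitely old timestamp */
#define PSCHED_SET_PASTPERFECT(t)	((t) = PSCHED_PASTPERFECT)	/* killed by this patch */
#define PSCHED_IS_PASTPERFECT(t)	((t) == PSCHED_PASTPERFECT)	/* killed by this patch */

/* After the patch, call sites spell the operation out directly. */
static void mark_underlimit(psched_time_t *undertime)
{
	*undertime = PSCHED_PASTPERFECT;	/* was: PSCHED_SET_PASTPERFECT(*undertime) */
}

static int is_under_limit(psched_time_t undertime, psched_time_t now)
{
	/* was: PSCHED_IS_PASTPERFECT(undertime) || now >= undertime */
	return undertime == PSCHED_PASTPERFECT || now >= undertime;
}

The direct form reads as an ordinary timestamp comparison instead of hiding it behind a macro.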
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_cbq.c	17
-rw-r--r--	net/sched/sch_netem.c	2
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9e6cdab6af3..2bb271b0efc 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -738,7 +738,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 	if (cl && q->toplevel >= borrowed->level) {
 		if (cl->q->q.qlen > 1) {
 			do {
-				if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
+				if (borrowed->undertime == PSCHED_PASTPERFECT) {
 					q->toplevel = borrowed->level;
 					return;
 				}
@@ -824,7 +824,7 @@ cbq_update(struct cbq_sched_data *q)
 		} else {
 			/* Underlimit */
 
-			PSCHED_SET_PASTPERFECT(cl->undertime);
+			cl->undertime = PSCHED_PASTPERFECT;
 			if (avgidle > cl->maxidle)
 				cl->avgidle = cl->maxidle;
 			else
@@ -845,7 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
 	if (cl->tparent == NULL)
 		return cl;
 
-	if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) {
+	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
 		cl->delayed = 0;
 		return cl;
 	}
@@ -868,8 +868,7 @@ cbq_under_limit(struct cbq_class *cl)
 		}
 		if (cl->level > q->toplevel)
 			return NULL;
-	} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-		 q->now < cl->undertime);
+	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
 
 	cl->delayed = 0;
 	return cl;
@@ -1054,11 +1053,11 @@ cbq_dequeue(struct Qdisc *sch)
 		 */
 
 		if (q->toplevel == TC_CBQ_MAXLEVEL &&
-		    PSCHED_IS_PASTPERFECT(q->link.undertime))
+		    q->link.undertime == PSCHED_PASTPERFECT)
 			break;
 
 		q->toplevel = TC_CBQ_MAXLEVEL;
-		PSCHED_SET_PASTPERFECT(q->link.undertime);
+		q->link.undertime = PSCHED_PASTPERFECT;
 	}
 
 	/* No packets in scheduler or nobody wants to give them to us :-(
@@ -1289,7 +1288,7 @@ cbq_reset(struct Qdisc* sch)
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
-			PSCHED_SET_PASTPERFECT(cl->undertime);
+			cl->undertime = PSCHED_PASTPERFECT;
 			cl->avgidle = cl->maxidle;
 			cl->deficit = cl->quantum;
 			cl->cpriority = cl->priority;
@@ -1650,7 +1649,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
 
-	if (!PSCHED_IS_PASTPERFECT(cl->undertime))
+	if (cl->undertime != PSCHED_PASTPERFECT)
 		cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5d571aa04a7..1e88301f505 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -532,7 +532,7 @@ static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
 	} else
 		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
 
-	PSCHED_SET_PASTPERFECT(q->oldest);
+	q->oldest = PSCHED_PASTPERFECT;
 	return 0;
 }