path: root/net/sched
author    Jarek Poplawski <jarkao2@gmail.com>    2009-01-12 21:54:16 -0800
committer David S. Miller <davem@davemloft.net>  2009-01-12 21:54:16 -0800
commit    c08513471911cf33cb50249a7ff12848374f7263 (patch)
tree      9310f25b012fd7ecb8eb58ec0b0caa8dc834438e /net/sched
parent    daaf83d2b9277928739f3eb7ea64f49c1254fd62 (diff)
pkt_sched: sch_htb: Consider used jiffies in htb_do_events()
The next event time should account for the jiffies already spent on recounting. Otherwise qdisc_watchdog_schedule() triggers the hrtimer immediately with an event time in the past, which can cause very high ksoftirqd CPU usage (if highres is on).

The check of "event" for zero in htb_dequeue() is also removed: it is always true at this point.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
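The problem is easiest to see with concrete numbers. Below is a minimal user-space sketch (not kernel code) of the arithmetic: the PSCHED_TICKS_PER_SEC and HZ values, the sample q->now, and the assumption that htb_do_events() ran into the next jiffy are all illustrative; only the two return-value formulas mirror the old and new code.

/*
 * Minimal user-space sketch of the timing arithmetic the patch fixes.
 * The constants, the q->now value and "one jiffy spent in htb_do_events()"
 * are illustrative assumptions, not values taken from the kernel.
 */
#include <stdio.h>

#define PSCHED_TICKS_PER_SEC 1000000ULL   /* assumed psched resolution */
#define HZ                   1000         /* assumed tick rate */

int main(void)
{
    unsigned long long now = 5000000;    /* q->now sampled before recounting */
    unsigned long long jiffies_used = 1; /* recounting ran into the next jiffy */

    /* Time that has really passed while events were being recounted. */
    unsigned long long real_now = now + jiffies_used * (PSCHED_TICKS_PER_SEC / HZ);

    /* Old return value: one jiffy after the stale q->now. */
    unsigned long long old_event = now + PSCHED_TICKS_PER_SEC / HZ;

    /* New return value: also covers the jiffy already consumed above. */
    unsigned long long new_event = now + 2 * PSCHED_TICKS_PER_SEC / HZ;

    printf("real time now : %llu\n", real_now);
    printf("old event time: %llu -> %s\n", old_event,
           old_event <= real_now ? "already past, hrtimer fires at once"
                                 : "still in the future");
    printf("new event time: %llu -> %s\n", new_event,
           new_event <= real_now ? "already past"
                                 : "still in the future, watchdog can sleep");
    return 0;
}

If the recounting loop has already run into a later jiffy, the old value can be in the past by the time it reaches qdisc_watchdog_schedule(), so the hrtimer fires immediately and the cycle repeats back to back; budgeting one extra jiffy keeps the next event in the future.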
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_htb.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643ce53..9ca8a26ba50 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -685,8 +685,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	/* too much load - let's continue on next jiffie */
-	return q->now + PSCHED_TICKS_PER_SEC / HZ;
+	/* too much load - let's continue on next jiffie (including above) */
+	return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -873,7 +873,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		} else
 			event = q->near_ev_cache[level];
 
-		if (event && next_event > event)
+		if (next_event > event)
 			next_event = event;
 
 		m = ~q->row_mask[level];