From c8cba857b4997d5b00451d01474638f6a153f713 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:23 +0200
Subject: sched: simplify the group load balancer

While thinking about the previous patch - I realized that using per domain
aggregate load values in load_balance_fair() is wrong. We should use the
load value for that CPU.

By not needing per domain hierarchical load values we don't need to store
per domain aggregate shares, which greatly simplifies all the math.

It basically falls apart in two separate computations:
 - per domain update of the shares
 - per CPU update of the hierarchical load

Also get rid of the move_group_shares() stuff - just re-compute the shares
again after a successful load balance.

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'kernel/sched_fair.c')

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 03b9fbd9d64..7b8d664d6f2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1421,17 +1421,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	struct task_group *tg;
 
 	rcu_read_lock();
+	update_h_load(busiest_cpu);
+
 	list_for_each_entry(tg, &task_groups, list) {
+		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		long rem_load, moved_load;
 
 		/*
 		 * empty group
 		 */
-		if (!tg->cfs_rq[busiest_cpu]->task_weight)
+		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
-		rem_load /= aggregate(tg, this_cpu)->load + 1;
+		rem_load = rem_load_move * busiest_cfs_rq->load.weight;
+		rem_load /= busiest_cfs_rq->h_load + 1;
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1440,10 +1443,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		move_group_shares(tg, this_cpu, sd, busiest_cpu, this_cpu);
-
-		moved_load *= aggregate(tg, this_cpu)->load;
-		moved_load /= aggregate(tg, this_cpu)->rq_weight + 1;
+		moved_load *= busiest_cfs_rq->h_load;
+		moved_load /= busiest_cfs_rq->load.weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
--
cgit v1.2.3
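
The two divisions the patch introduces convert a load-move budget between
global units and a group's local weight units: rem_load scales the remaining
budget into the group's terms before calling __load_balance_fair(), and
moved_load scales the result back. Below is a minimal standalone sketch of
that round trip (plain C, not kernel code; the values and the names
cfs_weight and h_load are hypothetical stand-ins for
busiest_cfs_rq->load.weight and busiest_cfs_rq->h_load):

	#include <stdio.h>

	int main(void)
	{
		long rem_load_move = 2048;	/* load still to move, global units */
		long cfs_weight    = 1024;	/* group's load.weight on the busiest CPU */
		long h_load        = 512;	/* group's hierarchical load on that CPU */

		/* Scale the global budget into the group's local units. */
		long rem_load = rem_load_move * cfs_weight / (h_load + 1);

		/* Assume the balancer moved the full local budget. */
		long moved_local = rem_load;

		/* Scale the moved amount back into global units. */
		long moved_load = moved_local * h_load / (cfs_weight + 1);

		rem_load_move -= moved_load;
		printf("local budget %ld, moved %ld, remaining %ld\n",
		       rem_load, moved_load, rem_load_move);
		return 0;
	}

The "+ 1" in each divisor guards against dividing by a zero weight or zero
h_load; the slight downward bias it introduces is harmless for load
balancing purposes.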