Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c            | 54
-rw-r--r--   kernel/time/tick-sched.c  |  1
2 files changed, 4 insertions, 51 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 0014b03adac..740fb409e5b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-	int i, max_cpu = smp_processor_id();
-	unsigned long rq_weight = 0;
-	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-	for_each_possible_cpu(i)
-		rq_weight += tg->cfs_rq[i]->load.weight;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * divide shares proportional to the rq_weights.
-		 */
-		shares = tg->shares * tg->cfs_rq[i]->load.weight;
-		shares /= rq_weight + 1;
-
-		tg->cfs_rq[i]->shares = shares;
-
-		if (shares > max_shares) {
-			max_shares = shares;
-			max_cpu = i;
-		}
-		shares_rem -= shares;
-	}
-
-	/*
-	 * Ensure it all adds up to tg->shares; we can loose a few
-	 * due to rounding down when computing the per-cpu shares.
-	 */
-	if (shares_rem)
-		tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
 	unsigned long shares = 0;
 	int i;
 
-again:
 	for_each_cpu_mask(i, sd->span)
 		shares += tg->cfs_rq[i]->shares;
 
-	/*
-	 * When the span doesn't have any shares assigned, but does have
-	 * tasks to run do a machine wide rebalance (should be rare).
-	 */
-	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-		__aggregate_redistribute_shares(tg);
-		goto again;
-	}
+	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+		shares = tg->shares;
 
 	aggregate(tg, sd)->shares = shares;
 }
@@ -7991,11 +7948,6 @@ void __init sched_init_smp(void)
 #else
 void __init sched_init_smp(void)
 {
-#if defined(CONFIG_NUMA)
-	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
-					  GFP_KERNEL);
-	BUG_ON(sched_group_nodes_bycpu == NULL);
-#endif
 	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
@@ -8128,7 +8080,7 @@ void __init sched_init(void)
 	 * we use alloc_bootmem().
 	 */
 	if (alloc_size) {
-		ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+		ptr = (unsigned long)alloc_bootmem(alloc_size);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		init_task_group.se = (struct sched_entity **)ptr;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d358d4e3a95..b854a895591 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
 		sub_preempt_count(HARDIRQ_OFFSET);
 	}
 
+	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */
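
For reference, this is how aggregate_group_shares() reads after the change, pieced together from the hunk context above (a sketch of the post-patch body, not quoted verbatim from the tree):

static
void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
{
	unsigned long shares = 0;
	int i;

	/* Sum the per-cpu shares already assigned within this domain's span. */
	for_each_cpu_mask(i, sd->span)
		shares += tg->cfs_rq[i]->shares;

	/*
	 * If the span has runnable weight but no shares, or the sum exceeds
	 * the group's configured shares, clamp to tg->shares. This replaces
	 * the old machine-wide __aggregate_redistribute_shares() rebalance.
	 */
	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
		shares = tg->shares;

	aggregate(tg, sd)->shares = shares;
}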