From 8e6598af3f35629c37249a610cf13e73f70db279 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 3 Sep 2009 13:20:03 +0200
Subject: sched: Feature to disable APERF/MPERF cpu_power

I suspect a feedback loop between cpuidle and the APERF/MPERF
cpu_power bits: idle C-states lower the APERF/MPERF ratio, which
leads to lower cpu_power and therefore less load, which in turn
generates more idle time, etc..

Put in a knob to disable it.

Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/sched.c          | 12 ++++++++++--
 kernel/sched_features.h |  5 +++++
 2 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index c210321adcb..e8e603bf876 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3602,11 +3602,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	power *= arch_scale_freq_power(sd, cpu);
+	if (sched_feat(ARCH_POWER))
+		power *= arch_scale_freq_power(sd, cpu);
+	else
+		power *= default_scale_freq_power(sd, cpu);
+
 	power >>= SCHED_LOAD_SHIFT;
 
 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-		power *= arch_scale_smt_power(sd, cpu);
+		if (sched_feat(ARCH_POWER))
+			power *= arch_scale_smt_power(sd, cpu);
+		else
+			power *= default_scale_smt_power(sd, cpu);
+
 		power >>= SCHED_LOAD_SHIFT;
 	}
 
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index e98c2e8de1d..294e10edd3c 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -82,6 +82,11 @@ SCHED_FEAT(LAST_BUDDY, 1)
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 
+/*
+ * Use arch dependent cpu power functions
+ */
+SCHED_FEAT(ARCH_POWER, 0)
+
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)
-- 
cgit v1.2.3
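
Note: the SCHED_FEAT(ARCH_POWER, 0) entry added to sched_features.h becomes
the bit tested by sched_feat(ARCH_POWER) in the hunk above via a double
inclusion of that header in kernel/sched.c. The sketch below shows roughly
how that machinery looked in kernels of this era; it is a simplified
paraphrase for context, not part of this patch, and details may differ in
the exact tree the patch applies to.

    /* First expansion: turn each SCHED_FEAT() entry into an enum bit index. */
    #define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
    enum {
    #include "sched_features.h"
    };
    #undef SCHED_FEAT

    /* Second expansion: fold the per-feature defaults into one bitmask.
     * ARCH_POWER's default of 0 means its bit starts cleared. */
    #define SCHED_FEAT(name, enabled)	((1UL << __SCHED_FEAT_##name) * enabled) |
    const_debug unsigned int sysctl_sched_features =
    #include "sched_features.h"
    	0;
    #undef SCHED_FEAT

    /* sched_feat(ARCH_POWER) then reduces to a single bit test. */
    #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

Because the mask is const_debug, the knob can be flipped at runtime when
CONFIG_SCHED_DEBUG is set, by writing ARCH_POWER (or NO_ARCH_POWER) to the
debugfs sched_features file.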
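
Note: for reference, the default_scale_freq_power()/default_scale_smt_power()
fallbacks the patch switches to are the pre-existing generic scaling helpers
in kernel/sched.c. They looked roughly as follows around this time; this is a
sketch from memory, so consult the actual tree for the authoritative versions.

    unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
    {
    	/* No APERF/MPERF feedback: keep the full SCHED_LOAD_SCALE. */
    	return SCHED_LOAD_SCALE;
    }

    unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
    {
    	unsigned long weight = cpumask_weight(sched_domain_span(sd));
    	unsigned long smt_gain = sd->smt_gain;

    	/* Split the domain's SMT gain evenly across the siblings. */
    	smt_gain /= weight;

    	return smt_gain;
    }

With ARCH_POWER disabled, cpu_power is thus derived purely from these static
defaults, breaking the suspected cpuidle feedback loop described above.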