author		Ingo Molnar <mingo@elte.hu>	2009-05-04 18:54:32 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-05-04 19:29:57 +0200
commit		b82914ce33146186d554b0f5c41e4e13693614ce (patch)
tree		dde12a31830dd4216433d9a6b365c90bccaa1792
parent		dab6f6a3401f596fe934f41fc5da3f401adfdfb1 (diff)
perf_counter: round-robin per-CPU counters too
This used to be unstable when we had the rq->lock dependencies, but now that
those are a thing of the past we can turn on per-CPU counter round-robin too.

[ Impact: handle counter over-commit for per-CPU counters too ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/perf_counter.c	10
1 file changed, 3 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8660ae57953..b9679c36bcc 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
-	const int rotate_percpu = 0;
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_out(cpuctx);
+	perf_counter_cpu_sched_out(cpuctx);
 	perf_counter_task_sched_out(curr, cpu);
 
-	if (rotate_percpu)
-		rotate_ctx(&cpuctx->ctx);
+	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_in(cpuctx, cpu);
+	perf_counter_cpu_sched_in(cpuctx, cpu);
 	perf_counter_task_sched_in(curr, cpu);
 }
 
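
For readers unfamiliar with the over-commit problem the patch addresses: when more
counters are attached to a context than the PMU has hardware slots, only the counters
at the front of the context's list actually count, so rotating the list on every tick
gives each counter a fair share of the hardware. The standalone C sketch below
illustrates that effect only; it is not kernel code, all names in it (toy_counter,
toy_context, rotate_ctx_toy, sched_in_toy) are invented for illustration, and the real
rotate_ctx()/scheduling logic in kernel/perf_counter.c is more involved.

/*
 * Toy illustration (NOT kernel code) of round-robin over an
 * over-committed counter list: 5 counters, 2 hardware slots.
 */
#include <stdio.h>

#define NR_COUNTERS 5	/* counters attached to one context       */
#define NR_HW_SLOTS 2	/* hardware slots on this pretend "PMU"   */

struct toy_counter {
	const char *name;
};

struct toy_context {
	struct toy_counter *list[NR_COUNTERS];
};

/* Move the first counter to the tail, like rotating a circular list. */
static void rotate_ctx_toy(struct toy_context *ctx)
{
	struct toy_counter *first = ctx->list[0];
	int i;

	for (i = 0; i < NR_COUNTERS - 1; i++)
		ctx->list[i] = ctx->list[i + 1];
	ctx->list[NR_COUNTERS - 1] = first;
}

/* "Schedule in": only the first NR_HW_SLOTS counters get hardware. */
static void sched_in_toy(struct toy_context *ctx, int tick)
{
	int i;

	printf("tick %d: counters on hardware:", tick);
	for (i = 0; i < NR_HW_SLOTS; i++)
		printf(" %s", ctx->list[i]->name);
	printf("\n");
}

int main(void)
{
	struct toy_counter c[NR_COUNTERS] = {
		{ "A" }, { "B" }, { "C" }, { "D" }, { "E" },
	};
	struct toy_context ctx = {
		{ &c[0], &c[1], &c[2], &c[3], &c[4] }
	};
	int tick;

	for (tick = 0; tick < 5; tick++) {
		sched_in_toy(&ctx, tick);
		/* analogous to what the tick handler now also does for the
		 * per-CPU context: rotate so a different subset runs next */
		rotate_ctx_toy(&ctx);
	}
	return 0;
}

Run on its own, the sketch prints a different pair of active counters on each simulated
tick, which is the round-robin behaviour the patch turns on for the per-CPU context in
addition to the per-task one.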