author    Paul Mackerras <paulus@samba.org>  2009-05-12 21:59:01 +1000
committer Ingo Molnar <mingo@elte.hu>        2009-05-12 15:31:06 +0200
commit    e758a33d6fc5b9d6a3ae489863d04fcecad8120b (patch)
tree      3345d35fd5c9ee41a2f5a22fc5795672c0db7c2b
parent    615a3f1e055ac9b0ae74e1f935a12ea2cfe2a2ad (diff)
perf_counter: call hw_perf_save_disable/restore around group_sched_in
I noticed that when enabling a group via the PERF_COUNTER_IOC_ENABLE ioctl
on the group leader, the counters weren't enabled and counting immediately
on return from the ioctl, but did start counting a little while later
(presumably after a context switch).

The reason was that __perf_counter_enable calls group_sched_in which calls
hw_perf_group_sched_in, which on powerpc assumes that the caller has called
hw_perf_save_disable already.  Until commit 46d686c6 ("perf_counter: put
whole group on when enabling group leader") it was true that all callers
of group_sched_in had called hw_perf_save_disable first, and the powerpc
hw_perf_group_sched_in relies on that (there isn't an x86 version).

This fixes the problem by putting calls to hw_perf_save_disable /
hw_perf_restore around the calls to group_sched_in and counter_sched_in
in __perf_counter_enable.  Having the calls to hw_perf_save_disable/restore
around the counter_sched_in call is harmless and makes this call consistent
with the other call sites of counter_sched_in, which have all called
hw_perf_save_disable first.

[ Impact: more precise counter group disable/enable functionality ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18953.25733.53359.147452@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/perf_counter.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
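For context, the invariant the patch restores can be summarized in a small
sketch (not part of the patch; the wrapper function below is hypothetical,
while the other names are taken from the kernel of this era): every path
into group_sched_in()/counter_sched_in() must freeze the PMU first, because
the powerpc hw_perf_group_sched_in() assumes the caller has already done so.

static int sched_in_pmu_disabled(struct perf_counter *counter,
				 struct perf_cpu_context *cpuctx,
				 struct perf_counter_context *ctx,
				 int cpu)
{
	unsigned long pmuflags;
	int err;

	/* Freeze the PMU; hw_perf_group_sched_in() relies on this. */
	pmuflags = hw_perf_save_disable();

	if (counter == counter->group_leader)
		err = group_sched_in(counter, cpuctx, ctx, cpu);
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	/* Unfreeze; the newly scheduled counters start counting here. */
	hw_perf_restore(pmuflags);

	return err;
}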
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 5ea0240adab..ff166c11b69 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -663,6 +663,7 @@ static void __perf_counter_enable(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter_context *ctx = counter->ctx;
 	struct perf_counter *leader = counter->group_leader;
+	unsigned long pmuflags;
 	unsigned long flags;
 	int err;
 
@@ -689,14 +690,18 @@ static void __perf_counter_enable(void *info)
 	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
 		goto unlock;
 
-	if (!group_can_go_on(counter, cpuctx, 1))
+	if (!group_can_go_on(counter, cpuctx, 1)) {
 		err = -EEXIST;
-	else if (counter == leader)
-		err = group_sched_in(counter, cpuctx, ctx,
-				     smp_processor_id());
-	else
-		err = counter_sched_in(counter, cpuctx, ctx,
-				       smp_processor_id());
+	} else {
+		pmuflags = hw_perf_save_disable();
+		if (counter == leader)
+			err = group_sched_in(counter, cpuctx, ctx,
+					     smp_processor_id());
+		else
+			err = counter_sched_in(counter, cpuctx, ctx,
+					       smp_processor_id());
+		hw_perf_restore(pmuflags);
+	}
 
 	if (err) {
 		/*