author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-20 22:19:54 +0100
committer	Ingo Molnar <mingo@elte.hu>			2009-11-21 14:11:39 +0100
commit		2b8988c9f7defe319cffe0cd362a7cd356c86f62 (patch)
tree		0b9dde7d4c4304eaa1b701897609b1918d72e0f3
parent		58e5ad1de3d6ad931c84f0cc8ef0655c922f30ad (diff)
perf: Fix time locking
Most sites updating ctx->time and event times do so under ctx->lock; make
sure they all do.

This was made possible by removing the __perf_event_read() call from
__perf_event_sync_stat(), which already had this lock taken.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091120212509.102316434@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/perf_event.c	9
1 file changed, 9 insertions, 0 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 028619dd6d0..fdfae888a67 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1526,8 +1526,11 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
+	spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
+	spin_unlock(&ctx->lock);
+
 	event->pmu->read(event);
 }
@@ -1541,7 +1544,13 @@ static u64 perf_event_read(struct perf_event *event)
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, event, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		struct perf_event_context *ctx = event->ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->lock, flags);
+		update_context_time(ctx);
 		update_event_times(event);
+		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
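
For readers outside the perf core, a minimal sketch of the rule the changelog describes follows. It is illustrative only and not part of the patch: the two wrapper functions are hypothetical names invented here, and the extern declarations stand in for helpers that are actually static in kernel/perf_event.c.

/*
 * Illustrative sketch of the locking rule this patch enforces: every
 * update of ctx->time, and of the per-event times derived from it,
 * happens with ctx->lock held.
 */
#include <linux/spinlock.h>
#include <linux/perf_event.h>

/* Stand-ins for helpers that are static in kernel/perf_event.c. */
extern void update_context_time(struct perf_event_context *ctx);
extern void update_event_times(struct perf_event *event);

/*
 * Pattern used in __perf_event_read(): it runs from the IPI issued by
 * smp_call_function_single(), so interrupts are already disabled and a
 * plain spin_lock() is sufficient.
 */
static void read_times_irqs_off(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	spin_unlock(&ctx->lock);
}

/*
 * Pattern used for an INACTIVE event in perf_event_read(): the caller
 * may run with interrupts enabled, so the IRQ state is saved and
 * restored around the lock.
 */
static void read_times_any_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	update_context_time(ctx);
	update_event_times(event);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

Since ctx->lock can also be taken with interrupts disabled (as in the IPI path above), the process-context path has to use the irqsave variant while holding it; the IPI path can use the plain lock because interrupts are already off there.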