aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-04-06 11:45:04 +0200
committerIngo Molnar <mingo@elte.hu>2009-04-07 10:48:56 +0200
commitf6c7d5fe58b4846ee0cb4b98b6042489705eced4 (patch)
treeb37bda884e0740489269da5ddc3401ffa61f076e
parentb6276f353bf490add62dcf7db0ebd75baa3e1a37 (diff)
perf_counter: there's more to overflow than writing events
Prepare for more generic overflow handling. The new perf_counter_overflow() method will handle the generic bits of the counter overflow, and can return a !0 return value, in which case the counter should be (soft) disabled, so that it won't count until it's properly disabled. XXX: do powerpc and swcounter Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> LKML-Reference: <20090406094517.812109629@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/powerpc/kernel/perf_counter.c2
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c3
-rw-r--r--include/linux/perf_counter.h4
-rw-r--r--kernel/perf_counter.c29
4 files changed, 28 insertions, 10 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 0a4d14f279a..f88c35d0710 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -732,7 +732,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
* Finally record data if requested.
*/
if (record)
- perf_counter_output(counter, 1, regs);
+ perf_counter_overflow(counter, 1, regs);
}
/*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 438415866fe..1116a41bc7b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -800,7 +800,8 @@ again:
continue;
perf_save_and_restart(counter);
- perf_counter_output(counter, nmi, regs);
+ if (perf_counter_overflow(counter, nmi, regs))
+ __pmc_generic_disable(counter, &counter->hw, bit);
}
hw_perf_ack_status(ack);
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 977fb15a53f..ca2d4df29e0 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -491,8 +491,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);
-extern void perf_counter_output(struct perf_counter *counter,
- int nmi, struct pt_regs *regs);
+extern int perf_counter_overflow(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs);
/*
* Return 1 for a software counter, 0 for a hardware counter
*/
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0a2ade2e4f1..195e976eb07 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1800,8 +1800,8 @@ static void perf_output_end(struct perf_output_handle *handle)
rcu_read_unlock();
}
-void perf_counter_output(struct perf_counter *counter,
- int nmi, struct pt_regs *regs)
+static void perf_counter_output(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs)
{
int ret;
u64 record_type = counter->hw_event.record_type;
@@ -2034,6 +2034,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
}
/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs)
+{
+ perf_counter_output(counter, nmi, regs);
+ return 0;
+}
+
+/*
* Generic software counter infrastructure
*/
@@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
+ enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_counter *counter;
struct pt_regs *regs;
@@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
!counter->hw_event.exclude_user)
regs = task_pt_regs(current);
- if (regs)
- perf_counter_output(counter, 0, regs);
+ if (regs) {
+ if (perf_counter_overflow(counter, 0, regs))
+ ret = HRTIMER_NORESTART;
+ }
hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
- return HRTIMER_RESTART;
+ return ret;
}
static void perf_swcounter_overflow(struct perf_counter *counter,
@@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
{
perf_swcounter_update(counter);
perf_swcounter_set_period(counter);
- perf_counter_output(counter, nmi, regs);
+ if (perf_counter_overflow(counter, nmi, regs))
+ /* soft-disable the counter */
+ ;
+
}
static int perf_swcounter_match(struct perf_counter *counter,