path: root/arch
author	Thomas Gleixner <tglx@linutronix.de>	2008-12-08 19:35:37 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-12-11 15:45:47 +0100
commit	dfa7c899b401d7dc5d85aca416aee64ac82812f2 (patch)
tree	496b0fee69989fd4127905a888de7135a7969e9e /arch
parent	eab656ae04b9d3b83265e3db01c0d2c46b748ef7 (diff)
perf counters: expand use of counter->event
Impact: change syscall, cleanup

Make use of the new perf_counters event type.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
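The patch reads the event description from a descriptor now embedded in struct perf_counter instead of taking it as an extra syscall argument. A minimal sketch of the layout the code below appears to rely on; the field names come from the diff, while the struct name, types and everything else are assumptions, not the actual include/linux/perf_counter.h definition:

/* Hypothetical sketch only: field names taken from the diff, rest assumed. */
struct perf_counter_event {
	s32	hw_event_type;		/* generic event index, or PERF_COUNT_RAW */
	u64	hw_event_period;	/* requested sampling period */
	u64	hw_raw_ctrl;		/* raw EVENTSEL bits when the type is RAW */
};

struct perf_counter {
	/* ... other fields elided ... */
	struct perf_counter_event	event;	/* replaces the old syscall arguments */
	struct hw_perf_counter		hw;	/* arch state set up by hw_perf_counter_init() */
	int				wakeup_pending;
};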
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	| 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 0a7f3bea2dc..30e7ebf7827 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
-int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
+int hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	u32 hw_event_type = counter->event.hw_event_type;
 
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
@@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
 	hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
 	hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
 
-	hwc->irq_period = counter->__irq_period;
+	hwc->irq_period = counter->event.hw_event_period;
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
 	hwc->next_count = -((s32) hwc->irq_period);
 
 	/*
-	 * Negative event types mean raw encoded event+umask values:
+	 * Raw event types provide the config in the event structure
 	 */
-	if (hw_event_type < 0) {
-		counter->hw_event_type = -hw_event_type;
-		counter->hw_event_type &= ~PERF_COUNT_NMI;
+	hw_event_type &= ~PERF_COUNT_NMI;
+	if (hw_event_type == PERF_COUNT_RAW) {
+		hwc->config |= counter->event.hw_raw_ctrl;
 	} else {
-		hw_event_type &= ~PERF_COUNT_NMI;
 		if (hw_event_type >= max_intel_perfmon_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
+		hwc->config |= intel_perfmon_event_map[hw_event_type];
 	}
-	hwc->config |= counter->hw_event_type;
 	counter->wakeup_pending = 0;
 
 	return 0;
@@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
 				perf_save_and_restart(counter);
 			}
 		}
-		perf_store_irq_data(leader, counter->hw_event_type);
+		perf_store_irq_data(leader, counter->event.hw_event_type);
 		perf_store_irq_data(leader, atomic64_counter_read(counter));
 	}
 }
@@ -418,7 +417,8 @@ again:
 			perf_store_irq_data(counter, instruction_pointer(regs));
 			break;
 		case PERF_RECORD_GROUP:
-			perf_store_irq_data(counter, counter->hw_event_type);
+			perf_store_irq_data(counter,
+					    counter->event.hw_event_type);
 			perf_store_irq_data(counter,
 					    atomic64_counter_read(counter));
 			perf_handle_group(counter, &status, &ack);
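With the hw_event_type argument gone, generic code only hands the counter itself to the arch layer. A hedged sketch of what a caller might now look like; the helper name and surrounding allocation code are illustrative assumptions, not taken from kernel/perf_counter.c:

/* Hypothetical caller: copy the event description into the counter,
 * then let the arch code read it through counter->event. */
static struct perf_counter *
alloc_counter(const struct perf_counter_event *event)
{
	struct perf_counter *counter;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return NULL;

	counter->event = *event;		/* embedded event descriptor */

	if (hw_perf_counter_init(counter)) {	/* no extra type argument any more */
		kfree(counter);
		return NULL;
	}
	return counter;
}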