author	Yong Wang <yong.y.wang@linux.intel.com>	2009-06-03 13:12:55 +0800
committer	Ingo Molnar <mingo@elte.hu>	2009-06-03 09:53:34 +0200
commit	a32881066e58346f2901afe0ebdfbf0c562877e5 (patch)
tree	acd28c9b659a3df0769eedb32127e9c6ad72def9 /arch/x86/kernel/cpu/perf_counter.c
parent	addc2785ce92ff05da8edf18317b6b4719e10d9f (diff)
perf_counter/x86: Remove the IRQ (non-NMI) handling bits
Remove the IRQ (non-NMI) handling bits as NMI will be used always.

Signed-off-by: Yong Wang <yong.y.wang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090603051255.GA2791@ywang-moblin2.bj.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
 arch/x86/kernel/cpu/perf_counter.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)
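The effect of the change is that the PMU interrupt handler loses its "nmi" flag argument: with the LAPIC vector path removed, every invocation now comes from NMI context, so the flag is always 1 and can be hard-coded at the call sites. A minimal userspace sketch of the narrowed callback interface (stand-in types only; "struct pt_regs" below is a placeholder, not the kernel's, and the handler body is illustrative):

/* sketch.c: the handle_irq callback before/after, simplified */
#include <stdio.h>

struct pt_regs { unsigned long ip; };	/* placeholder, not the kernel struct */

struct x86_pmu_sketch {
	const char *name;
	/* old: int (*handle_irq)(struct pt_regs *, int nmi); */
	int (*handle_irq)(struct pt_regs *);	/* new: NMI context is implicit */
};

static int intel_pmu_handle_irq_sketch(struct pt_regs *regs)
{
	/* callers used to pass nmi=0 (IRQ) or nmi=1 (NMI); now always NMI */
	printf("overflow at ip=0x%lx (NMI context assumed)\n", regs->ip);
	return 1;	/* handled */
}

static struct x86_pmu_sketch pmu = {
	.name		= "intel",
	.handle_irq	= intel_pmu_handle_irq_sketch,
};

int main(void)
{
	struct pt_regs regs = { .ip = 0xc0ffee };
	/* after this patch, the NMI notifier is the only caller */
	return !pmu.handle_irq(&regs);
}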
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index e16e8c13132..12cc05ed9f4 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -40,7 +40,7 @@ struct cpu_hw_counters {
 struct x86_pmu {
 	const char	*name;
 	int		version;
-	int		(*handle_irq)(struct pt_regs *, int);
+	int		(*handle_irq)(struct pt_regs *);
 	void		(*disable_all)(void);
 	void		(*enable_all)(void);
 	void		(*enable)(struct hw_perf_counter *, int);
@@ -755,7 +755,7 @@ static void intel_pmu_reset(void)
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
-static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct cpu_hw_counters *cpuc;
@@ -794,7 +794,7 @@ again:
 		if (!intel_pmu_save_and_restart(counter))
 			continue;
 
-		if (perf_counter_overflow(counter, nmi, regs, 0))
+		if (perf_counter_overflow(counter, 1, regs, 0))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
@@ -812,7 +812,7 @@ again:
 	return 1;
 }
 
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
 	int cpu, idx, handled = 0;
 	struct cpu_hw_counters *cpuc;
@@ -840,22 +840,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, nmi, regs, 0))
+		if (perf_counter_overflow(counter, 1, regs, 0))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
 	return handled;
 }
 
-void smp_perf_counter_interrupt(struct pt_regs *regs)
-{
-	irq_enter();
-	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
-	ack_APIC_irq();
-	x86_pmu.handle_irq(regs, 0);
-	irq_exit();
-}
-
 void smp_perf_pending_interrupt(struct pt_regs *regs)
 {
 	irq_enter();
@@ -910,7 +901,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	 * If the first NMI handles both, the latter will be empty and daze
 	 * the CPU.
 	 */
-	x86_pmu.handle_irq(regs, 1);
+	x86_pmu.handle_irq(regs);
 
 	return NOTIFY_STOP;
 }
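The comment kept in the last hunk is why the NMI path can be the only path: two counters can overflow back to back, raising two NMIs; if the first handler drains both, the second NMI finds nothing pending, and it must be swallowed (NOTIFY_STOP) rather than reported as an unknown NMI that "dazes" the CPU. A hedged userspace sketch of that drain-everything-in-one-NMI pattern follows; read_status()/ack_status() are simplified stand-ins for the global-status MSR accessors, not the kernel API:

/* daze.c: why one NMI drains all pending overflows (simplified model) */
#include <stdio.h>
#include <stdint.h>

static uint64_t global_status = 0x5;	/* pretend counters 0 and 2 overflowed */

static uint64_t read_status(void)	{ return global_status; }
static void ack_status(uint64_t mask)	{ global_status &= ~mask; }

static int pmu_handle_nmi(void)
{
	uint64_t status;
	int handled = 0;

again:
	status = read_status();
	if (!status)
		return handled;	/* a back-to-back NMI lands here: nothing left */

	for (int bit = 0; bit < 64; bit++) {
		if (status & (1ULL << bit)) {
			printf("counter %d overflowed\n", bit);
			handled++;
		}
	}
	ack_status(status);
	goto again;	/* re-check: more overflows may have arrived meanwhile */
}

int main(void)
{
	printf("first NMI: handled %d\n", pmu_handle_nmi());		/* 2 */
	printf("back-to-back NMI: handled %d\n", pmu_handle_nmi());	/* 0 */
	return 0;
}

Because the first NMI may have drained everything, the notifier returns NOTIFY_STOP even when the handler found no work, which is what keeps the empty second NMI from being treated as spurious.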