author    Paul Mackerras <paulus@samba.org>  2009-03-16 21:00:00 +1100
committer Ingo Molnar <mingo@elte.hu>        2009-04-06 09:30:14 +0200
commit    b6c5a71da1477d261bc36254fe1f20d32b57598d (patch)
tree      02815fa0a0016eacd6203b1f496109f07bbd7be5 /arch/powerpc
parent    7bb497bd885eedd0f56dfe3cc1b5ff20710d33b9 (diff)
perf_counter: abstract wakeup flag setting in core to fix powerpc build
Impact: build fix for powerpc

Commit bd753921015e7905 ("perf_counter: software counter event infrastructure") introduced a use of TIF_PERF_COUNTERS into the core perf counter code. This breaks the build on powerpc, because powerpc signals wakeups with a flag in a per-cpu area rather than a thread_info flag; thread_info flags have to be manipulated with atomic operations and are therefore slower than per-cpu flags.

This fixes the problem by changing the core to use an abstracted set_perf_counter_pending() function, which is defined on x86 to set the TIF_PERF_COUNTERS flag and on powerpc to set the per-cpu flag (paca->perf_counter_pending). It changes the previous powerpc definition of set_perf_counter_pending to take no argument and adds a clear_perf_counter_pending(), so as to simplify the definition on x86.

On x86, set_perf_counter_pending() is defined as a macro. Defining it as a static inline in arch/x86/include/asm/perf_counters.h causes compile failures, because <asm/perf_counters.h> gets included early in <linux/sched.h>, so the definitions of set_tsk_thread_flag() etc. are not yet available in <asm/perf_counters.h>. (On powerpc this problem is avoided by defining set_perf_counter_pending() etc. in <asm/hw_irq.h>.)

Signed-off-by: Paul Mackerras <paulus@samba.org>
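For context, the shape of the abstraction described above looks roughly like this. The powerpc inlines are taken from the <asm/hw_irq.h> hunk below; the x86 macro form is only a sketch inferred from the commit message (it is not part of this diff and the real header may differ in detail):

/* x86 (sketch, assumed): must be a macro, because <asm/perf_counters.h>
 * is pulled in before set_tsk_thread_flag() is declared in <linux/sched.h>.
 */
#define set_perf_counter_pending() \
	set_tsk_thread_flag(current, TIF_PERF_COUNTERS)
#define clear_perf_counter_pending() \
	clear_tsk_thread_flag(current, TIF_PERF_COUNTERS)

/* powerpc (from this patch): a per-cpu byte in the paca, addressed
 * through r13, so no atomic read-modify-write of thread_info flags
 * is needed.
 */
static inline void set_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}

static inline void clear_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}

The core can then call set_perf_counter_pending() unconditionally and let each architecture decide what "pending" means; on powerpc, raw_local_irq_restore() clears the flag and runs perf_counter_do_pending() when interrupts are re-enabled, as the irq.c hunk below shows.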
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h    14
-rw-r--r--  arch/powerpc/kernel/irq.c            11
-rw-r--r--  arch/powerpc/kernel/perf_counter.c    3
3 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b43076ff92c..cb32d571c9c 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -142,10 +142,17 @@ static inline unsigned long get_perf_counter_pending(void)
 	return x;
 }
 
-static inline void set_perf_counter_pending(int x)
+static inline void set_perf_counter_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
-		"r" (x),
+		"r" (1),
+		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+static inline void clear_perf_counter_pending(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (0),
 		"i" (offsetof(struct paca_struct, perf_counter_pending)));
 }
 
@@ -158,7 +165,8 @@ static inline unsigned long get_perf_counter_pending(void)
 	return 0;
 }
 
-static inline void set_perf_counter_pending(int x) {}
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
 static inline void perf_counter_do_pending(void) {}
 
 #endif /* CONFIG_PERF_COUNTERS */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0d2e37c5773..469e9635ff0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -104,13 +104,6 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-#ifdef CONFIG_PERF_COUNTERS
-notrace void __weak perf_counter_do_pending(void)
-{
-	set_perf_counter_pending(0);
-}
-#endif
-
 notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
@@ -142,8 +135,10 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}
 
-	if (get_perf_counter_pending())
+	if (get_perf_counter_pending()) {
+		clear_perf_counter_pending();
 		perf_counter_do_pending();
+	}
 
 	/*
 	 * if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 0e33d27cd46..5008762e8bf 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -653,7 +653,6 @@ void perf_counter_do_pending(void)
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
 
-	set_perf_counter_pending(0);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter && counter->wakeup_pending) {
@@ -811,7 +810,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 			perf_counter_do_pending();
 			irq_exit();
 		} else {
-			set_perf_counter_pending(1);
+			set_perf_counter_pending();
 		}
 	}
 }