Diffstat (limited to 'include/asm-x86/percpu.h')
-rw-r--r--  include/asm-x86/percpu.h  26
1 file changed, 26 insertions, 0 deletions
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 912a3a17b9d..4e91ee1e37a 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -22,6 +22,32 @@
DECLARE_PER_CPU(struct x8664_pda, pda);
+/*
+ * These are supposed to be implemented as a single instruction which
+ * operates on the per-cpu data base segment. x86-64 doesn't have
+ * that yet, so this is a fairly inefficient workaround for the
+ * meantime. The single instruction is atomic with respect to
+ * preemption and interrupts, so we would need to explicitly disable
+ * interrupts here to achieve the same effect. However, because these
+ * accessors can be used from within the interrupt disable/enable
+ * paths themselves, we can't actually disable interrupts; disabling
+ * preemption is enough.
+ */
+#define x86_read_percpu(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp; \
+ preempt_disable(); \
+ __tmp = __get_cpu_var(var); \
+ preempt_enable(); \
+ __tmp; \
+ })
+
+#define x86_write_percpu(var, val) \
+ do { \
+ preempt_disable(); \
+ __get_cpu_var(var) = (val); \
+ preempt_enable(); \
+ } while(0)
+
#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
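
For readers unfamiliar with these accessors, here is a minimal usage sketch (not part of the patch). It assumes a kernel build environment; foo_events and foo_note_event() are hypothetical names used only for illustration, while DEFINE_PER_CPU, preempt_disable()/preempt_enable() and the x86_read_percpu()/x86_write_percpu() macros are as defined in the kernel and in the diff above.

    /*
     * Illustrative sketch: a hypothetical per-cpu event counter built
     * on the accessors added by this patch.
     */
    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <asm/percpu.h>

    static DEFINE_PER_CPU(unsigned long, foo_events);

    static void foo_note_event(void)
    {
    	/*
    	 * Each macro disables preemption internally, so a single read
    	 * or write cannot be split across a CPU migration.  A
    	 * read-modify-write like the one below is still two separate
    	 * accesses, though, so the caller must hold off preemption
    	 * itself to keep the whole update on one CPU.
    	 */
    	preempt_disable();
    	x86_write_percpu(foo_events, x86_read_percpu(foo_events) + 1);
    	preempt_enable();
    }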