author     Tejun Heo <tj@kernel.org>    2009-06-24 15:13:52 +0900
committer  Tejun Heo <tj@kernel.org>    2009-06-24 15:13:52 +0900
commit     9b7dbc7dc0365a943af2d73b1376a6f0aac5dc0d (patch)
tree       324bef0a494e20f415b8fb892fe9240fba638156 /arch/alpha/include
parent     6088464cf1ae9fb3d2ccc0ec5feb3f5b971098d8 (diff)
alpha: switch to dynamic percpu allocator
Alpha implements a custom SHIFT_PERCPU_PTR for modules because the percpu
area can be located far away from the 4G area in which the module text is
located. The custom SHIFT_PERCPU_PTR forces GOT usage via the ldq
instruction with a literal relocation; however, that relocation can't be
used with dynamically allocated percpu variables. Fortunately, a similar
result can be achieved by using weak percpu variable definitions.

This patch makes alpha use weak definitions and switches it to the dynamic
percpu allocator.

asm/tlbflush.h was getting linux/sched.h via asm/percpu.h, which no longer
needs it. Include linux/sched.h directly in asm/tlbflush.h.

Compile tested. Generation of literal relocations verified.

This patch is based on Ivan Kokshaysky's alpha percpu patch.

[ Impact: use dynamic percpu allocator ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Richard Henderson <rth@twiddle.net>
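As a rough illustration of the weak-definition idea (a minimal sketch only,
not the actual macros from include/linux/percpu-defs.h; the variable and
helper names below are hypothetical):

/*
 * Illustrative sketch: marking a per-CPU object weak keeps GCC from
 * assuming it can be reached with a 32-bit GP displacement, so its
 * address is taken through the GOT and remains valid even when the
 * percpu area lives far above the 4G range around the module text.
 */
__attribute__((weak, section(".data.percpu")))
unsigned long example_pcpu_counter;		/* hypothetical variable */

static inline unsigned long *example_pcpu_ptr(unsigned long cpu_offset)
{
	/* Same arithmetic SHIFT_PERCPU_PTR() performs: base + per-CPU offset. */
	return (unsigned long *)((char *)&example_pcpu_counter + cpu_offset);
}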
Diffstat (limited to 'arch/alpha/include')
-rw-r--r--  arch/alpha/include/asm/percpu.h     95
-rw-r--r--  arch/alpha/include/asm/tlbflush.h    1
2 files changed, 9 insertions, 87 deletions
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 7f0a9c4f2fd..2c12378e3aa 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -1,97 +1,18 @@
#ifndef __ALPHA_PERCPU_H
#define __ALPHA_PERCPU_H
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <linux/percpu-defs.h>
-
/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#else
-/*
- * To calculate addresses of locally defined variables, GCC uses 32-bit
- * displacement from the GP. Which doesn't work for per cpu variables in
- * modules, as an offset to the kernel per cpu area is way above 4G.
+ * To calculate addresses of locally defined variables, GCC uses
+ * 32-bit displacement from the GP. Which doesn't work for per cpu
+ * variables in modules, as an offset to the kernel per cpu area is
+ * way above 4G.
*
- * This forces allocation of a GOT entry for per cpu variable using
- * ldq instruction with a 'literal' relocation.
- */
-#define SHIFT_PERCPU_PTR(var, offset) ({ \
- extern int simple_identifier_##var(void); \
- unsigned long __ptr, tmp_gp; \
- asm ( "br %1, 1f \n\
- 1: ldgp %1, 0(%1) \n\
- ldq %0, per_cpu__" #var"(%1)\t!literal" \
- : "=&r"(__ptr), "=&r"(tmp_gp)); \
- (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
+ * Always use weak definitions for percpu variables in modules.
*/
-#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
-
-#endif /* SMP */
-
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
-#endif
-
-#ifdef CONFIG_SMP
-
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#endif
-#define PER_CPU_FIRST_SECTION ".first"
-
-#else
-
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_FIRST_SECTION ""
-
+#if defined(MODULE) && defined(CONFIG_SMP)
+#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define PER_CPU_ATTRIBUTES
+#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 9d87aaa08c0..e89e0c2e15b 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -2,6 +2,7 @@
#define _ALPHA_TLBFLUSH_H
#include <linux/mm.h>
+#include <linux/sched.h>
#include <asm/compiler.h>
#include <asm/pgalloc.h>