Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bug.h          | 17
-rw-r--r--  include/asm-generic/percpu.h       | 97
-rw-r--r--  include/asm-generic/tlb.h          |  1
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  1
4 files changed, 73 insertions, 43 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index d56fedbb457..2632328d864 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -31,14 +31,19 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
-#ifndef HAVE_ARCH_WARN_ON
+#ifndef __WARN
+#ifndef __ASSEMBLY__
+extern void warn_on_slowpath(const char *file, const int line);
+#define WANT_WARN_ON_SLOWPATH
+#endif
+#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
+#endif
+
+#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- printk("WARNING: at %s:%d %s()\n", __FILE__, \
- __LINE__, __FUNCTION__); \
- dump_stack(); \
- } \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
unlikely(__ret_warn_on); \
})
#endif
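
For illustration only (not part of the patch), a minimal sketch of how the reworked WARN_ON() behaves on an architecture that does not provide its own __WARN: the condition test stays inline at the call site, while the banner printing and stack dump move into the out-of-line warn_on_slowpath() helper. The function check_len() and its threshold are hypothetical; only WARN_ON() and warn_on_slowpath() come from the header above.

#include <linux/kernel.h>
#include <linux/errno.h>

static int check_len(int len)
{
	/*
	 * WARN_ON(len > 64) expands to the generic macro above: if the
	 * condition is true it calls __WARN(), which in the generic case
	 * is warn_on_slowpath(__FILE__, __LINE__) instead of an inline
	 * printk() + dump_stack() at every call site.  The macro still
	 * evaluates to the condition, so it can be tested in an if ().
	 */
	if (WARN_ON(len > 64))
		return -EINVAL;
	return len;
}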
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d85172e9ed4..4b8d31cda1a 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -3,54 +3,79 @@
#include <linux/compiler.h>
#include <linux/threads.h>
-#define __GENERIC_PER_CPU
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
#ifdef CONFIG_SMP
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.shared_aligned"))) \
- __typeof__(type) per_cpu__##name \
- ____cacheline_aligned_in_smp
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({ \
- extern int simple_identifier_##var(void); \
- RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size) \
-do { \
- unsigned int __i; \
- for_each_possible_cpu(__i) \
- memcpy((pcpudst)+__per_cpu_offset[__i], \
- (src), (size)); \
-} while (0)
-#else /* ! SMP */
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add an offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#endif
-#define DEFINE_PER_CPU(type, name) \
- __typeof__(type) per_cpu__##name
+/*
+ * A percpu variable may point to a discarded region. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var) per_cpu__##var
-#define __raw_get_cpu_var(var) per_cpu__##var
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var) per_cpu_var(var)
+#define __raw_get_cpu_var(var) per_cpu_var(var)
#endif /* SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
+ __typeof__(type) per_cpu_var(name)
#endif /* _ASM_GENERIC_PERCPU_H_ */
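
As a usage sketch (again not part of the patch), the accessors above are typically exercised as follows. The counter and both functions are hypothetical; DEFINE_PER_CPU() and for_each_possible_cpu() are assumed to come from <linux/percpu.h> and <linux/cpumask.h> rather than from this header.

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, my_hits);

/* Callers are expected to have preemption disabled; my_cpu_offset is
 * derived from smp_processor_id(). */
static void bump_hits(void)
{
	/*
	 * On SMP this expands to
	 *   *SHIFT_PERCPU_PTR(&per_cpu_var(my_hits), my_cpu_offset)
	 * i.e. per_cpu_var() pastes the per_cpu__ prefix onto the name
	 * and SHIFT_PERCPU_PTR() adds this CPU's per-cpu offset.
	 */
	__get_cpu_var(my_hits)++;
}

static unsigned long total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu(var, cpu) gives any CPU's instance of the variable. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(my_hits, cpu);
	return sum;
}

The default SHIFT_PERCPU_PTR() is built on RELOC_HIDE() so that the compiler does not assume the resulting pointer still refers to the original symbol in the discarded .data.percpu section; architectures with other addressing schemes can override it.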
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 75f2bfab614..6ce9f3ab928 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,7 +15,6 @@
#include <linux/swap.h>
#include <linux/quicklist.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 76df771be58..f784d2f3414 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -184,6 +184,7 @@
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
+ . = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
. = ALIGN((align));