Diffstat (limited to 'include/asm-powerpc')
 include/asm-powerpc/irq.h       |  2 --
 include/asm-powerpc/processor.h |  6 ++++++
 include/asm-powerpc/rio.h       | 18 ++++++++++++++++++
 include/asm-powerpc/system.h    | 24 ++++++++++++------------
 include/asm-powerpc/unaligned.h | 11 ++++-------
 5 files changed, 40 insertions(+), 21 deletions(-)
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index b5c03127a9b..5089deb8fec 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -619,8 +619,6 @@ struct pt_regs;
#define __ARCH_HAS_DO_SOFTIRQ
-extern void __do_softirq(void);
-
#ifdef CONFIG_IRQSTACKS
/*
* Per-cpu stacks for handling hard and soft interrupts.
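With __ARCH_HAS_DO_SOFTIRQ the architecture supplies its own do_softirq(), typically so softirqs run on the dedicated per-cpu IRQ stacks declared below; the extern can be dropped because <linux/interrupt.h> already declares __do_softirq() generically. A rough sketch of the arch-side entry point, for orientation only (call_do_softirq() and softirq_ctx stand in for the real powerpc stack-switching machinery):

    #include <linux/interrupt.h>    /* generic __do_softirq() declaration */

    void do_softirq(void)
    {
            unsigned long flags;

            if (in_interrupt())
                    return;

            local_irq_save(flags);
            /* Switch to this cpu's softirq stack and run the
             * pending softirqs there. */
            if (local_softirq_pending())
                    call_do_softirq(softirq_ctx[smp_processor_id()]);
            local_irq_restore(flags);
    }
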
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index fd98ca998b4..cf83f2d7e2a 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -138,6 +138,8 @@ typedef struct {
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
+
#ifdef CONFIG_PPC64
unsigned long ksp_vsid;
#endif
@@ -182,11 +184,14 @@ struct thread_struct {
#define ARCH_MIN_TASKALIGN 16
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+ (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
.ksp = INIT_SP, \
+ .ksp_limit = INIT_SP_LIMIT, \
.fs = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
@@ -194,6 +199,7 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
+ .ksp_limit = INIT_SP_LIMIT, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
.fpr = {0}, \
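The new ksp_limit field supports runtime stack-overflow detection: the kernel stack grows down from INIT_SP toward the thread_info at the bottom of the same allocation, so INIT_SP_LIMIT marks the first usable address above that thread_info. A minimal sketch of how an overflow check can use the field (check_stack_overflow() is a hypothetical helper, not part of this patch):

    #include <linux/kernel.h>
    #include <linux/sched.h>

    static inline void check_stack_overflow(struct task_struct *tsk,
                                            unsigned long sp)
    {
            /* Stacks grow down; at or below ksp_limit the stack has
             * run into the thread_info that shares its allocation. */
            if (unlikely(sp <= tsk->thread.ksp_limit))
                    panic("kernel stack overflow: sp=%lx limit=%lx",
                          sp, tsk->thread.ksp_limit);
    }
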
diff --git a/include/asm-powerpc/rio.h b/include/asm-powerpc/rio.h
new file mode 100644
index 00000000000..0018bf80cb2
--- /dev/null
+++ b/include/asm-powerpc/rio.h
@@ -0,0 +1,18 @@
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+extern void platform_rio_init(void);
+
+#endif /* ASM_PPC_RIO_H */
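The new header only declares the hook; each RapidIO-capable platform is expected to define platform_rio_init() in its board code. A hypothetical board-file sketch (setup_board_rio_controller() is invented for illustration):

    #include <asm/rio.h>

    void platform_rio_init(void)
    {
            /* Probe and register this board's RapidIO controller. */
            setup_board_rio_controller();
    }
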
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index fab1674b31b..2b6559a6d11 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -204,7 +204,7 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */
* Changes the memory location '*ptr' to be val and returns
* the previous value stored there.
*/
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
unsigned long prev;
@@ -229,7 +229,7 @@ __xchg_u32(volatile void *p, unsigned long val)
* Changes the memory location '*ptr' to be val and returns
* the previous value stored there.
*/
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
unsigned long prev;
@@ -247,7 +247,7 @@ __xchg_u32_local(volatile void *p, unsigned long val)
}
#ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
unsigned long prev;
@@ -266,7 +266,7 @@ __xchg_u64(volatile void *p, unsigned long val)
return prev;
}
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
unsigned long prev;
@@ -290,7 +290,7 @@ __xchg_u64_local(volatile void *p, unsigned long val)
*/
extern void __xchg_called_with_bad_pointer(void);
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
@@ -305,7 +305,7 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
return x;
}
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
@@ -338,7 +338,7 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
*/
#define __HAVE_ARCH_CMPXCHG 1
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
unsigned int prev;
@@ -361,7 +361,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
return prev;
}
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
unsigned long new)
{
@@ -384,7 +384,7 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
}
#ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
unsigned long prev;
@@ -406,7 +406,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
return prev;
}
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
unsigned long new)
{
@@ -432,7 +432,7 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
@@ -448,7 +448,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
return old;
}
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
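The switch from __inline__ to __always_inline is not cosmetic: __xchg() and __cmpxchg() dispatch on a sizeof()-derived size argument, and the link-time trap depends on the helper actually being inlined. Once inlined with a constant size, the switch folds away and the call to the deliberately-undefined __xchg_called_with_bad_pointer() is eliminated; if the compiler instead emits the helper out of line (which plain inline permits), size becomes a runtime parameter, the default case stays in the object code, and the undefined reference can surface at link time even for valid callers. A usage sketch, assuming the usual xchg()/cmpxchg() wrapper macros built on these helpers:

    static unsigned int lock_word;

    void lock_example(void)
    {
            unsigned int old;

            /* Atomically store 1 and fetch the previous value. */
            old = xchg(&lock_word, 1);

            /* Store 0 only if the current value is still 1. */
            (void)cmpxchg(&lock_word, 1, 0);

            (void)old;
    }
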
diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h
index 6c95dfa2652..5f1b1e3c213 100644
--- a/include/asm-powerpc/unaligned.h
+++ b/include/asm-powerpc/unaligned.h
@@ -5,15 +5,12 @@
/*
* The PowerPC can do unaligned accesses itself in big endian mode.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
*/
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
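
With the generic unaligned helpers, get_unaligned()/put_unaligned() on powerpc resolve to the big-endian accessors, matching the CPU's native byte order while still dispatching on the pointee size. A small usage sketch (read_be32_field() is illustrative, not from this patch):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    u32 read_be32_field(const u8 *buf, size_t off)
    {
            /* Safe even when buf + off is not 4-byte aligned. */
            return get_unaligned((const u32 *)(buf + off));
    }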