Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/eeh.h       |  3
-rw-r--r--   include/asm-powerpc/futex.h     | 28
-rw-r--r--   include/asm-powerpc/io.h        | 43
-rw-r--r--   include/asm-powerpc/kdump.h     |  2
-rw-r--r--   include/asm-powerpc/paca.h      |  1
-rw-r--r--   include/asm-powerpc/spinlock.h  | 17
6 files changed, 77 insertions, 17 deletions
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 4df3e80118f..6a784396660 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -205,6 +205,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c,
 	lc |= lc << 8;
 	lc |= lc << 16;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && !EEH_CHECK_ALIGN(p, 4)) {
 		*((volatile u8 *)p) = c;
 		p++;
@@ -229,6 +230,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *sr
 	void *destsave = dest;
 	unsigned long nsave = n;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
 		__asm__ __volatile__ ("eieio" : : : "memory");
@@ -266,6 +268,7 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
 {
 	void *vdest = (void __force *) dest;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
 		*((volatile u8 *)vdest) = *((u8 *)src);
 		src++;
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f1b3c00bc1c..936422e5489 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -84,7 +84,33 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 static inline int
 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
-	return -ENOSYS;
+	int prev;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__ (
+        LWSYNC_ON_SMP
+"1:     lwarx   %0,0,%2         # futex_atomic_cmpxchg_inatomic\n\
+        cmpw    0,%0,%3\n\
+        bne-    3f\n"
+        PPC405_ERR77(0,%2)
+"2:     stwcx.  %4,0,%2\n\
+        bne-    1b\n"
+        ISYNC_ON_SMP
+"3:	.section .fixup,\"ax\"\n\
+4:	li	%0,%5\n\
+	b	3b\n\
+	.previous\n\
+	.section __ex_table,\"a\"\n\
+	.align 3\n\
+	" PPC_LONG "1b,4b,2b,4b\n\
+	.previous" \
+	: "=&r" (prev), "+m" (*uaddr)
+	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+	: "cc", "memory");
+
+	return prev;
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 36c4c34bf56..212428db0d8 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -19,6 +19,7 @@ extern int check_legacy_ioport(unsigned long base_port);
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/paca.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/iseries_io.h>
 #endif
@@ -162,7 +163,11 @@ extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
 extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
 extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
 
-#define mmiowb()
+static inline void mmiowb(void)
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+	get_paca()->io_sync = 0;
+}
 
 /*
  * output pause versions need a delay at least for the
@@ -278,22 +283,23 @@ static inline int in_8(const volatile unsigned char __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_8(volatile unsigned char __iomem *addr, int val)
 {
-	__asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline int in_le16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
 
@@ -302,28 +308,30 @@ static inline int in_be16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sthbrx %1,0,%2; sync"
+	__asm__ __volatile__("sync; sthbrx %1,0,%2"
 			     : "=m" (*addr) : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sth%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned in_le32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
 
@@ -332,21 +340,23 @@ static inline unsigned in_be32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
 			     : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
@@ -354,6 +364,7 @@ static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
 	unsigned long tmp, ret;
 
 	__asm__ __volatile__(
+			     "sync\n"
 			     "ld %1,0(%2)\n"
 			     "twi 0,%1,0\n"
 			     "isync\n"
@@ -372,7 +383,7 @@ static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
 {
 	unsigned long ret;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
@@ -389,14 +400,16 @@ static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long
 			     "rldicl %1,%1,32,0\n"
 			     "rlwimi %0,%1,8,8,31\n"
 			     "rlwimi %0,%1,24,16,23\n"
-			     "std %0,0(%3)\n"
-			     "sync"
+			     "sync\n"
+			     "std %0,0(%3)"
 			     : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
+	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 #ifndef CONFIG_PPC_ISERIES
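
The io.h hunks above move the barrier in front of each access: loads become "sync; load; twi; isync" and stores become "sync; store", with the store-side accessors also setting get_paca()->io_sync. Issuing sync before the store guarantees that a preceding cacheable store (a DMA descriptor in memory, say) is performed before the MMIO store can reach the device. A minimal sketch of the driver pattern this ordering protects; all foo_* names are invented for illustration and are not part of this diff:

	/* Hypothetical driver fragment: with the accessors above,
	 * out_be32() begins with "sync", so both descriptor stores are
	 * globally performed before the doorbell write can reach the
	 * device, which then fetches the descriptor via DMA. */
	static void foo_kick_dma(struct foo_dev *foo, dma_addr_t busaddr)
	{
		foo->desc->addr  = busaddr;		/* cacheable store */
		foo->desc->flags = FOO_DESC_READY;	/* device reads this via DMA */
		out_be32(foo->regs + FOO_DOORBELL, 1);	/* sync; stw; io_sync = 1 */
	}

Note that the out_* accessors no longer end with sync; the trailing barrier is deferred to mmiowb() or to spin_unlock() via the io_sync flag (see the paca.h and spinlock.h hunks below).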
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index dc1574c945f..10e8eb1e6f4 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -7,7 +7,7 @@
 
 /* How many bytes to reserve at zero for kdump. The reserve limit should
  * be greater or equal to the trampoline's end address.
  * Reserve to the end of the FWNMI area, see head_64.S */
-#define KDUMP_RESERVE_LIMIT	0x8000
+#define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */
 
 #ifdef CONFIG_CRASH_DUMP
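
The reserve limit doubles from 0x8000 (32K) to 0x10000 (64K) so that the region reserved at address zero also covers the FWNMI area described in head_64.S. A hedged illustration of the constraint the comment states; the helper and symbol names here are invented, not from this patch:

	/* Sketch only: whatever boot code reserves low memory for kdump
	 * must keep at least KDUMP_RESERVE_LIMIT bytes, which now spans
	 * both the trampolines and the FWNMI vectors in the first 64K.
	 * reserve_low_memory() is a hypothetical placeholder for the
	 * real reservation call. */
	BUG_ON(trampoline_end > KDUMP_RESERVE_LIMIT);	/* 0x10000 = 64 * 1024 */
	reserve_low_memory(0, KDUMP_RESERVE_LIMIT);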
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 2d4585f0620..3d5d590bc4b 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -93,6 +93,7 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+	u8 io_sync;			/* writel() needs spin_unlock sync */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 895cb6d3a42..c31e4382a77 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -36,6 +36,19 @@
 #define LOCK_TOKEN	1
 #endif
 
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO	do {						\
+			if (unlikely(get_paca()->io_sync)) {	\
+				mb();				\
+				get_paca()->io_sync = 0;	\
+			}					\
+		} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
@@ -61,6 +74,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
 static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	return __spin_trylock(lock) == 0;
 }
 
@@ -91,6 +105,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 
 static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -107,6 +122,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 {
 	unsigned long flags_dis;
 
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -124,6 +140,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	SYNC_IO;
 	__asm__ __volatile__("# __raw_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
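
Taken together, the paca.h, io.h and spinlock.h hunks implement one protocol: lock acquisition clears io_sync, every MMIO store sets it, and unlock issues a full barrier only when the flag is set, so critical sections that never touch I/O pay nothing extra. A sketch of the net effect for a driver; lock and register names are invented for illustration:

	/* Hypothetical critical section.  Previously, ordering the MMIO
	 * store against the lock release required an explicit mmiowb()
	 * on some architectures; with this diff, __raw_spin_unlock()
	 * checks io_sync and supplies the mb() itself when an MMIO
	 * store actually happened inside the critical section. */
	spin_lock(&foo->lock);			/* CLEAR_IO_SYNC: io_sync = 0 */
	out_le32(foo->regs + FOO_CMD, cmd);	/* sync; stwbrx; io_sync = 1 */
	spin_unlock(&foo->lock);		/* SYNC_IO: io_sync set -> mb() */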