| author | Steven Whitehouse <swhiteho@redhat.com> | 2006-09-28 08:29:59 -0400 |
|---|---|---|
| committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-09-28 08:29:59 -0400 |
| commit | 185a257f2f73bcd89050ad02da5bedbc28fc43fa (patch) | |
| tree | 5e32586114534ed3f2165614cba3d578f5d87307 /include/asm-i386/spinlock.h | |
| parent | 3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699 (diff) | |
| parent | a77c64c1a641950626181b4857abb701d8f38ccc (diff) | |
Merge branch 'master' into gfs2
Diffstat (limited to 'include/asm-i386/spinlock.h')
| -rw-r--r-- | include/asm-i386/spinlock.h | 134 |
|---|---|---|

1 file changed, 70 insertions, 64 deletions
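The bulk of the diff below converts the old string-pasted asm macros (`__raw_spin_lock_string` and friends) into inline functions that carry their own `asm volatile` blocks, so the operand constraints sit next to the template they constrain instead of at a distant expansion site. A minimal sketch of that conversion pattern, using hypothetical `EXAMPLE_LOCK_STRING`/`example_lock_*` names rather than the kernel's own:

```c
/* Before: the asm template is a free-standing string macro; its "%0"
 * only makes sense at the expansion site that supplies the constraints. */
#define EXAMPLE_LOCK_STRING \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"

static inline void example_lock_old(volatile char *slock)
{
	asm(EXAMPLE_LOCK_STRING : "+m" (*slock) : : "memory");
}

/* After: template and constraints live together inside one function,
 * and the asm is explicitly volatile. */
static inline void example_lock_new(volatile char *slock)
{
	asm volatile("\n1:\t"
		     "lock ; decb %0\n\t"	/* try to take the byte from 1 to 0 */
		     "jns 3f\n"			/* sign clear: lock acquired */
		     "2:\t"
		     "rep;nop\n\t"		/* PAUSE while spinning */
		     "cmpb $0,%0\n\t"
		     "jle 2b\n\t"		/* still held: keep watching */
		     "jmp 1b\n"			/* looks free: retry the decb */
		     "3:\n\t"
		     : "+m" (*slock) : : "memory");
}
```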
```diff
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41..b0b3043f05e 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,8 +4,12 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
+#define CLI_STRING	"cli"
+#define STI_STRING	"sti"
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -17,67 +21,64 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 3f\n" \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	"3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 5f\n" \
-	"2:\t" \
-	"testl $0x200, %1\n\t" \
-	"jz 4f\n\t" \
-	"sti\n" \
-	"3:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jle 3b\n\t" \
-	"cli\n\t" \
-	"jmp 1b\n" \
-	"4:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jg 1b\n\t" \
-	"jmp 4b\n" \
-	"5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+	asm volatile("\n1:\t"
+		     LOCK_PREFIX " ; decb %0\n\t"
+		     "jns 3f\n"
+		     "2:\t"
+		     "rep;nop\n\t"
+		     "cmpb $0,%0\n\t"
+		     "jle 2b\n\t"
+		     "jmp 1b\n"
+		     "3:\n\t"
+		     : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decb %0\n\t"
+		"jns 5f\n"
+		"2:\t"
+		"testl $0x200, %1\n\t"
+		"jz 4f\n\t"
+		STI_STRING "\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jle 3b\n\t"
+		CLI_STRING "\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
-	__asm__ __volatile__(
+	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-	"movb $1,%0" \
-		:"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "+m" (lock->slock) \
-		:"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	char oldval = 1;
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+	return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+	return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
```
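The new `__raw_read_lock`/`__raw_write_lock` bodies keep the classic i386 bias scheme: the counter starts at `RW_LOCK_BIAS`, each reader subtracts 1, and a writer subtracts the entire bias, so the sign/zero flags after the locked `subl` tell the fast path whether it must call the out-of-line `__read_lock_failed`/`__write_lock_failed` helpers. A rough C11 rendering of the same accounting (the `rwb_*` names are hypothetical, and these slow paths just spin inline instead of calling out-of-line helpers):

```c
#include <stdatomic.h>

#define RWB_BIAS 0x01000000		/* mirrors RW_LOCK_BIAS on i386 */

typedef struct {
	atomic_int cnt;			/* initialize to RWB_BIAS: fully idle */
} rwb_lock_t;

static inline void rwb_read_lock(rwb_lock_t *rw)
{
	for (;;) {
		/* fast path, like "lock ; subl $1,(%0)": one reader
		 * takes one unit of the bias */
		if (atomic_fetch_sub(&rw->cnt, 1) > 0)
			return;		/* result non-negative: no writer */
		/* slow path, like __read_lock_failed: undo and wait */
		atomic_fetch_add(&rw->cnt, 1);
		while (atomic_load(&rw->cnt) <= 0)
			;		/* writer active: spin until it leaves */
	}
}

static inline void rwb_read_unlock(rwb_lock_t *rw)
{
	atomic_fetch_add(&rw->cnt, 1);
}

static inline void rwb_write_lock(rwb_lock_t *rw)
{
	for (;;) {
		/* fast path, like "lock ; subl $RW_LOCK_BIAS,(%0)":
		 * a writer claims the whole bias at once */
		if (atomic_fetch_sub(&rw->cnt, RWB_BIAS) == RWB_BIAS)
			return;		/* result was zero: no readers or writer */
		/* slow path, like __write_lock_failed: undo and wait */
		atomic_fetch_add(&rw->cnt, RWB_BIAS);
		while (atomic_load(&rw->cnt) != RWB_BIAS)
			;		/* wait until the lock is fully idle */
	}
}

static inline void rwb_write_unlock(rwb_lock_t *rw)
{
	atomic_fetch_add(&rw->cnt, RWB_BIAS);
}
```

The large bias leaves room for roughly 2^24 concurrent readers while keeping the writer's availability test a single flags check after the subtract, which is why the fast path needs only one locked instruction and a conditional branch.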