author     Glauber de Oliveira Costa <gcosta@redhat.com>   2008-01-30 13:30:33 +0100
committer  Ingo Molnar <mingo@elte.hu>                     2008-01-30 13:30:33 +0100
commit     2fed0c507cf0101d511366f36e8573f403dbfea5 (patch)
tree       d73332fca3b4a515755ad10b313780ca54f84fc1 /include
parent     6abcd98ffafbff81f0bfd7ee1d129e634af13245 (diff)
x86: consolidate spinlock.h
The cli and sti instructions need to be replaced by paravirt hooks. For the i386 architecture, this is already done. The code requirements aren't much different from the x86_64 POV, so this part is consolidated in the common header.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/spinlock.h     14
-rw-r--r--  include/asm-x86/spinlock_32.h  71
-rw-r--r--  include/asm-x86/spinlock_64.h  37
3 files changed, 64 insertions(+), 58 deletions(-)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index d74d85e71dc..e1d555a3dfe 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,19 @@
+#ifndef _X86_SPINLOCK_H_
+#define _X86_SPINLOCK_H_
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define CLI_STRING "cli"
+#define STI_STRING "sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
+
#ifdef CONFIG_X86_32
# include "spinlock_32.h"
#else
# include "spinlock_64.h"
#endif
+
+#endif
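The point of hoisting these macros into the common header is that the irq-enabled spin loops in spinlock_32.h and spinlock_64.h reference CLI_STRING/STI_STRING instead of hard-coded cli/sti, so a CONFIG_PARAVIRT build can substitute hook calls without touching the asm templates. A toy user-space sketch of the splicing, assuming only the native fallbacks defined above (it prints the assembled template rather than executing it, since cli/sti are privileged instructions):

#include <stdio.h>

/* Native fallbacks, as in the consolidated header. */
#define CLI_STRING "cli"
#define STI_STRING "sti"

int main(void)
{
	/* Adjacent string literals concatenate at compile time, so the
	 * macros splice directly into an asm template's text: */
	const char *spin_template =
		"1:\t"
		STI_STRING "\n"		/* irqs back on while spinning */
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jle 1b\n\t"
		CLI_STRING "\n\t";	/* irqs off again before retrying */

	puts(spin_template);
	return 0;
}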
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
index d3bcebed60c..c42c3f12d7c 100644
--- a/include/asm-x86/spinlock_32.h
+++ b/include/asm-x86/spinlock_32.h
@@ -5,16 +5,6 @@
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
-#include <linux/compiler.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define CLI_STRING "cli"
-#define STI_STRING "sti"
-#define CLI_STI_CLOBBERS
-#define CLI_STI_INPUT_ARGS
-#endif /* CONFIG_PARAVIRT */
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,23 +17,24 @@
* (the type definitions are in asm/spinlock_types.h)
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
- return *(volatile signed char *)(&(x)->slock) <= 0;
+ return *(volatile signed char *)(&(lock)->slock) <= 0;
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- asm volatile("\n1:\t"
- LOCK_PREFIX " ; decb %0\n\t"
- "jns 3f\n"
- "2:\t"
- "rep;nop\n\t"
- "cmpb $0,%0\n\t"
- "jle 2b\n\t"
- "jmp 1b\n"
- "3:\n\t"
- : "+m" (lock->slock) : : "memory");
+ asm volatile(
+ "\n1:\t"
+ LOCK_PREFIX " ; decb %0\n\t"
+ "jns 3f\n"
+ "2:\t"
+ "rep;nop\n\t"
+ "cmpb $0,%0\n\t"
+ "jle 2b\n\t"
+ "jmp 1b\n"
+ "3:\n\t"
+ : "+m" (lock->slock) : : "memory");
}
/*
@@ -55,7 +46,8 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
* irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
*/
#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
{
asm volatile(
"\n1:\t"
@@ -79,18 +71,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
"5:\n\t"
: [slock] "+m" (lock->slock)
: [flags] "r" (flags)
- CLI_STI_INPUT_ARGS
+ CLI_STI_INPUT_ARGS
: "memory" CLI_STI_CLOBBERS);
}
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
- char oldval;
+ signed char oldval;
+
asm volatile(
"xchgb %b0,%1"
:"=q" (oldval), "+m" (lock->slock)
:"0" (0) : "memory");
+
return oldval > 0;
}
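For readers tracing the asm: __raw_spin_lock and __raw_spin_trylock above implement a byte lock where slock starts at 1 (unlocked) and 0 or below means held. A runnable user-space sketch of the same protocol, with GCC atomic builtins standing in for the kernel's inline asm (the demo_* names are illustrative, not kernel API):

#include <stdio.h>

/* slock starts at 1 (unlocked); 0 or negative means held. */
typedef struct { volatile signed char slock; } demo_spinlock_t;

static void demo_lock(demo_spinlock_t *lock)
{
	for (;;) {
		/* LOCK_PREFIX decb %0: atomically decrement the byte;
		 * jns 3f: a non-negative result means we went 1 -> 0. */
		if (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) >= 0)
			return;
		/* rep;nop / cmpb $0,%0 / jle 2b: spin read-only until the
		 * owner stores 1 again, then retry the decrement. */
		while (lock->slock <= 0)
			;
	}
}

static int demo_trylock(demo_spinlock_t *lock)
{
	/* xchgb %b0,%1 with a 0 input: grab the byte, keep the old value. */
	signed char oldval = __atomic_exchange_n(&lock->slock, 0,
						 __ATOMIC_ACQUIRE);
	return oldval > 0;	/* only 1 (unlocked) counts as success */
}

static void demo_unlock(demo_spinlock_t *lock)
{
	/* The unlock path uses xchgb with 1; a release store of 1 has
	 * the same effect for this sketch. */
	__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	demo_spinlock_t lock = { .slock = 1 };

	demo_lock(&lock);
	printf("trylock while held: %d\n", demo_trylock(&lock)); /* 0 */
	demo_unlock(&lock);
	printf("trylock when free:  %d\n", demo_trylock(&lock)); /* 1 */
	demo_unlock(&lock);
	return 0;
}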
@@ -112,7 +106,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- char oldval = 1;
+ unsigned char oldval = 1;
asm volatile("xchgb %b0, %1"
: "=q" (oldval), "+m" (lock->slock)
@@ -139,31 +133,16 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
*
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
- return (int)(x)->lock > 0;
+ return (int)(lock)->lock > 0;
}
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
- return (x)->lock == RW_LOCK_BIAS;
+ return (lock)->lock == RW_LOCK_BIAS;
}
static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -187,6 +166,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
+
atomic_dec(count);
if (atomic_read(count) >= 0)
return 1;
@@ -197,6 +177,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
+
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
return 1;
atomic_add(RW_LOCK_BIAS, count);
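The two trylock routines above are the whole RW_LOCK_BIAS trick in miniature: readers take 1 from the counter, a writer takes the entire bias, and success is judged by whether the counter stayed non-negative (read) or hit exactly zero (write). A runnable user-space rendering under the same assumptions (the demo_* names are ours; GCC builtins stand in for atomic_dec/atomic_sub_and_test):

#include <stdio.h>

/* One 32-bit counter, initialised to the bias. Readers subtract 1,
 * a writer subtracts the whole bias, so the high (sign) bit doubles
 * as the "contended" bit described above. */
#define RW_LOCK_BIAS 0x01000000

static int demo_lock_word = RW_LOCK_BIAS;

static int demo_read_trylock(void)
{
	/* atomic_dec + atomic_read >= 0 in the kernel version. */
	if (__atomic_sub_fetch(&demo_lock_word, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	__atomic_add_fetch(&demo_lock_word, 1, __ATOMIC_RELAXED); /* undo */
	return 0;
}

static int demo_write_trylock(void)
{
	/* atomic_sub_and_test: success only if the counter was exactly
	 * RW_LOCK_BIAS, i.e. no readers and no writer held it. */
	if (__atomic_sub_fetch(&demo_lock_word, RW_LOCK_BIAS,
			       __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(&demo_lock_word, RW_LOCK_BIAS, __ATOMIC_RELAXED);
	return 0;
}

int main(void)
{
	printf("read : %d\n", demo_read_trylock());	/* 1: readers ok    */
	printf("write: %d\n", demo_write_trylock());	/* 0: a reader holds */
	__atomic_add_fetch(&demo_lock_word, 1, __ATOMIC_RELEASE); /* read unlock */
	printf("write: %d\n", demo_write_trylock());	/* 1: now exclusive  */
	return 0;
}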
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h
index 88bf981e73c..3b5adf92ad0 100644
--- a/include/asm-x86/spinlock_64.h
+++ b/include/asm-x86/spinlock_64.h
@@ -33,14 +33,21 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
"cmpl $0,%0\n\t"
"jle 3b\n\t"
"jmp 1b\n"
- "2:\t" : "=m" (lock->slock) : : "memory");
+ "2:\t"
+ : "=m" (lock->slock) : : "memory");
}
/*
- * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
*/
#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
{
asm volatile(
"\n1:\t"
@@ -48,12 +55,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
"jns 5f\n"
"testl $0x200, %1\n\t" /* interrupts were disabled? */
"jz 4f\n\t"
- "sti\n"
+ STI_STRING "\n"
"3:\t"
"rep;nop\n\t"
"cmpl $0, %0\n\t"
"jle 3b\n\t"
- "cli\n\t"
+ CLI_STRING "\n\t"
"jmp 1b\n"
"4:\t"
"rep;nop\n\t"
@@ -61,7 +68,9 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
"jg 1b\n\t"
"jmp 4b\n"
"5:\n\t"
- : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+ : "+m" (lock->slock)
+ : "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
+ : "memory" CLI_STI_CLOBBERS);
}
#endif
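The "testl $0x200, %1" in this routine probes bit 9 of the caller's saved EFLAGS word — the IF (interrupt enable) flag — to decide whether it is safe to re-enable interrupts while spinning. A minimal C rendering of that check (the helper name is illustrative):

#include <stdio.h>

#define X86_EFLAGS_IF 0x200	/* bit 9 of EFLAGS: interrupts enabled */

static int irqs_were_enabled(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) != 0;
}

int main(void)
{
	printf("%d\n", irqs_were_enabled(0x246));	/* 1: IF set   */
	printf("%d\n", irqs_were_enabled(0x046));	/* 0: IF clear */
	return 0;
}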
@@ -79,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
+ asm volatile("movl $1,%0" : "=m" (lock->slock) :: "memory");
}
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -114,18 +123,18 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
- "call __read_lock_failed\n"
+ "call __read_lock_failed\n\t"
"1:\n"
::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
- "\tcall __write_lock_failed\n\t"
+ "call __write_lock_failed\n\t"
"1:\n"
::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
@@ -133,6 +142,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
+
atomic_dec(count);
if (atomic_read(count) >= 0)
return 1;
@@ -143,6 +153,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
+
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
return 1;
atomic_add(RW_LOCK_BIAS, count);
@@ -151,12 +162,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
+ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",%0"
: "=m" (rw->lock) : : "memory");
}