author    Thomas Gleixner <tglx@linutronix.de>  2009-12-02 19:49:50 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2009-12-14 23:55:32 +0100
commit    445c89514be242b1b0080056d50bdc1b72adeb5c (patch)
tree      96ed062794ad0fb6a649713c83f009eea382e8b2 /include
parent    6b6b4792f89346e47437682c7ba3438e6681c0f9 (diff)
locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture-specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt.

Linus suggested converting the raw_ locks to arch_ locks and cleaning up the name space instead of using an artificial name like core_spin, atomic_spin or whatever.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/atomic.h  | 6
-rw-r--r--  include/linux/spinlock.h             | 4
-rw-r--r--  include/linux/spinlock_types.h       | 2
-rw-r--r--  include/linux/spinlock_types_up.h    | 4
-rw-r--r--  include/linux/spinlock_up.h          | 8
5 files changed, 12 insertions(+), 12 deletions(-)
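To orient the reader before the hunks: the patch renames the innermost lock type, and the generic spinlock_t keeps embedding it under its raw_lock field. A simplified sketch of the resulting layering, drawn from the spinlock_types hunks below (illustrative only; debug and lockdep fields omitted):

typedef struct {
        volatile unsigned int slock;    /* arch-level lock word (UP case) */
} arch_spinlock_t;                      /* was: raw_spinlock_t */

typedef struct {
        arch_spinlock_t raw_lock;       /* arch lock embedded in the
                                         * generic kernel spinlock */
} spinlock_t;

With raw_spinlock_t gone from the arch layer, the raw_spin_* names are freed for the preempt-rt locks the commit message refers to.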
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63..dcf0afad4a7 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,18 +15,18 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
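For context on how these macros are used: the generic bitops in this same header wrap each read-modify-write in the hashed lock. A sketch modeled on the era's set_bit() from asm-generic/bitops/atomic.h (comments added here; treat the details as illustrative):

static inline void set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long flags;

        /* Hash the word address to one of the ATOMIC_HASH_SIZE
         * arch_spinlock_t locks, disable interrupts, then perform
         * the read-modify-write under the lock. */
        _atomic_spin_lock_irqsave(p, flags);
        *p |= mask;
        _atomic_spin_unlock_irqrestore(p, flags);
}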
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a9aaa709fb9..5ef7a4c060b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,7 +8,7 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
@@ -75,7 +75,7 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and raw_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f979d5d8a16..d4af2d7a86e 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -18,7 +18,7 @@
#include <linux/lockdep.h>
typedef struct {
- raw_spinlock_t raw_lock;
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
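The renamed field type is the only change in this hunk; the call layering around it stays the same. A simplified sketch of that layering (assuming the 2.6.32-era non-debug chain; names outside this diff are recalled from the surrounding headers, not shown in the patch):

/*
 *  spin_lock(lock)                          kernel API, takes spinlock_t *
 *    -> _spin_lock(lock)                    out-of-line wrapper
 *      -> _raw_spin_lock(lock)              expands to the arch layer:
 *         __raw_spin_lock(&lock->raw_lock)  operates on arch_spinlock_t
 */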
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..34d36691c4e 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,13 +16,13 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { }
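On UP without CONFIG_DEBUG_SPINLOCK the type is an empty struct, so a spinlock occupies no storage and the lock ops can compile away entirely. A small user-space sketch of that property (hypothetical harness; relies on GNU C's zero-sized empty structs, which the kernel assumes):

#include <stdio.h>

typedef struct { } arch_spinlock_t;     /* the !DEBUG_SPINLOCK UP type */

int main(void)
{
        /* GNU C gives empty structs size 0; a spinlock_t built around
         * this adds nothing to the enclosing structure. */
        printf("sizeof(arch_spinlock_t) = %zu\n", sizeof(arch_spinlock_t));
        return 0;
}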
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215..8ee2ac1bf63 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -20,19 +20,19 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define __raw_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
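The debug UP implementation above encodes the lock state in slock: 1 means unlocked (matching __RAW_SPIN_LOCK_UNLOCKED { 1 }), 0 means locked, and trylock succeeds only if the old value was nonzero. A standalone user-space sketch of that protocol (hypothetical test harness, not kernel code):

#include <assert.h>

typedef struct { volatile unsigned int slock; } arch_spinlock_t;

static int sketch_trylock(arch_spinlock_t *lock)
{
        char oldval = lock->slock;      /* 1 if the lock was free */
        lock->slock = 0;                /* take it unconditionally */
        return oldval > 0;              /* success only if it was free */
}

int main(void)
{
        arch_spinlock_t lock = { 1 };           /* __RAW_SPIN_LOCK_UNLOCKED */
        assert(sketch_trylock(&lock) == 1);     /* first attempt succeeds */
        assert(sketch_trylock(&lock) == 0);     /* second attempt fails */
        lock.slock = 1;                         /* __raw_spin_unlock */
        assert(sketch_trylock(&lock) == 1);     /* free again */
        return 0;
}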