author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-30 23:27:45 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-10-01 00:39:22 -0700
commit		3c1fcfe229e99752c74efb945a4a3f560be04204 (patch)
tree		44085d9b599e06a92426141811a6f712beac17aa
parent		cdc39363d33506b0e067d41fc91f89d186bdf7f7 (diff)
[PATCH] Directed yield: direct yield of spinlocks for s390.
Use the new diagnose 0x9c in the s390 spinlock implementation. A virtual
cpu that fails to acquire a lock yields its remaining timeslice to the
virtual cpu that currently holds the lock.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	arch/s390/kernel/head31.S		11
-rw-r--r--	arch/s390/kernel/head64.S		11
-rw-r--r--	arch/s390/lib/spinlock.c		62
-rw-r--r--	include/asm-s390/setup.h		1
-rw-r--r--	include/asm-s390/spinlock.h		33
-rw-r--r--	include/asm-s390/spinlock_types.h	6
6 files changed, 88 insertions, 36 deletions
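Before the diff itself, here is a minimal user-space sketch of the directed-yield locking pattern the patch introduces; it is not the kernel code. The names dy_spinlock, dy_spin_lock and yield_to_cpu are invented for illustration, and sched_yield() merely stands in for the diag 0x9c hypervisor call, which only exists for a guest running under z/VM. Note that the real code stores ~smp_processor_id() (the one's complement of the cpu number) in the lock word so that cpu 0 can never collide with the "unlocked" value 0, and complements it again before passing it to the yield.

/*
 * Minimal user-space sketch of the directed-yield idea (not the kernel
 * code).  The lock word stores an owner id instead of a plain 0/1 flag,
 * so a contended waiter knows *which* cpu to donate its timeslice to.
 * yield_to_cpu() stands in for the hypervisor call (diag 0x9c); here it
 * simply calls sched_yield() as a portable placeholder.
 */
#include <sched.h>
#include <stdio.h>

struct dy_spinlock {
	volatile unsigned int owner_cpu;	/* 0 == unlocked */
};

static void yield_to_cpu(unsigned int cpu)
{
	/* Placeholder for "diag <cpu>,0,0x9c"; just give up the cpu. */
	(void) cpu;
	sched_yield();
}

static void dy_spin_lock(struct dy_spinlock *lp, unsigned int self)
{
	for (;;) {
		/* Try to claim the lock: 0 -> our (non-zero) id. */
		if (__sync_bool_compare_and_swap(&lp->owner_cpu, 0, self))
			return;
		/* Contended: donate the timeslice to the current owner. */
		unsigned int owner = lp->owner_cpu;
		if (owner != 0)
			yield_to_cpu(owner);
	}
}

static void dy_spin_unlock(struct dy_spinlock *lp)
{
	__sync_lock_release(&lp->owner_cpu);	/* store 0, release semantics */
}

int main(void)
{
	struct dy_spinlock lock = { 0 };
	unsigned int self = ~0u;	/* kernel uses ~smp_processor_id() */

	dy_spin_lock(&lock, self);
	printf("lock held by %#x\n", lock.owner_cpu);
	dy_spin_unlock(&lock);
	return 0;
}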
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 1fa9fa1ca74..1b952a3664e 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -254,6 +254,16 @@ startup_continue:
 	oi	3(%r12),0x80		# set IDTE flag
 .Lchkidte:
 
+#
+# find out if the diag 0x9c is available
+#
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	2(%r12),1		# set diag9c flag
+.Lchkdiag9c:
+
 	lpsw  .Lentry-.LPG1(13)		# jump to _stext in primary-space,
 					# virtual and never return ...
 	.align	8
@@ -281,6 +291,7 @@ startup_continue:
 .Lpccsp:.long	0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long	0x00080000,0x80000000 + .Lchkmvpg
 .Lpcidte:.long	0x00080000,0x80000000 + .Lchkidte
+.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
 .Lmemsize:.long memory_size
 .Lmchunk:.long	memory_chunk
 .Lmflags:.long	machine_flags
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 48998d50b00..b30e5897cdf 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -251,6 +251,17 @@ startup_continue:
 0:
 
 #
+# find out if the diag 0x9c is available
+#
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	6(%r12),1		# set diag9c flag
+0:
+
+#
 # find out if we have the MVCOS instruction
 #
 	la	%r1,0f-.LPG1(%r13)	# set program check address
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b9b7958a226..8d76403fcf8 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -24,57 +24,76 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void
-_diag44(void)
+static inline void _raw_yield(void)
 {
-#ifdef CONFIG_64BIT
 	if (MACHINE_HAS_DIAG44)
-#endif
 		asm volatile("diag 0,0,0x44");
 }
 
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+static inline void _raw_yield_cpu(int cpu)
+{
+	if (MACHINE_HAS_DIAG9C)
+		asm volatile("diag %0,0,0x9c"
+			     : : "d" (__cpu_logical_map[cpu]));
+	else
+		_raw_yield();
+}
+
+void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
 	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			unsigned int owner = lp->owner_cpu;
+			if (owner != 0)
+				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return;
+		}
 	}
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
-	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
+	int count;
 
-	while (count-- > 0) {
+	for (count = spin_retry; count > 0; count--) {
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return 1;
+		}
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_spin_relax(raw_spinlock_t *lock)
+{
+	unsigned int cpu = lock->owner_cpu;
+	if (cpu != 0)
+		_raw_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(_raw_spin_relax);
+
+void _raw_read_lock_wait(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_read_can_lock(rw))
@@ -86,8 +105,7 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -103,14 +121,13 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_write_can_lock(rw))
@@ -121,8 +138,7 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index f1959732b6f..5d72eda8a11 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -39,6 +39,7 @@ extern unsigned long machine_flags;
 #define MACHINE_IS_P390		(machine_flags & 4)
 #define MACHINE_HAS_MVPG	(machine_flags & 16)
 #define MACHINE_HAS_IDTE	(machine_flags & 128)
+#define MACHINE_HAS_DIAG9C	(machine_flags & 256)
 
 #ifndef __s390x__
 #define MACHINE_HAS_IEEE	(machine_flags & 2)
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 5f00feaf1be..6b78af16999 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -13,6 +13,8 @@
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
+#include <linux/smp.h>
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
@@ -50,34 +52,46 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+	do { while (__raw_spin_is_locked(lock)) \
+		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
-
-	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
-		_raw_spin_lock_wait(lp, pc);
+	int old;
+
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
+		return;
+	}
+	_raw_spin_lock_wait(lp, pc);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
 		return 1;
+	}
 	return _raw_spin_trylock_retry(lp, pc);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
+	lp->owner_pc = 0;
+	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
 
 /*
@@ -154,7 +168,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
index f79a2216204..b7ac13f7aa3 100644
--- a/include/asm-s390/spinlock_types.h
+++ b/include/asm-s390/spinlock_types.h
@@ -6,16 +6,16 @@
 #endif
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int owner_cpu;
+	volatile unsigned int owner_pc;
 } __attribute__ ((aligned (4))) raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-	volatile unsigned int owner_pc;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif