Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/atomic.h                    | 21
-rw-r--r--   include/asm-i386/mach-default/mach_reboot.h  |  2
-rw-r--r--   include/asm-i386/processor.h                 |  4
-rw-r--r--   include/asm-i386/signal.h                    | 31
-rw-r--r--   include/asm-i386/system.h                    | 42
5 files changed, 91 insertions, 9 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 509720be772..c68557aa04b 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -215,6 +215,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index 06ae4d81ba6..a955e57ad01 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,7 +19,7 @@ static inline void kb_wait(void)
 static inline void mach_reboot(void)
 {
 	int i;
-	for (i = 0; i < 100; i++) {
+	for (i = 0; i < 10; i++) {
 		kb_wait();
 		udelay(50);
 		outb(0x60, 0x64);	/* write Controller Command Byte */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 8c02b031870..5c96cf6dcb3 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -65,7 +65,9 @@ struct cpuinfo_x86 {
 	int	f00f_bug;
 	int	coma_bug;
 	unsigned long loops_per_jiffy;
-	unsigned char x86_num_cores;
+	unsigned char x86_max_cores;	/* cpuid returned max cores value */
+	unsigned char booted_cores;	/* number of cores as seen by OS */
+	unsigned char apicid;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL 0
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index cbb47d34aa3..76524b4052a 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -159,14 +159,37 @@ typedef struct sigaltstack {
 
 #define __HAVE_ARCH_SIG_BITOPS
 
-static __inline__ void sigaddset(sigset_t *set, int _sig)
+#define sigaddset(set,sig)			\
+	(__builtin_constant_p(sig) ?		\
+	 __const_sigaddset((set),(sig)) :	\
+	 __gen_sigaddset((set),(sig)))
+
+static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
 {
-	__asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+	__asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-static __inline__ void sigdelset(sigset_t *set, int _sig)
+static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
 {
-	__asm__("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+	unsigned long sig = _sig - 1;
+	set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
+}
+
+#define sigdelset(set,sig)			\
+	(__builtin_constant_p(sig) ?		\
+	 __const_sigdelset((set),(sig)) :	\
+	 __gen_sigdelset((set),(sig)))
+
+
+static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
+{
+	__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
+{
+	unsigned long sig = _sig - 1;
+	set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
 }
 
 static __inline__ int __const_sigismember(sigset_t *set, int _sig)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 97d52ac49e4..772f85da120 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
@@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		return cmpxchg_386_u8(ptr, old, new);
+	case 2:
+		return cmpxchg_386_u16(ptr, old, new);
+	case 4:
+		return cmpxchg_386_u32(ptr, old, new);
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 3))				\
+		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	else								\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
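
For readers unfamiliar with the new primitive, here is a minimal userspace sketch (not part of the patch) of the cmpxchg retry loop that the atomic_add_unless() macro above is built on. The helper name add_unless() and the use of GCC's __sync_val_compare_and_swap() builtin are illustrative stand-ins for the kernel's atomic_t and cmpxchg().

/*
 * Userspace illustration of the retry loop behind atomic_add_unless():
 * read the current value, then keep retrying the compare-and-swap until
 * it either succeeds or the observed value equals the forbidden value u.
 * GCC's __sync_val_compare_and_swap() stands in for the kernel's cmpxchg().
 */
#include <stdio.h>

static int add_unless(int *v, int a, int u)
{
	int c = *v, old;

	while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;	/* lost a race; retry with the value we observed */

	return c != u;		/* non-zero if the add was performed */
}

int main(void)
{
	int refcount = 1;

	/* atomic_inc_not_zero() pattern: take a reference only if still live */
	if (add_unless(&refcount, 1, 0))
		printf("got a reference, refcount = %d\n", refcount);

	refcount = 0;
	if (!add_unless(&refcount, 1, 0))
		printf("object already dead, refcount stays %d\n", refcount);

	return 0;
}

The same loop appears verbatim in the atomic_add_unless() macro added to atomic.h. The related system.h change exists because the cmpxchg instruction is only available on the 486 and later: when the kernel is built to also run on a 386 (CONFIG_X86_CMPXCHG unset), the new cmpxchg() macro checks boot_cpu_data.x86 > 3 at run time and falls back to the out-of-line cmpxchg_386_u*() emulation helpers.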