author     Linus Torvalds <torvalds@linux-foundation.org>  2009-05-18 09:17:37 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-05-18 09:17:37 -0700
commit     13bba6fda98fe03a955665c9d4bf63c8fd9c19c0 (patch)
tree       f0058f8b307eab6da6106cda02b4edd0245977e3 /arch/x86/include
parent     0130b2d7010fe8e046b7a6c44911a1d3d0d16c96 (diff)
parent     b4ecc126991b30fe5f9a59dfacda046aeac124b2 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix performance regression caused by paravirt_ops on native kernels
  xen: use header for EXPORT_SYMBOL_GPL
  x86, 32-bit: fix kernel_trap_sp()
  x86: fix percpu_{to,from}_op()
  x86: mtrr: Fix high_width computation when phys-addr is >= 44bit
  x86: Fix false positive section mismatch warnings in the apic code
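The headline fix targets the indirect-call layer that CONFIG_PARAVIRT puts in front of every spinlock operation, which costs cycles even on bare metal. A hedged, standalone illustration of that cost model (hypothetical names, C11, not kernel code):

/* Indirect ops-table call vs. direct call: the shape of the
 * regression this merge addresses. */
#include <stdatomic.h>

struct pv_lock_ops_sketch {
	void (*spin_lock)(atomic_flag *l);
};

static void native_spin_lock(atomic_flag *l)
{
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		;	/* spin until the holder clears the flag */
}

static struct pv_lock_ops_sketch ops = { .spin_lock = native_spin_lock };

/* CONFIG_PARAVIRT-style path: opaque indirect call, hard to inline */
static inline void spin_lock_paravirt(atomic_flag *l)
{
	ops.spin_lock(l);
}

/* native path: direct call the compiler can inline */
static inline void spin_lock_native(atomic_flag *l)
{
	native_spin_lock(l);
}

Splitting spinlock paravirtualization into its own option (CONFIG_PARAVIRT_SPINLOCKS, gated in the spinlock.h hunks below) lets native builds keep the direct path.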
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/paravirt.h |  2
-rw-r--r--  arch/x86/include/asm/percpu.h   | 10
-rw-r--r--  arch/x86/include/asm/ptrace.h   |  7
-rw-r--r--  arch/x86/include/asm/spinlock.h |  4
4 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 378e3691c08..a53da004e08 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
#define paravirt_nop ((void *)_paravirt_nop)
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d0..02ecb30982a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "qi" ((T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "ri" ((T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "ri" ((T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
- : "re" ((T__)val)); \
+ : "re" ((T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
@@ -109,7 +109,7 @@ do { \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=q" (ret__) \
: "m" (var)); \
break; \
case 2: \
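The byte-sized case is the substance of the percpu_{to,from}_op() fix: on 32-bit x86 only %eax-%edx have byte subregisters, so the old "r"/"=r" constraints could hand GCC %esi or %edi and produce code that does not assemble; "q"/"=q" restricts the choice to byte-addressable registers. The added parentheses around (val) also guard against precedence surprises when the macro argument is an expression. A minimal standalone sketch of the corrected constraint (hypothetical example, not the kernel macro):

/* Byte store via inline asm: "q" limits the register choice to the
 * byte-addressable set (%al/%bl/%cl/%dl); "i" still allows immediates. */
#include <stdio.h>

static unsigned char dst;

static void store_byte(unsigned char val)
{
	asm("movb %1,%0"
	    : "=m" (dst)
	    : "qi" (val));
}

int main(void)
{
	store_byte(0x5a);
	printf("dst = %#x\n", dst);
	return 0;
}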
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abee..624f133943e 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
/*
* X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. So regs will be the current sp.
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
*
* This is valid only for kernel mode traps.
*/
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
- return (unsigned long)regs;
+ return (unsigned long)(&regs->sp);
#else
return regs->sp;
#endif
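The rename from kernel_trap_sp() to kernel_stack_pointer() comes with a real fix: on 32-bit, (unsigned long)regs pointed at the lowest address of the saved-register frame, while the interrupted stack actually begins just past the saved registers, at &regs->sp. A hedged sketch of the layout this relies on (field order mirrors x86_32 struct pt_regs of this era; simplified, not kernel code):

/* When a 32-bit CPU traps while already in kernel mode it pushes no
 * ss/esp, so the interrupted stack starts where sp would have been. */
struct pt_regs_32_sketch {
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long ds, es, fs, gs;
	unsigned long orig_ax, ip, cs, flags;
	unsigned long sp, ss;	/* only saved for user-mode traps */
};

static unsigned long stack_pointer_sketch(struct pt_regs_32_sketch *regs)
{
	/* first byte past the saved registers == sp at trap time */
	return (unsigned long)&regs->sp;
}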
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec8..b7e5db87639 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
__raw_spin_lock(lock);
}
-#endif
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
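For contrast, when CONFIG_PARAVIRT_SPINLOCKS is enabled these same entry points come from paravirt.h (first hunk above) as indirect calls through pv_lock_ops; roughly, under the assumption that the surrounding PVOP machinery matches this kernel's paravirt.h:

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}
#endif

Narrowing the spinlock.h gate from CONFIG_PARAVIRT to CONFIG_PARAVIRT_SPINLOCKS means kernels built with paravirt support, but without paravirt spinlocks, still compile the ticket-lock fast paths directly.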