Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/cpu/common.c  |  3
-rw-r--r--  arch/i386/kernel/smp.c         | 10
-rw-r--r--  arch/i386/kernel/smpboot.c     | 11
-rw-r--r--  arch/i386/kernel/sysenter.c    | 12
-rw-r--r--  arch/i386/power/cpu.c          |  6
5 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index b9954248d0a..d58e169fbdb 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -432,6 +432,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	if (c == &boot_cpu_data)
+		sysenter_setup();
+	enable_sep_cpu();
 }
 
 #ifdef CONFIG_X86_HT
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 35f521612b2..cec4bde6716 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -495,6 +495,16 @@ struct call_data_struct {
 	int wait;
 };
 
+void lock_ipi_call_lock(void)
+{
+	spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+	spin_unlock_irq(&call_lock);
+}
+
 static struct call_data_struct * call_data;
 
 /*
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index ad74a46e9ef..c5517f33230 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -449,7 +449,18 @@ static void __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines number of
+	 * IPI receipients, and the time when the determination is made
+	 * for which cpus receive the IPI. Holding this
+	 * lock helps us to not include this cpu in a currently in progress
+	 * smp_call_function().
+	 */
+	lock_ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 960d8bd137d..0bada1870bd 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
 
 extern asmlinkage void sysenter_entry(void);
 
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
 	tss->ss1 = __KERNEL_CS;
 	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
 
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
 {
 	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
 			&vsyscall_sysenter_start,
 			&vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
-	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
 	return 0;
 }
-
-__initcall(sysenter_setup);
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 6f521cf19a1..d099d01461f 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
 #include <linux/device.h>
 #include <linux/suspend.h>
 #include <linux/acpi.h>
+
 #include <asm/uaccess.h>
 #include <asm/acpi.h>
 #include <asm/tlbflush.h>
+#include <asm/processor.h>
 
 static struct saved_context saved_context;
 
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
 unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 
-extern void enable_sep_cpu(void *);
-
 void __save_processor_state(struct saved_context *ctxt)
 {
 	kernel_fpu_begin();
@@ -136,7 +136,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 	 * sysenter MSRs
 	 */
 	if (boot_cpu_has(X86_FEATURE_SEP))
-		enable_sep_cpu(NULL);
+		enable_sep_cpu();
 
 	fix_processor_context();
 	do_fpu_end();
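
Note: the diff makes sysenter_setup(), enable_sep_cpu(), lock_ipi_call_lock() and unlock_ipi_call_lock() callable from other compilation units, but the matching header declarations are not visible here because the diffstat is limited to 'arch/i386'. A minimal sketch of what those declarations would look like; the header placement below is an assumption for illustration, not something shown by this diff:

	/*
	 * Illustrative sketch only -- not part of the diff above.
	 * Header locations are assumed, since the include/ changes
	 * fall outside the arch/i386 diffstat.
	 */

	/* assumed include/asm-i386/processor.h: sysenter helpers now
	 * called directly from identify_cpu() and the resume path */
	extern int sysenter_setup(void);
	extern void enable_sep_cpu(void);

	/* assumed include/asm-i386/smp.h: lets start_secondary()
	 * serialize its cpu_online_map update against an in-progress
	 * smp_call_function() */
	extern void lock_ipi_call_lock(void);
	extern void unlock_ipi_call_lock(void);

With these declarations in place, common.c can run sysenter_setup() once for the boot CPU and enable_sep_cpu() on every CPU as it is identified, instead of relying on the old on_each_cpu() initcall that a hotplugged CPU would never see.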