Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                           |  4
-rw-r--r--  arch/i386/Kconfig.debug                     |  9
-rw-r--r--  arch/i386/kernel/acpi/boot.c                | 10
-rw-r--r--  arch/i386/kernel/apic.c                     |  4
-rw-r--r--  arch/i386/kernel/apm.c                      |  2
-rw-r--r--  arch/i386/kernel/cpu/amd.c                  |  2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c  | 10
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c      |  2
-rw-r--r--  arch/i386/kernel/cpuid.c                    |  2
-rw-r--r--  arch/i386/kernel/kprobes.c                  | 39
-rw-r--r--  arch/i386/kernel/msr.c                      |  2
-rw-r--r--  arch/i386/kernel/ptrace.c                   |  7
-rw-r--r--  arch/i386/kernel/setup.c                    |  4
-rw-r--r--  arch/i386/kernel/smpboot.c                  |  4
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c         |  4
-rw-r--r--  arch/i386/kernel/vm86.c                     |  2
-rw-r--r--  arch/i386/mach-voyager/voyager_cat.c        |  1
-rw-r--r--  arch/i386/pci/irq.c                         |  1
18 files changed, 59 insertions, 50 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 18ec9fe6deb..c6fe99e57a0 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -467,7 +467,7 @@ endchoice
 
 choice
 	depends on EXPERIMENTAL && !X86_PAE
-	prompt "Memory split"
+	prompt "Memory split" if EMBEDDED
 	default VMSPLIT_3G
 	help
 	  Select the desired split between kernel and user memory.
@@ -756,7 +756,7 @@ config PHYSICAL_START
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-	depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER && !X86_PC
+	depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
 	---help---
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index 6e97df6979e..c92191b1fb6 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -81,4 +81,13 @@ config X86_MPPARSE
 	depends on X86_LOCAL_APIC && !X86_VISWS
 	default y
 
+config DOUBLEFAULT
+	default y
+	bool "Enable doublefault exception handler" if EMBEDDED
+	help
+	  This option allows trapping of rare doublefault exceptions that
+	  would otherwise cause a system to silently reboot. Disabling this
+	  option saves about 4k and might cause you much additional grey
+	  hair.
+
 endmenu
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 030a0007a4e..4c785a67d58 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -168,7 +168,7 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 	unsigned long i;
 	int config_size;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
@@ -215,7 +215,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -1102,6 +1102,9 @@ int __init acpi_boot_table_init(void)
 	dmi_check_system(acpi_dmi_table);
 #endif
 
+	if (!cpu_has_apic)
+		return -ENODEV;
+
 	/*
 	 * If acpi_disabled, bail out
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
@@ -1148,6 +1151,9 @@ int __init acpi_boot_init(void)
 
 	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
 
+	if (!cpu_has_apic)
+		return -ENODEV;
+
 	/*
 	 * set sci_int and PM timer address
 	 */
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 254cee9f0b7..013b85df18c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -757,10 +757,6 @@ static int __init apic_set_verbosity(char *str)
 		apic_verbosity = APIC_DEBUG;
 	else if (strcmp("verbose", str) == 0)
 		apic_verbosity = APIC_VERBOSE;
-	else
-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug\n", str);
-
 	return 1;
 }
 
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index da30a374dd4..df0e1745f18 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1079,7 +1079,7 @@ static int apm_console_blank(int blank)
 		break;
 	}
 
-	if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
+	if (error == APM_NOT_ENGAGED) {
 		static int tried;
 		int eng_error;
 		if (tried++ == 0) {
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index ff2b2154ac1..786d1a57048 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -207,6 +207,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 		set_bit(X86_FEATURE_K7, c->x86_capability);
 		break;
 	}
+	if (c->x86 >= 6)
+		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
 
 	display_cacheinfo(c);
 
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 7c0e160a214..71fffa17442 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -905,14 +905,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 {
 	cpumask_t oldmask = CPU_MASK_ALL;
 	struct powernow_k8_data *data = powernow_data[pol->cpu];
-	u32 checkfid = data->currfid;
-	u32 checkvid = data->currvid;
+	u32 checkfid;
+	u32 checkvid;
 	unsigned int newstate;
 	int ret = -EIO;
 
 	if (!data)
 		return -EINVAL;
 
+	checkfid = data->currfid;
+	checkvid = data->currvid;
+
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
@@ -1109,9 +1112,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	if (!data)
-		return -EINVAL;
-
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9df87b03612..c8547a6fa7e 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -642,7 +642,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	return;
 }
 
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 006141d1c12..1d9a4abcdfc 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -168,7 +168,7 @@ static int cpuid_class_device_create(int i)
 	return err;
 }
 
-static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index f19768789e8..38806f42784 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 /* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
@@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from, void *to)
 /*
  * returns non-zero if opcodes can be boosted.
  */
-static inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t opcode)
 {
 	switch (opcode & 0xf0 ) {
 	case 0x70:
@@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcode_t opcode)
 /*
  * returns non-zero if opcode modifies the interrupt flag.
 */
-static inline int is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
 {
 	switch (opcode) {
 	case 0xfa:		/* cli */
@@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -146,7 +146,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -154,7 +154,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -164,7 +164,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
@@ -242,10 +242,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else {
-			if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-				goto no_kprobe;
-			}
 			if (*addr != BREAKPOINT_INSTRUCTION) {
 			/* The breakpoint instruction was removed by
 			 * another cpu right after we hit, no further
@@ -265,11 +261,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
 	p = get_kprobe(addr);
 	if (!p) {
-		if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-			goto no_kprobe;
-		}
-
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -452,10 +443,11 @@ static void __kprobes resume_execution(struct kprobe *p,
 		*tos &= ~(TF_MASK | IF_MASK);
 		*tos |= kcb->kprobe_old_eflags;
 		break;
-	case 0xc3:		/* ret/lret */
-	case 0xcb:
-	case 0xc2:
+	case 0xc2:		/* iret/ret/lret */
+	case 0xc3:
 	case 0xca:
+	case 0xcb:
+	case 0xcf:
 	case 0xea:		/* jmp absolute -- eip is correct */
 		/* eip is already adjusted, no more changes required */
 		p->ainsn.boostable = 1;
@@ -463,10 +455,13 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xe8:		/* call relative - Fix return addr */
 		*tos = orig_eip + (*tos - copy_eip);
 		break;
+	case 0x9a:		/* call absolute -- same as call absolute, indirect */
+		*tos = orig_eip + (*tos - copy_eip);
+		goto no_change;
 	case 0xff:
 		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
-			/* call absolute, indirect */
 			/*
+			 * call absolute, indirect
 			 * Fix return addr; eip is correct.
 			 * But this is not boostable
 			 */
@@ -507,7 +502,7 @@ no_change:
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled thoroughout this function.
 */
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -543,7 +538,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 1d0a55e6876..7a328230e54 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -251,7 +251,7 @@ static int msr_class_device_create(int i)
 	return err;
 }
 
-static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 506462ef36a..fd7eaf7866e 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -671,7 +671,7 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 	if (unlikely(current->audit_context)) {
 		if (entryexit)
-			audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
+			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
 						regs->eax);
 		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
 		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
@@ -720,14 +720,13 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 	ret = is_sysemu;
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
+		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
 				    regs->ebx, regs->ecx, regs->edx, regs->esi);
 	if (ret == 0)
 		return 0;
 
 	regs->orig_eax = -1; /* force skip of syscall restarting */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
-				regs->eax);
+		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 	return 1;
 }
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 80cb3b2d099..d77e89ac0d5 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -970,8 +970,10 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
  * not-overlapping, which is the case
  */
 int __init
-e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
 {
+	u64 start = s;
+	u64 end = e;
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index a6969903f2d..825b2b4ca72 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -313,7 +313,9 @@ static void __init synchronize_tsc_bp (void)
 			if (tsc_values[i] < avg)
 				realdelta = -realdelta;
 
-			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
+			if (realdelta > 0)
+				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+					"skew, fixed it up.\n", i, realdelta);
 		}
 
 		sum += delta;
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 5e41ee29c8c..f1187ddb0d0 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -279,7 +279,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_seqlock_irq(&xtime_lock);
 	if (!ref_freq) {
 		if (!freq->old){
@@ -312,7 +312,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	}
 
 end:
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_sequnlock_irq(&xtime_lock);
 
 	return 0;
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index aee14fafd13..00e0118e717 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -312,7 +312,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"
diff --git a/arch/i386/mach-voyager/voyager_cat.c b/arch/i386/mach-voyager/voyager_cat.c
index 3039539de51..10d21df1453 100644
--- a/arch/i386/mach-voyager/voyager_cat.c
+++ b/arch/i386/mach-voyager/voyager_cat.c
@@ -120,7 +120,6 @@ static struct resource qic_res = {
  * It writes num_bits of the data buffer in msg starting at start_bit.
  * Note: This function assumes that any unused bit in the data stream
  * is set to zero so that the ors will work correctly */
-#define BITS_PER_BYTE 8
 static void
 cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
 {
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 73235443fda..06dab00aaad 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -591,7 +591,6 @@ static __init int via_router_probe(struct irq_router *r,
 	case PCI_DEVICE_ID_VIA_8233A:
 	case PCI_DEVICE_ID_VIA_8235:
 	case PCI_DEVICE_ID_VIA_8237:
-	case PCI_DEVICE_ID_VIA_8237_SATA:
 		/* FIXME: add new ones for 8233/5 */
 		r->name = "VIA";
 		r->get = pirq_via_get;