Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig                |  7
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c     |  5
-rw-r--r--  arch/x86_64/ia32/syscall32.c       |  6
-rw-r--r--  arch/x86_64/kernel/e820.c          |  3
-rw-r--r--  arch/x86_64/kernel/io_apic.c       |  9
-rw-r--r--  arch/x86_64/kernel/kprobes.c       |  8
-rw-r--r--  arch/x86_64/kernel/mce.c           | 12
-rw-r--r--  arch/x86_64/kernel/nmi.c           |  8
-rw-r--r--  arch/x86_64/kernel/setup.c         | 22
-rw-r--r--  arch/x86_64/kernel/time.c          |  3
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c   |  4
-rw-r--r--  arch/x86_64/lib/Makefile           |  2
-rw-r--r--  arch/x86_64/lib/dec_and_lock.c     | 40
-rw-r--r--  arch/x86_64/mm/numa.c              | 10
14 files changed, 43 insertions(+), 96 deletions(-)
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e63323e03ea..21afa69a086 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -277,11 +277,6 @@ source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
-config HAVE_DEC_LOCK
- bool
- depends on SMP
- default y
-
config NR_CPUS
int "Maximum number of CPUs (2-256)"
range 2 256
@@ -313,7 +308,7 @@ config HPET_TIMER
present. The HPET provides a stable time base on SMP
systems, unlike the TSC, but it is more expensive to access,
as it is off-chip. You can find the HPET spec at
- <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
+ <http://www.intel.com/hardwaredesign/hpetspec.htm>.
config X86_PM_TIMER
bool "PM timer"
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index c8131f342cf..d9161e39597 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -353,11 +353,6 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec
mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!mpnt)
return -ENOMEM;
-
- if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) {
- kmem_cache_free(vm_area_cachep, mpnt);
- return -ENOMEM;
- }
memset(mpnt, 0, sizeof(*mpnt));
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index adbc5f8089e..3a01329473a 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -52,17 +52,13 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma)
return -ENOMEM;
- if (security_vm_enough_memory(npages)) {
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
memset(vma, 0, sizeof(struct vm_area_struct));
/* Could randomize here */
vma->vm_start = VSYSCALL32_BASE;
vma->vm_end = VSYSCALL32_END;
/* MAYWRITE to allow gdb to COW and set breakpoints */
- vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 7];
vma->vm_ops = &syscall32_vm_ops;
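The vm_flags change above is purely cosmetic: VM_MAYEXEC appeared twice in the OR chain, and OR-ing the same bit in twice is idempotent, so the generated code is identical. A quick illustrative check (not from the patch):

	unsigned long with_dup = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
	unsigned long without  = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	BUG_ON(with_dup != without);	/* never triggers: x | b | b == x | b */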
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index eb7929eea7b..ab3f87aaff7 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -17,6 +17,8 @@
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
+#include <linux/module.h>
+
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
@@ -28,6 +30,7 @@ extern char _end[];
* PFN of last memory page.
*/
unsigned long end_pfn;
+EXPORT_SYMBOL(end_pfn);
/*
* end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
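Adding EXPORT_SYMBOL(end_pfn), and the linux/module.h include it needs, makes the symbol visible to modular code; without the export, a module referencing end_pfn fails to load with an unresolved symbol. A minimal sketch of a hypothetical module that relies on it (the module name is illustrative, not from the patch):

	#include <linux/module.h>
	#include <linux/kernel.h>

	extern unsigned long end_pfn;	/* now exported by e820.c */

	static int __init endpfn_demo_init(void)	/* hypothetical demo module */
	{
		printk(KERN_INFO "endpfn_demo: last RAM pfn is %lu\n", end_pfn);
		return 0;
	}

	static void __exit endpfn_demo_exit(void)
	{
	}

	module_init(endpfn_demo_init);
	module_exit(endpfn_demo_exit);
	MODULE_LICENSE("GPL");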
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 522944a000a..c8eee20cd51 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -299,15 +299,6 @@ void __init check_ioapic(void)
#endif
/* RED-PEN skip them on mptables too? */
return;
- case PCI_VENDOR_ID_ATI:
- /* All timer interrupts on atiixp
- are doubled. Disable one. */
- if (disable_timer_pin_1 == 0) {
- disable_timer_pin_1 = 1;
- printk(KERN_INFO
- "ATI board detected. Disabling timer pin 1.\n");
- }
- return;
}
/* No multi-function device? */
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index df08c43276a..76a28b007be 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on x86_64. */
- up(&kprobe_mutex);
- p->ainsn.insn = get_insn_slot();
down(&kprobe_mutex);
+ p->ainsn.insn = get_insn_slot();
+ up(&kprobe_mutex);
if (!p->ainsn.insn) {
return -ENOMEM;
}
@@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- up(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
down(&kprobe_mutex);
+ free_insn_slot(p->ainsn.insn);
+ up(&kprobe_mutex);
}
static inline void save_previous_kprobe(void)
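Both kprobes hunks fix the same inverted locking: the old code released kprobe_mutex with up() before touching the insn-slot free list and only re-acquired it with down() afterwards, leaving the list unprotected exactly while it was being modified. The corrected shape is the standard 2.6-era semaphore pattern, sketched here with a hypothetical demo_sem:

	static DECLARE_MUTEX(demo_sem);	/* hypothetical; semaphore initialized to 1 */

	down(&demo_sem);	/* acquire before the critical section (may sleep) */
	/* ... manipulate shared state, e.g. the insn slot lists ... */
	up(&demo_sem);		/* release only after the shared state is consistent */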
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 969365c0771..69541db5ff2 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
{
unsigned next, entry;
mce->finished = 0;
- smp_wmb();
+ wmb();
for (;;) {
entry = rcu_dereference(mcelog.next);
+ /* The rmb forces the compiler to reload next in each
+ iteration */
+ rmb();
for (;;) {
/* When the buffer fills up discard new entries. Assume
that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
entry++;
continue;
}
+ break;
}
smp_rmb();
next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
break;
}
memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
- smp_wmb();
+ wmb();
mcelog.entry[entry].finished = 1;
- smp_wmb();
+ wmb();
if (!test_and_set_bit(0, &console_logged))
notify_user = 1;
@@ -217,7 +221,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
panicm_found = 1;
}
- tainted |= TAINT_MACHINE_CHECK;
+ add_taint(TAINT_MACHINE_CHECK);
}
/* Never do anything final in the polling timer */
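The mce.c hunks switch from smp_wmb()/smp_rmb() to the mandatory wmb()/rmb() forms. On a UP kernel the smp_* variants compile down to a mere compiler barrier, but mcelog records are written from machine-check context and read asynchronously, so real ordering is wanted regardless of CONFIG_SMP; the new rmb() also forces mcelog.next to be re-read on every retry, and the added break exits the inner scan once a usable entry is found. The store ordering being enforced is the classic fill-fence-flag publication pattern, visible in the function's own sequence:

	mce->finished = 0;				/* record not yet valid         */
	wmb();
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();						/* body visible before the flag */
	mcelog.entry[entry].finished = 1;		/* publish to readers           */
	wmb();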
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 4388b8a5bae..39d445e16f2 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -366,7 +366,7 @@ static void setup_k7_watchdog(void)
| K7_NMI_EVENT;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
- wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
+ wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= K7_EVNTSEL_ENABLE;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
@@ -407,8 +407,8 @@ static int setup_p4_watchdog(void)
wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
- Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
+ Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
+ wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
apic_write(APIC_LVTPC, APIC_DM_NMI);
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
return 1;
@@ -506,7 +506,7 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
- wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
+ wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
}
}
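The watchdog hunks fix two problems in one expression. First, cpu_khz/nmi_hz*1000 divides before scaling, throwing away up to nmi_hz-1 kHz of the clock rate; cpu_khz * 1000 / nmi_hz keeps the precision, and the (u64) cast keeps the intermediate product from overflowing 32 bits. Second, wrmsrl() writes the full sign-extended 64-bit negative period instead of wrmsr()'s two 32-bit halves with a hand-rolled -1 high word. A worked illustration of the rounding difference (hypothetical numbers):

	unsigned long cpu_khz = 2193999;	/* hypothetical 2.194 GHz part */
	unsigned int  nmi_hz  = 1000;

	unsigned long old_period = cpu_khz / nmi_hz * 1000;		/* 2193000 */
	unsigned long new_period = (u64)cpu_khz * 1000 / nmi_hz;	/* 2193999 */
	/* the counter is programmed to -period so it overflows, and
	   raises the NMI, after exactly 'period' cycles/events */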
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 351d8d64c2f..cb28df14ff6 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -836,6 +836,23 @@ static int __init init_amd(struct cpuinfo_x86 *c)
int r;
int level;
+#ifdef CONFIG_SMP
+ unsigned long value;
+
+ /*
+ * Disable TLB flush filter by setting HWCR.FFDIS on K8
+ * bit 6 of msr C001_0015
+ *
+ * Errata 63 for SH-B3 steppings
+ * Errata 122 for all steppings (F+ have it disabled by default)
+ */
+ if (c->x86 == 15) {
+ rdmsrl(MSR_K8_HWCR, value);
+ value |= 1 << 6;
+ wrmsrl(MSR_K8_HWCR, value);
+ }
+#endif
+
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
@@ -950,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
- unsigned apicid, node;
+ unsigned node;
int cpu = smp_processor_id();
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
- apicid = phys_proc_id[cpu];
- node = apicid_to_node[apicid];
+ node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
node = 0;
cpu_to_node[cpu] = node;
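The K8 hunk is a textbook MSR read-modify-write: rdmsrl() fetches the whole Hardware Configuration Register, only bit 6 (FFDIS, the TLB flush filter disable) is set, and wrmsrl() writes the value back, preserving every other HWCR bit. The same pattern generalizes to any sticky MSR bit; a sketch with a hypothetical helper (msr_set_bit is illustrative, not a kernel API of this era):

	static void msr_set_bit(u32 msr, int bit)	/* hypothetical helper */
	{
		unsigned long v;

		rdmsrl(msr, v);		/* read the full 64-bit register  */
		v |= 1UL << bit;	/* touch only the bit of interest */
		wrmsrl(msr, v);		/* write back, others preserved   */
	}

	/* equivalent to the hunk above: */
	msr_set_bit(MSR_K8_HWCR, 6);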
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 2373cb8b862..703acde2a1a 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void)
are handled in the OEM check above. */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return 0;
- /* All in a single socket - should be synchronized */
- if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
- return 0;
#endif
/* Assume multi socket systems are not synchronized */
return num_online_cpus() > 1;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 68ec03070e5..fd99ddd009b 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
EXPORT_SYMBOL(empty_zero_page);
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
EXPORT_SYMBOL(die_chain);
EXPORT_SYMBOL(register_die_notifier);
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index 6b26a1c1e9f..bba5db6cebd 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
usercopy.o getuser.o putuser.o \
thunk.o clear_page.o copy_page.o bitstr.o bitops.o
lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/x86_64/lib/dec_and_lock.c b/arch/x86_64/lib/dec_and_lock.c
deleted file mode 100644
index ab43394dc77..00000000000
--- a/arch/x86_64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
- int counter;
- int newcount;
-
-repeat:
- counter = atomic_read(atomic);
- newcount = counter-1;
-
- if (!newcount)
- goto slow_path;
-
- asm volatile("lock; cmpxchgl %1,%2"
- :"=a" (newcount)
- :"r" (newcount), "m" (atomic->counter), "0" (counter));
-
- /* If the above failed, "eax" will have changed */
- if (newcount != counter)
- goto repeat;
- return 0;
-
-slow_path:
- spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- spin_unlock(lock);
- return 0;
-}
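With the arch-specific file gone (along with its HAVE_DEC_LOCK Kconfig knob, Makefile rule, and export), x86_64 falls back to the generic lib/dec_and_lock.c, now considered good enough to make the cmpxchg special case not worth carrying. The generic fallback is roughly the following (a simplified sketch; the in-tree version may differ in detail):

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;	/* counter hit zero: return with the lock held */
		spin_unlock(lock);
		return 0;		/* counter still positive: lock already dropped */
	}

Note the contract both versions honour: a nonzero return means the counter reached zero and the caller owns the lock; a zero return means neither.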
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 80a49d9bd8a..21480382100 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -167,18 +167,16 @@ void __init numa_init_array(void)
mapping. To avoid this fill in the mapping for all possible
CPUs, as the number of CPUs is not known yet.
We round robin the existing nodes. */
- rr = 0;
+ rr = first_node(node_online_map);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] != NUMA_NO_NODE)
continue;
+ cpu_to_node[i] = rr;
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
- cpu_to_node[i] = rr;
- rr++;
}
- set_bit(0, &node_to_cpumask[cpu_to_node(0)]);
}
#ifdef CONFIG_NUMA_EMU
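The round-robin fix above changes both the starting point and the assign/advance order: rr now starts at the first online node rather than the literal 0 (which need not be online), each orphan CPU is assigned the current rr before rr is advanced, and the stray rr++ that could skip rr past the online map is gone. With online nodes {0, 2}, for example, CPUs without a node now get 0, 2, 0, 2, ... in turn:

	rr = first_node(node_online_map);		/* e.g. node 0   */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		cpu_to_node[i] = rr;			/* assign first  */
		rr = next_node(rr, node_online_map);	/* then advance  */
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map); /* wrap around */
	}

The removed set_bit(0, ...) line is redundant once numa_add_cpu() below sets the node_to_cpumask bit unconditionally for every CPU, boot processor included.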
@@ -266,9 +264,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
__cpuinit void numa_add_cpu(int cpu)
{
- /* BP is initialized elsewhere */
- if (cpu)
- set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+ set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
unsigned long __init numa_free_all_bootmem(void)