Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/amd_iommu.c       |  9
-rw-r--r--  arch/x86/kernel/cpuid.c           |  4
-rw-r--r--  arch/x86/kernel/dumpstack_32.c    |  2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c    |  2
-rw-r--r--  arch/x86/kernel/e820.c            |  4
-rw-r--r--  arch/x86/kernel/kvmclock.c        | 30
-rw-r--r--  arch/x86/kernel/msr.c             |  4
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  | 18
-rw-r--r--  arch/x86/kernel/pci-dma.c         |  4
-rw-r--r--  arch/x86/kernel/pci-gart_64.c     |  8
-rw-r--r--  arch/x86/kernel/pvclock.c         | 12
-rw-r--r--  arch/x86/kernel/rtc.c             | 22
-rw-r--r--  arch/x86/kernel/smpboot.c         | 14
13 files changed, 91 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 34e4d112b1e..a8fd9ebdc8e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -295,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
u64 address, size_t size)
{
int s = 0;
- unsigned pages = iommu_num_pages(address, size);
+ unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
address &= PAGE_MASK;
@@ -680,7 +680,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
iommu->exclusion_start < dma_dom->aperture_size) {
unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
int pages = iommu_num_pages(iommu->exclusion_start,
- iommu->exclusion_length);
+ iommu->exclusion_length,
+ PAGE_SIZE);
dma_ops_reserve_addresses(dma_dom, startpage, pages);
}
@@ -935,7 +936,7 @@ static dma_addr_t __map_single(struct device *dev,
unsigned long align_mask = 0;
int i;
- pages = iommu_num_pages(paddr, size);
+ pages = iommu_num_pages(paddr, size, PAGE_SIZE);
paddr &= PAGE_MASK;
if (align)
@@ -980,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
return;
- pages = iommu_num_pages(dma_addr, size);
+ pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
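The amd_iommu.c hunks above switch every caller to the three-argument
iommu_num_pages() helper (in lib/iommu-helper.c), which takes the I/O page
size explicitly instead of assuming PAGE_SIZE. A minimal userspace sketch of
the arithmetic, assuming a power-of-two io_page_size; main() and the sample
values are illustrative only:

	/* Userspace sketch of the three-argument helper's arithmetic.
	 * Assumes io_page_size is a power of two. */
	#include <stdio.h>

	static unsigned long iommu_num_pages(unsigned long addr,
					     unsigned long len,
					     unsigned long io_page_size)
	{
		/* offset into the first I/O page, plus the buffer length */
		unsigned long size = (addr & (io_page_size - 1)) + len;

		/* round up to whole I/O pages */
		return (size + io_page_size - 1) / io_page_size;
	}

	int main(void)
	{
		/* 5000 bytes starting 100 bytes into a 4 KiB page -> 2 pages */
		printf("%lu\n", iommu_num_pages(0x1064, 5000, 4096));
		return 0;
	}

This is presumably also why pci-dma.c below renames x86's old two-argument
helper to iommu_nr_pages(): the name is freed up for the shared
three-argument version.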
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 6a44d646599..72cefd1e649 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -147,8 +147,8 @@ static __cpuinit int cpuid_device_create(int cpu)
{
struct device *dev;
- dev = device_create_drvdata(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
- NULL, "cpu%d", cpu);
+ dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
+ "cpu%d", cpu);
return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 201ee359a1a..1a78180f08d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -13,6 +13,7 @@
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <linux/sysfs.h>
#include <asm/stacktrace.h>
@@ -343,6 +344,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
+ sysfs_printk_last_file();
if (notify_die(DIE_OOPS, str, regs, err,
current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 086cc8118e3..96a5db7da8a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -13,6 +13,7 @@
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <linux/sysfs.h>
#include <asm/stacktrace.h>
@@ -489,6 +490,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
+ sysfs_printk_last_file();
if (notify_die(DIE_OOPS, str, regs, err,
current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
return 1;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 78e642feac3..ce97bf3bed1 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1282,12 +1282,10 @@ void __init e820_reserve_resources(void)
e820_res = res;
for (i = 0; i < e820.nr_map; i++) {
end = e820.map[i].addr + e820.map[i].size - 1;
-#ifndef CONFIG_RESOURCES_64BIT
- if (end > 0x100000000ULL) {
+ if (end != (resource_size_t)end) {
res++;
continue;
}
-#endif
res->name = e820_type_to_string(e820.map[i].type);
res->start = e820.map[i].addr;
res->end = end;
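The e820 change replaces the compile-time CONFIG_RESOURCES_64BIT check with a
portable truncation test: if end does not survive a round-trip through
resource_size_t, the region cannot be represented and the resource is
skipped. A small sketch of the idiom, with resource_size_t forced to 32 bits
so the truncation is visible (in the kernel its width depends on the
configuration):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t resource_size_t;	/* 32-bit for the demo */

	int main(void)
	{
		uint64_t fits = 0x00000000fffff000ULL;	/* below 4 GiB: kept */
		uint64_t big  = 0x0000000123456789ULL;	/* above 4 GiB: skipped */

		printf("fits truncated? %d\n", fits != (resource_size_t)fits); /* 0 */
		printf("big truncated?  %d\n", big  != (resource_size_t)big);  /* 1 */
		return 0;
	}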
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d02def06ca9..774ac499156 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -78,6 +78,34 @@ static cycle_t kvm_clock_read(void)
return ret;
}
+/*
+ * If we don't preset lpj, there is the possibility that the guest
+ * will calibrate under heavy load - thus getting a lower lpj -
+ * and then execute its delays without load. This is wrong, because
+ * a delay loop must never finish before the requested time has passed.
+ * Any heuristic is subject to failure, because ultimately a large
+ * pool of guests can be running and disturb each other. So we preset
+ * lpj here.
+ */
+static unsigned long kvm_get_tsc_khz(void)
+{
+ return pvclock_tsc_khz(&per_cpu(hv_clock, 0));
+}
+
+static void kvm_get_preset_lpj(void)
+{
+ struct pvclock_vcpu_time_info *src;
+ unsigned long khz;
+ u64 lpj;
+
+ src = &per_cpu(hv_clock, 0);
+ khz = pvclock_tsc_khz(src);
+
+ lpj = ((u64)khz * 1000);
+ do_div(lpj, HZ);
+ preset_lpj = lpj;
+}
+
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_read,
@@ -153,6 +181,7 @@ void __init kvmclock_init(void)
pv_time_ops.get_wallclock = kvm_get_wallclock;
pv_time_ops.set_wallclock = kvm_set_wallclock;
pv_time_ops.sched_clock = kvm_clock_read;
+ pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
#ifdef CONFIG_X86_LOCAL_APIC
pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
#endif
@@ -163,6 +192,7 @@ void __init kvmclock_init(void)
#ifdef CONFIG_KEXEC
machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
+ kvm_get_preset_lpj();
clocksource_register(&kvm_clock);
}
}
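kvm_get_preset_lpj() converts the hypervisor-reported TSC frequency into
loops-per-jiffy before the guest would otherwise calibrate it:
lpj = tsc_khz * 1000 / HZ. A worked sketch of the arithmetic; the 2 GHz
frequency and HZ = 250 are illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long khz = 2000000;	/* as pvclock_tsc_khz() would report */
		unsigned long hz = 250;			/* kernel tick rate */
		unsigned long long lpj = khz * 1000 / hz;

		printf("preset_lpj = %llu\n", lpj);	/* 8000000 loops per jiffy */
		return 0;
	}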
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 2e2af5d1819..82a7c7ed6d4 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -163,8 +163,8 @@ static int __cpuinit msr_device_create(int cpu)
{
struct device *dev;
- dev = device_create_drvdata(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
- NULL, "msr%d", cpu);
+ dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
+ "msr%d", cpu);
return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 080d1d27f37..e1e731d78f3 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -217,16 +217,6 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
#endif /* CONFIG_IOMMU_DEBUG */
-static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
-{
- unsigned int npages;
-
- npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
- npages >>= PAGE_SHIFT;
-
- return npages;
-}
-
static inline int translation_enabled(struct iommu_table *tbl)
{
/* only PHBs with translation enabled have an IOMMU table */
@@ -408,7 +398,7 @@ static void calgary_unmap_sg(struct device *dev,
if (dmalen == 0)
break;
- npages = num_dma_pages(dma, dmalen);
+ npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
iommu_free(tbl, dma, npages);
}
}
@@ -427,7 +417,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
BUG_ON(!sg_page(s));
vaddr = (unsigned long) sg_virt(s);
- npages = num_dma_pages(vaddr, s->length);
+ npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
entry = iommu_range_alloc(dev, tbl, npages);
if (entry == bad_dma_address) {
@@ -464,7 +454,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
struct iommu_table *tbl = find_iommu_table(dev);
uaddr = (unsigned long)vaddr;
- npages = num_dma_pages(uaddr, size);
+ npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
return iommu_alloc(dev, tbl, vaddr, npages, direction);
}
@@ -475,7 +465,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
struct iommu_table *tbl = find_iommu_table(dev);
unsigned int npages;
- npages = num_dma_pages(dma_handle, size);
+ npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
iommu_free(tbl, dma_handle, npages);
}
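The removed num_dma_pages() computed exactly what the shared helper computes
when the I/O page size is PAGE_SIZE: ceil((offset_in_page + length) /
PAGE_SIZE). A quick userspace check of the equivalence, assuming 4 KiB pages:

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	static unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
	{
		return (PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK)) >> PAGE_SHIFT;
	}

	static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
					     unsigned long io_page_size)
	{
		return ((addr & (io_page_size - 1)) + len + io_page_size - 1)
			/ io_page_size;
	}

	int main(void)
	{
		unsigned long dma;
		unsigned int len;

		for (dma = 0; dma < 3 * PAGE_SIZE; dma += 123)
			for (len = 1; len < 3 * PAGE_SIZE; len += 517)
				assert(num_dma_pages(dma, len) ==
				       iommu_num_pages(dma, len, PAGE_SIZE));
		printf("helpers agree on all tested inputs\n");
		return 0;
	}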
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0a3824e837b..19262482021 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -125,13 +125,13 @@ void __init pci_iommu_alloc(void)
pci_swiotlb_init();
}
-unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
return size >> PAGE_SHIFT;
}
-EXPORT_SYMBOL(iommu_num_pages);
+EXPORT_SYMBOL(iommu_nr_pages);
#endif
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 145f1c83369..e3f75bbcede 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -231,7 +231,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir, unsigned long align_mask)
{
- unsigned long npages = iommu_num_pages(phys_mem, size);
+ unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
int i;
@@ -285,7 +285,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
return;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
- npages = iommu_num_pages(dma_addr, size);
+ npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i);
@@ -368,7 +368,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
}
addr = phys_addr;
- pages = iommu_num_pages(s->offset, s->length);
+ pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
while (pages--) {
iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
SET_LEAK(iommu_page);
@@ -451,7 +451,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
seg_size += s->length;
need = nextneed;
- pages += iommu_num_pages(s->offset, s->length);
+ pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
ps = s;
}
if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 05fbe9a0325..4f9c55f3a7c 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -97,6 +97,18 @@ static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
return dst->version;
}
+unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
+{
+ u64 pv_tsc_khz = 1000000ULL << 32;
+
+ do_div(pv_tsc_khz, src->tsc_to_system_mul);
+ if (src->tsc_shift < 0)
+ pv_tsc_khz <<= -src->tsc_shift;
+ else
+ pv_tsc_khz >>= src->tsc_shift;
+ return pv_tsc_khz;
+}
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
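pvclock converts a TSC delta to nanoseconds as
((delta << tsc_shift) * tsc_to_system_mul) >> 32, shifting right instead when
tsc_shift is negative, so pvclock_tsc_khz() inverts that scale:
10^6 * 2^32 / tsc_to_system_mul, with the shift applied in the opposite
direction. A userspace sketch with a made-up multiplier for a 2 GHz TSC
(0.5 ns per tick, so tsc_to_system_mul = 0.5 * 2^32 = 0x80000000,
tsc_shift = 0):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned long pvclock_tsc_khz(uint32_t tsc_to_system_mul,
					     int8_t tsc_shift)
	{
		uint64_t pv_tsc_khz = 1000000ULL << 32;

		pv_tsc_khz /= tsc_to_system_mul;
		if (tsc_shift < 0)
			pv_tsc_khz <<= -tsc_shift;
		else
			pv_tsc_khz >>= tsc_shift;
		return pv_tsc_khz;
	}

	int main(void)
	{
		printf("%lu kHz\n", pvclock_tsc_khz(0x80000000u, 0)); /* 2000000 */
		return 0;
	}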
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 05191bbc68b..0a23b5795b2 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -223,11 +223,25 @@ static struct platform_device rtc_device = {
static __init int add_rtc_cmos(void)
{
#ifdef CONFIG_PNP
- if (!pnp_platform_devices)
- platform_device_register(&rtc_device);
-#else
+ static const char * const ids[] __initconst =
+ { "PNP0b00", "PNP0b01", "PNP0b02", };
+ struct pnp_dev *dev;
+ struct pnp_id *id;
+ int i;
+
+ pnp_for_each_dev(dev) {
+ for (id = dev->id; id; id = id->next) {
+ for (i = 0; i < ARRAY_SIZE(ids); i++) {
+ if (compare_pnp_id(id, ids[i]) != 0)
+ return 0;
+ }
+ }
+ }
+#endif
+
platform_device_register(&rtc_device);
-#endif /* CONFIG_PNP */
+ dev_info(&rtc_device.dev,
+ "registered platform RTC device (no PNP device found)\n");
return 0;
}
device_initcall(add_rtc_cmos);
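The rewritten add_rtc_cmos() scans every PNP device ID against the three
standard RTC IDs; compare_pnp_id() returns nonzero on a match, so the
function bails out (returns 0) as soon as PNP is known to describe the RTC,
and only otherwise registers the platform device. A string-based sketch of
that scan shape; the kernel walks struct pnp_dev / struct pnp_id lists
instead, and the device ID table here is made up:

	#include <stdio.h>
	#include <string.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	static const char * const rtc_ids[] = { "PNP0b00", "PNP0b01", "PNP0b02" };
	/* IDs a hypothetical firmware reports: a keyboard, then an RTC */
	static const char * const dev_ids[] = { "PNP0303", "PNP0b00" };

	int main(void)
	{
		size_t d, i;

		for (d = 0; d < ARRAY_SIZE(dev_ids); d++)
			for (i = 0; i < ARRAY_SIZE(rtc_ids); i++)
				if (strcmp(dev_ids[d], rtc_ids[i]) == 0) {
					/* PNP already describes the RTC */
					printf("found %s, skipping registration\n",
					       dev_ids[d]);
					return 0;
				}

		printf("no PNP RTC, registering the platform device\n");
		return 0;
	}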
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8c3aca7cb34..7ed9e070a6e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -282,6 +282,8 @@ static void __cpuinit smp_callin(void)
cpu_set(cpuid, cpu_callin_map);
}
+static int __cpuinitdata unsafe_smp;
+
/*
* Activate a secondary processor.
*/
@@ -397,7 +399,7 @@ static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
goto valid_k7;
/* If we get here, not a certified SMP capable AMD system. */
- add_taint(TAINT_UNSAFE_SMP);
+ unsafe_smp = 1;
}
valid_k7:
@@ -414,12 +416,10 @@ static void __cpuinit smp_checks(void)
* Don't taint if we are running SMP kernel on a single non-MP
* approved Athlon
*/
- if (tainted & TAINT_UNSAFE_SMP) {
- if (num_online_cpus())
- printk(KERN_INFO "WARNING: This combination of AMD"
- "processors is not suitable for SMP.\n");
- else
- tainted &= ~TAINT_UNSAFE_SMP;
+ if (unsafe_smp && num_online_cpus() > 1) {
+ printk(KERN_INFO "WARNING: This combination of AMD "
+ "processors is not suitable for SMP.\n");
+ add_taint(TAINT_UNSAFE_SMP);
}
}
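The smpboot change converts the immediate add_taint() in the per-CPU quirk
path into a record-now, decide-later flag: smp_apply_quirks() only sets
unsafe_smp, and smp_checks() taints once bring-up is complete and more than
one CPU is actually online. A minimal sketch of that pattern; the names and
printf stand in for the kernel's add_taint()/printk and are illustrative
only:

	#include <stdio.h>

	static int unsafe_smp;		/* set during per-CPU quirk checks */

	static void apply_quirks(int cpu_is_uncertified)
	{
		if (cpu_is_uncertified)
			unsafe_smp = 1;	/* just record; don't act yet */
	}

	static void smp_checks(int num_online_cpus)
	{
		/* act only once we know how many CPUs actually came up */
		if (unsafe_smp && num_online_cpus > 1)
			printf("WARNING: tainting, unsafe SMP configuration\n");
	}

	int main(void)
	{
		apply_quirks(1);
		smp_checks(1);		/* single CPU online: no taint */
		smp_checks(4);		/* real SMP: warn and taint */
		return 0;
	}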