author     Ingo Molnar <mingo@elte.hu>    2008-11-08 20:23:02 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-11-08 20:23:02 +0100
commit     895e031707954a9ca26ed4f5f794575313854ed1 (patch)
tree       ec7cd5515f346e6f94bc43e63d55ebc8ddf4173d /arch/x86
parent     838e8bb71dc0c892bf8f84abd3c709d8fe3a8d3c (diff)
parent     a622cf69b806bbb1887913c04e01d0c25b892876 (diff)
Merge branch 'linus' into x86/cleanups
Diffstat (limited to 'arch/x86')
65 files changed, 395 insertions, 207 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f843de13e24..7bf10f9b25a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,7 +28,7 @@ config X86
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
@@ -231,6 +231,10 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
+config X86_HAS_BOOT_CPU_ID
+	def_bool y
+	depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
@@ -1482,7 +1486,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool X86_64
 	depends on NUMA
 
-menu "Power management options"
+menu "Power management and ACPI options"
 	depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
@@ -1882,6 +1886,10 @@ config SYSVIPC_COMPAT
 
 endmenu
 
+config HAVE_ATOMIC_IOMAP
+	def_bool y
+	depends on X86_32
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 0b7c4a3f065..b815664fe37 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -513,19 +513,19 @@ config CPU_SUP_UMC_32
 	  If unsure, say N.
 
 config X86_DS
-	bool "Debug Store support"
-	default y
-	help
-	  Add support for Debug Store.
-	  This allows the kernel to provide a memory buffer to the hardware
-	  to store various profiling and tracing events.
+	def_bool X86_PTRACE_BTS
+	depends on X86_DEBUGCTLMSR
 
 config X86_PTRACE_BTS
-	bool "ptrace interface to Branch Trace Store"
+	bool "Branch Trace Store"
 	default y
-	depends on (X86_DS && X86_DEBUGCTLMSR)
+	depends on X86_DEBUGCTLMSR
 	help
-	  Add a ptrace interface to allow collecting an execution trace
-	  of the traced task.
-	  This collects control flow changes in a (cyclic) buffer and allows
-	  debuggers to fill in the gaps and show an execution trace of the debuggee.
+	  This adds a ptrace interface to the hardware's branch trace store.
+
+	  Debuggers may use it to collect an execution trace of the debugged
+	  application in order to answer the question 'how did I get here?'.
+	  Debuggers may trace user mode as well as kernel mode.
+
+	  Say Y unless there is no application development on this machine
+	  and you want to save a small amount of code size.
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249..63eff3b04d0 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@
 relocs
+vmlinux.bin.all
+vmlinux.relocs
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f73e95d75b4..cfdf8c2c5c3 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -91,7 +91,7 @@
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
-#define X86_FEATURE_XTOPOLOGY	(3*32+21) /* cpu topology enum extensions */
+#define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce2..7f225a4b2a2 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
index 3ffc5a7bf66..39849346191 100644
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ b/arch/x86/include/asm/es7000/wakecpu.h
@@ -50,10 +50,9 @@ static inline void restore_NMI_vector(unsigned short *high,
 					 unsigned short *low)
 {
 }
 
-#if APIC_DEBUG
- #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
- #define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {		\
+		if (apic_verbosity >= APIC_DEBUG)	\
+			__inquire_remote_apic(apicid);	\
+	} while (0)
 
 #endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8668a94f850..23696d44a0a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -9,6 +9,10 @@
 
 extern int fixmaps_set;
 
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
 		       unsigned long phys, pgprot_t flags);
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index 09f29ab5c13..c7115c1d721 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -28,10 +28,8 @@ extern unsigned long __FIXADDR_TOP;
 #include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
-#endif
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -75,10 +73,8 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_CYCLONE_TIMER
 	FIX_CYCLONE_TIMER, /*cyclone timer register*/
 #endif
-#ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 47f7e65e6c1..9e8bc29b8b1 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_FTRACE_H
 #define _ASM_X86_FTRACE_H
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 #endif
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index a3b3b7c3027..bf9276bea66 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -25,14 +25,11 @@
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
+#include <asm/fixmap.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
 
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 5618a103f39..ac2abc88cd9 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 98e28ea8cd1..e4a552d4446 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,7 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
-extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d843ed0e9b2..0005adb0f94 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,30 +101,22 @@
 #define LAST_VM86_IRQ		15
 #define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 
-#elif !defined(CONFIG_X86_VOYAGER)
+#elif defined(CONFIG_X86_VOYAGER)
 
-# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
-
-#  define NR_IRQS 224
-
-# else /* IO_APIC || PARAVIRT */
-
-#  define NR_IRQS 16
-
-# endif
+# define NR_IRQS 224
 
-#else /* !VISWS && !VOYAGER */
+#else /* IO_APIC || VOYAGER */
 
-# define NR_IRQS 224
+# define NR_IRQS 16
 
-#endif /* VISWS */
+#endif
 
 /* Voyager specific defines */
 /* These define the CPIs we use in linux */
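The wakecpu.h hunk above turns `inquire_remote_apic()` from a compile-time `#if APIC_DEBUG` switch into a runtime check wrapped in `do { ... } while (0)`. That wrapper matters because the macro body now contains an `if`. A minimal standalone sketch (hypothetical `log_verbosity` and `emit_log` names, not from the kernel) shows why the idiom keeps such a macro safe inside an `if`/`else`:

```c
#include <stdio.h>

static int log_verbosity = 2;	/* stand-in for apic_verbosity */

static void emit_log(int id)	/* stand-in for __inquire_remote_apic() */
{
	printf("inquiring %d\n", id);
}

/* Without do/while(0), the semicolon after LOG(1) below would close the
 * if-statement early and detach the else branch. */
#define LOG(id) do {				\
		if (log_verbosity >= 2)		\
			emit_log(id);		\
	} while (0)

int main(void)
{
	int ok = 0;

	if (ok)
		LOG(1);			/* expands to exactly one statement */
	else
		printf("skipped\n");	/* else still binds to the outer if */
	return 0;
}
```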
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65679d00633..8346be87cfa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -364,6 +364,9 @@ struct kvm_arch{
 
 	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
+
+	unsigned long irq_sources_bitmap;
+	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index d5c0b826a4f..9d80db91e99 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -33,10 +33,9 @@ static inline void restore_NMI_vector(unsigned short *high,
 					 unsigned short *low)
 {
 }
 
-#if APIC_DEBUG
- #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
- #define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {		\
+		if (apic_verbosity >= APIC_DEBUG)	\
+			__inquire_remote_apic(apicid);	\
+	} while (0)
 
 #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac2..c2a812ebde8 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
-	rdtsc_barrier();
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-	rdtsc_barrier();
 
 	return EAX_EDX_VAL(val, low, high);
 }
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index fb16cec702e..52597aeadff 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp)
 		write_cr3(pgd);
 }
 
-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
 
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2766021aef8..d12811ce51d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id 0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 90ac7718469..4850e4b02b6 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -154,7 +154,7 @@ extern unsigned long node_remap_size[];
 
 #endif
 
-/* sched_domains SD_NODE_INIT for NUMAQ machines */
+/* sched_domains SD_NODE_INIT for NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
 	.max_interval		= 32,			\
@@ -169,8 +169,9 @@ extern unsigned long node_remap_size[];
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
 				| SD_BALANCE_FORK	\
-				| SD_SERIALIZE		\
-				| SD_WAKE_BALANCE,	\
+				| SD_WAKE_AFFINE	\
+				| SD_WAKE_BALANCE	\
+				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91..9cd83a8e40d 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
+	cycles_t cycles;
+
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+	cycles = (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+
+	return cycles;
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c6ad93e315c..7a5782610b2 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -13,6 +13,7 @@
 
 #include <linux/numa.h>
 #include <linux/percpu.h>
+#include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index 9c811d2e6f9..b3e64730762 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -520,6 +520,7 @@ extern void voyager_restart(void);
 extern void voyager_cat_power_off(void);
 extern void voyager_cat_do_common_interrupt(void);
 extern void voyager_handle_nmi(void);
+extern void voyager_smp_intr_init(void);
 /* Commands for the following are */
 #define VOYAGER_PSI_READ	0
 #define VOYAGER_PSI_WRITE	1
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d7e5a58ee22..e489ff9cb3e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,11 +6,12 @@ extra-y                := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
 #
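The msr.h and tsc.h hunks above move the fences out of the raw `__native_read_tsc()` and into the vDSO-visible `vget_cycles()`, so only callers that need an ordered timestamp pay for serialization. A userspace sketch of the same placement, assuming `lfence` as the equivalent of `rdtsc_barrier()` (on x86 the kernel picks `lfence` or `mfence` per vendor):

```c
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>	/* __rdtsc(), _mm_lfence() */

/* Unordered read: cheap, but may be reordered around nearby loads. */
static inline uint64_t read_tsc_raw(void)
{
	return __rdtsc();
}

/* Ordered read: fences on both sides, mirroring vget_cycles() above. */
static inline uint64_t read_tsc_ordered(void)
{
	uint64_t t;

	_mm_lfence();	/* assumed rdtsc_barrier() equivalent */
	t = __rdtsc();
	_mm_lfence();
	return t;
}

int main(void)
{
	printf("raw=%llu ordered=%llu\n",
	       (unsigned long long)read_tsc_raw(),
	       (unsigned long long)read_tsc_ordered());
	return 0;
}
```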
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a8fd9ebdc8e..331b318304e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -50,7 +50,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
 {
-	return iommu->cap & IOMMU_CAP_NPCACHE;
+	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
 }
 
 /****************************************************************************
@@ -536,6 +536,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address + pages >= dom->next_bit)
+		dom->need_flush = true;
 }
 
 /****************************************************************************
@@ -992,8 +995,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 0d9c993aa93..ef8f831af82 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
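The first amd_iommu.c hunk fixes a classic capability-test bug: `IOMMU_CAP_NPCACHE` names a bit *position*, so ANDing the raw constant tests the wrong bits entirely. A standalone sketch (the value 26 is assumed here purely for illustration; the real constant lives in the AMD IOMMU headers):

```c
#include <assert.h>
#include <stdint.h>

#define IOMMU_CAP_NPCACHE 26	/* assumed bit position, not a mask */

int main(void)
{
	uint32_t cap = UINT32_C(1) << IOMMU_CAP_NPCACHE;   /* hw sets bit 26 */

	/* Buggy form: ANDs with 26 (binary 11010), i.e. bits 1, 3 and 4. */
	assert((cap & IOMMU_CAP_NPCACHE) == 0);

	/* Fixed form: turn the position into a mask first. */
	assert((cap & (UINT32_C(1) << IOMMU_CAP_NPCACHE)) != 0);
	return 0;
}
```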
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25581dcb280..b9c9ea0217a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/pat.h>
 #include <asm/asm.h>
 #include <asm/numa.h>
+#include <asm/smp.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -549,6 +550,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_early_init(c);
 
 	validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+	c->cpu_index = boot_cpu_id;
+#endif
 }
 
 void __init early_cpu_init(void)
@@ -1134,7 +1139,7 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Boot processor to setup the FP and extended state context info.
 	 */
-	if (!smp_processor_id())
+	if (smp_processor_id() == boot_cpu_id)
 		init_thread_xstate();
 
 	xsave_init();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ce97bf3bed1..7aafeb5263e 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1290,15 +1290,17 @@ void __init e820_reserve_resources(void)
 		res->start = e820.map[i].addr;
 		res->end = end;
 
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+		res->flags = IORESOURCE_MEM;
 
 		/*
 		 * don't register the region that could be conflicted with
 		 * pci device BAR resource and insert them later in
 		 * pcibios_resource_survey()
 		 */
-		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20))
+		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
+			res->flags |= IORESOURCE_BUSY;
 			insert_resource(&iomem_resource, res);
+		}
 		res++;
 	}
 
@@ -1318,7 +1320,7 @@ void __init e820_reserve_resources_late(void)
 	res = e820_res;
 	for (i = 0; i < e820.nr_map; i++) {
 		if (!res->parent && res->end)
-			reserve_region_with_split(&iomem_resource, res->start, res->end, res->name);
+			insert_resource_expand_to_fit(&iomem_resource, res);
 		res++;
 	}
 }
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dd65143941a..28b597ef9ca 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)
 
 #endif	/* CONFIG_XEN */
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(mcount)
@@ -1204,7 +1204,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 09e7145484c..b86f332c96a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -61,7 +61,7 @@
 
 	.code64
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 	retq
@@ -138,7 +138,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d073d981a73..50ea0ac8c9b 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -21,8 +21,7 @@
 
 #include <asm/nops.h>
 
-/* Long is fine, even if it is only 4 bytes ;-) */
-static unsigned long *ftrace_nop;
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -33,17 +32,17 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-static int notrace ftrace_calc_offset(long ip, long addr)
+static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-notrace unsigned char *ftrace_nop_replace(void)
+unsigned char *ftrace_nop_replace(void)
 {
-	return (char *)ftrace_nop;
+	return ftrace_nop;
 }
 
-notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -57,7 +56,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
-notrace int
+int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -66,26 +65,31 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	/*
 	 * Note: Due to modules and __init, code can
 	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing.
+	 *  as well as code changing. We do this by using the
+	 *  probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
 	 * kstop_machine, or before SMP starts.
 	 */
-	if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
-		return 1;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
 	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
-		return 2;
+		return -EINVAL;
 
-	WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
-					    MCOUNT_INSN_SIZE));
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
 
 	sync_core();
 
 	return 0;
 }
 
-notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
 	unsigned char old[MCOUNT_INSN_SIZE], *new;
@@ -98,13 +102,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ret;
 }
 
-notrace int ftrace_mcount_set(unsigned long *data)
-{
-	/* mcount is initialized as a nop */
-	*data = 0;
-	return 0;
-}
-
 int __init ftrace_dyn_arch_init(void *data)
 {
 	extern const unsigned char ftrace_test_p6nop[];
@@ -127,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data)
 	 * TODO: check the cpuid to determine the best nop.
 	 */
 	asm volatile (
-		"jmp ftrace_test_jmp\n"
-		/* This code needs to stay around */
-		".section .text, \"ax\"\n"
 		"ftrace_test_jmp:"
 		"jmp ftrace_test_p6nop\n"
 		"nop\n"
@@ -140,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data)
 		"jmp 1f\n"
 		"ftrace_test_nop5:"
 		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"jmp 1f\n"
-		".previous\n"
 		"1:"
 		".section .fixup, \"ax\"\n"
 		"2:	movl $1, %0\n"
@@ -156,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data)
 	switch (faulted) {
 	case 0:
 		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
-		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
 		break;
	case 1:
 		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
-		ftrace_nop = (unsigned long *)ftrace_test_nop5;
+		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
 		break;
 	case 2:
 		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
-		ftrace_nop = (unsigned long *)ftrace_test_jmp;
+		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
 		break;
 	}
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5..2c7dbdb9827 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = alloc_bootmem_pages(bytes);
+	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_node_to_blade, 255, bytes);
 
 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_cpu_to_blade, 255, bytes);
 
 	blade = 0;
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index dd7ebee446a..43cec6bdda6 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -5,7 +5,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b764d7429c6..7a3f2028e2e 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3611,6 +3611,8 @@ int __init probe_nr_irqs(void)
 	/* something wrong ? */
 	if (nr < nr_min)
 		nr = nr_min;
+	if (WARN_ON(nr > NR_IRQS))
+		nr = NR_IRQS;
 
 	return nr;
 }
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 304d8bad655..cbc4332a77b 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -18,7 +18,6 @@ static u32 *flush_words;
 
 struct pci_device_id k8_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
 	{}
 };
 EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 0732adba05c..7a385746509 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -162,7 +162,10 @@ void machine_kexec(struct kimage *image)
 	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
 	page_list[PA_PTE_1] = __pa(kexec_pte1);
 	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
+
+	if (image->type == KEXEC_TYPE_DEFAULT)
+		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+						<< PAGE_SHIFT);
 
 	/* The segment registers are funny things, they have both a
 	 * visible and an invisible part.  Whenever the visible part is
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7a1f8eeac2c..5f8e5d75a25 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -39,7 +39,7 @@
 #include <asm/microcode.h>
 
 MODULE_DESCRIPTION("AMD Microcode Update Driver");
-MODULE_AUTHOR("Peter Oruba <peter.oruba@amd.com>");
+MODULE_AUTHOR("Peter Oruba");
 MODULE_LICENSE("GPL v2");
 
 #define UCODE_MAGIC                0x00414d44
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 936d8d55f23..82fb2809ce3 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -480,8 +480,8 @@ static int __init microcode_init(void)
 
 	printk(KERN_INFO
 	       "Microcode Update Driver: v" MICROCODE_VERSION
-	       " <tigran@aivazian.fsnet.co.uk>"
-	       " <peter.oruba@amd.com>\n");
+	       " <tigran@aivazian.fsnet.co.uk>,"
+	       " Peter Oruba\n");
 
 	return 0;
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1972266e8ba..19262482021 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,6 +9,8 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
+static int forbid_dac __read_mostly;
+
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -291,3 +293,17 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
+
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		printk(KERN_INFO "PCI: VIA PCI bridge detected."
+				 "Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+#endif
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e3f75bbcede..a42b02b4df6 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -744,7 +744,7 @@ void __init gart_iommu_init(void)
 	long i;
 
 	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
 		return;
 	}
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759..3c539d111ab 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f4c93f1cfc1..724adfc63cb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,11 +29,7 @@ EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
 static int reboot_mode;
-/*
- * Keyboard reset and triple fault may result in INIT, not RESET, which
- * doesn't work when we're in vmx root mode.  Try ACPI first.
- */
-enum reboot_type reboot_type = BOOT_ACPI;
+enum reboot_type reboot_type = BOOT_KBD;
 int reboot_force;
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e00534b3353..f4049f3513b 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index dcbf7a1159e..8f919ca6949 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 
 	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
+	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 161bb850fc4..424093b157d 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
 	rdtscll(this_offset);
 
 	/* return the value in ns */
-	return cycles_2_ns(this_offset);
+	return __cycles_2_ns(this_offset);
 }
 
 /* We need to define a real function for sched_clock, to override the
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void)
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	if (apic_is_clustered_box())
 		return 1;
 #endif
@@ -813,10 +813,6 @@ void __init tsc_init(void)
 		cpu_khz = calibrate_cpu();
 #endif
 
-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
@@ -836,6 +832,10 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;
 
+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_fine = lpj;
+
 	use_tsc_delay();
 	/* Check and install the TSC clocksource */
 	dmi_check_system(bad_tsc_dmi_table);
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 7766d36983f..a688f3bfaec 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 
 static void __init set_vsmp_pv_ops(void)
 {
-	void *address;
+	void __iomem *address;
 	unsigned int cap, ctl, cfg;
 
 	/* set vSMP magic bits to indicate vSMP capable kernel */
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b545f371b5f..695e426aa35 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -12,7 +12,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
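The tlb_32.c/tlb_64.c hunks above insert `smp_mb()` so the stores to `flush_mm`, `flush_va` and the cpumask are globally visible before the IPI reaches another CPU. A loose userspace analogy under C11 atomics (an atomic flag plays the role of the interrupt; real IPIs are not acquire/release flags, but the ordering requirement is the same):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned long flush_va;		/* payload the "IPI handler" reads */
static atomic_int ipi_pending;		/* stands in for the interrupt */

static void *handler(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ipi_pending, memory_order_acquire))
		;	/* spin, like a CPU waiting for the vector */
	printf("flushing va=%#lx\n", flush_va);	/* payload must be visible */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, handler, NULL);
	flush_va = 0xdeadbeef;
	/* The release store plays the role of the smp_mb() added above:
	 * the flush_va store may not pass the flag store. */
	atomic_store_explicit(&ipi_pending, 1, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}
```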
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11c6725fb79..8772dc94682 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -545,6 +545,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	if (!pit)
 		return NULL;
 
+	mutex_lock(&kvm->lock);
+	pit->irq_source_id = kvm_request_irq_source_id(kvm);
+	mutex_unlock(&kvm->lock);
+	if (pit->irq_source_id < 0)
+		return NULL;
+
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
 	spin_lock_init(&pit->pit_state.inject_lock);
@@ -587,6 +593,7 @@ void kvm_free_pit(struct kvm *kvm)
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
 	hrtimer_cancel(timer);
+	kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	kfree(kvm->arch.vpit);
 }
@@ -595,8 +602,8 @@ void kvm_free_pit(struct kvm *kvm)
 static void __inject_pit_timer_intr(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
-	kvm_set_irq(kvm, 0, 1);
-	kvm_set_irq(kvm, 0, 0);
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
 	mutex_unlock(&kvm->lock);
 }
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index e436d4983aa..4178022b97a 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,6 +44,7 @@ struct kvm_pit {
 	struct kvm_io_device speaker_dev;
 	struct kvm *kvm;
 	struct kvm_kpit_state pit_state;
+	int irq_source_id;
 };
 
 #define KVM_PIT_BASE_ADDRESS	0x40
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99c239c5c0a..2a5e64881d9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->tlb_flush(vcpu);
+	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 	return 1;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f0677d1eae..f1f8ff2f1fa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1742,7 +1742,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			mutex_lock(&kvm->lock);
-			kvm_set_irq(kvm, irq_event.irq, irq_event.level);
+			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
 			r = 0;
 		}
@@ -4013,6 +4014,9 @@ struct kvm *kvm_arch_create_vm(void)
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
+	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
+
 	return kvm;
 }
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 48ee4f9435f..a5d8e1ace1c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -367,10 +367,9 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
- * We store cr0 (and cr3) locally, because the Host never changes it.  The
- * Guest sometimes wants to read it and we'd prefer not to bother the Host
- * unnecessarily. */
-static unsigned long current_cr0, current_cr3;
+ * We store cr0 locally because the Host never changes it.  The Guest sometimes
+ * wants to read it and we'd prefer not to bother the Host unnecessarily. */
+static unsigned long current_cr0;
 static void lguest_write_cr0(unsigned long val)
 {
 	lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
@@ -399,17 +398,23 @@ static unsigned long lguest_read_cr2(void)
 	return lguest_data.cr2;
 }
 
+/* See lguest_set_pte() below. */
+static bool cr3_changed = false;
+
 /* cr3 is the current toplevel pagetable page: the principle is the same as
- * cr0.  Keep a local copy, and tell the Host when it changes. */
+ * cr0.  Keep a local copy, and tell the Host when it changes.  The only
+ * difference is that our local copy is in lguest_data because the Host needs
+ * to set it upon our initial hypercall. */
 static void lguest_write_cr3(unsigned long cr3)
 {
+	lguest_data.pgdir = cr3;
 	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
-	current_cr3 = cr3;
+	cr3_changed = true;
 }
 
 static unsigned long lguest_read_cr3(void)
 {
-	return current_cr3;
+	return lguest_data.pgdir;
 }
 
 /* cr4 is used to enable and disable PGE, but we don't care. */
@@ -498,13 +503,13 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
- * which makes booting astonishingly slow.  So we don't even tell the Host
- * anything changed until we've done the first page table switch. */
+ * which makes booting astonishingly slow: 1.83 seconds!  So we don't even tell
+ * the Host anything changed until we've done the first page table switch,
+ * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
-	/* Don't bother with hypercall before initial setup. */
-	if (current_cr3)
+	if (cr3_changed)
 		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
@@ -521,7 +526,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 static void lguest_flush_tlb_single(unsigned long addr)
 {
 	/* Simply set it to zero: if it was not, it will fault back in. */
-	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+	lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
 }
 
 /* This is what happens after the Guest has removed a large number of entries.
@@ -581,6 +586,9 @@ static void __init lguest_init_IRQ(void)
 
 	for (i = 0; i < LGUEST_IRQS; i++) {
 		int vector = FIRST_EXTERNAL_VECTOR + i;
+		/* Some systems map "vectors" to interrupts weirdly.  Lguest has
+		 * a straightforward 1 to 1 mapping, so force that here. */
+		__get_cpu_var(vector_irq)[vector] = i;
 		if (vector != SYSCALL_VECTOR) {
 			set_intr_gate(vector, interrupt[vector]);
 			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 6bbdd633864..a580b9562e7 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -27,7 +27,7 @@ static struct irqaction irq2 = {
 void __init intr_init_hook(void)
 {
 #ifdef CONFIG_SMP
-	smp_intr_init();
+	voyager_smp_intr_init();
 #endif
 
 	setup_irq(2, &irq2);
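The i8254.c and x86.c hunks above give every interrupt source a bitmap-allocated ID, with bit 0 reserved for userspace (`KVM_USERSPACE_IRQ_SOURCE_ID`). A minimal sketch of such a bitmap allocator (simplified, single-threaded stand-in for the kernel's locked helpers):

```c
#include <stdio.h>

#define USERSPACE_IRQ_SOURCE_ID 0	/* bit 0 reserved, as in the hunk above */
#define MAX_IRQ_SOURCES (sizeof(unsigned long) * 8)

static unsigned long irq_sources_bitmap = 1UL << USERSPACE_IRQ_SOURCE_ID;

/* Hand out the lowest free bit, or -1 when every ID is taken. */
static int request_irq_source_id(void)
{
	for (unsigned int i = 0; i < MAX_IRQ_SOURCES; i++) {
		if (!(irq_sources_bitmap & (1UL << i))) {
			irq_sources_bitmap |= 1UL << i;
			return (int)i;
		}
	}
	return -1;
}

static void free_irq_source_id(int id)
{
	irq_sources_bitmap &= ~(1UL << id);
}

int main(void)
{
	int pit_id = request_irq_source_id();	/* first caller gets ID 1 */

	printf("PIT got source id %d\n", pit_id);
	free_irq_source_id(pit_id);
	return 0;
}
```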
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0f6e8a6523a..0e331652681 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
+static void initialize_secondary(void);
 
 int hard_smp_processor_id(void);
 int safe_smp_processor_id(void);
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void)
 	}
 }
 
+void prefill_possible_map(void)
+{
+	/* This is empty on voyager because we need a much
+	 * earlier detection which is done in find_smp_config */
+}
+
 /* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id)
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
+	c->cpu_index = id;
 
 	identify_secondary_cpu(c);
 }
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void)
 	smp_tune_scheduling();
 	 */
 	smp_store_cpu_info(boot_cpu_id);
+	/* setup the jump vector */
+	initial_code = (unsigned long)initialize_secondary;
 	printk("CPU%d: ", boot_cpu_id);
 	print_cpu_info(&cpu_data(boot_cpu_id));
 
@@ -702,7 +712,7 @@ void __init smp_boot_cpus(void)
 
 /* Reload the secondary CPUs task structure (this function does not
 * return ) */
-void __init initialize_secondary(void)
+static void __init initialize_secondary(void)
 {
 #if 0
 	// AC kernels only
@@ -1248,7 +1258,7 @@ static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
 #define QIC_SET_GATE(cpi, vector) \
 	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
 
-void __init smp_intr_init(void)
+void __init voyager_smp_intr_init(void)
 {
 	int i;
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 59f89b434b4..fea4565ff57 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,7 @@
 obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	    pat.o pgtable.o gup.o
 
-obj-$(CONFIG_X86_32)		+= pgtable_32.o
+obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_X86_PTDUMP)	+= dump_pagetables.o
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 4ba373c5b8c..be54176e9eb 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					start, len)))
+					(void __user *)start, len)))
 		goto slow_irqon;
 
 	/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 7f8a2daa3fd..3ffed259883 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -336,7 +336,6 @@ int devmem_is_allowed(unsigned long pagenr)
 	return 0;
 }
 
-#ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
@@ -359,6 +358,7 @@ static void __init kmap_init(void)
 	kmap_prot = PAGE_KERNEL;
 }
 
+#ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
 	unsigned long vaddr;
@@ -438,7 +438,6 @@ static void __init set_highmem_pages_init(void)
 #endif /* !CONFIG_NUMA */
 
 #else
-# define kmap_init()			do { } while (0)
 # define permanent_kmaps_init(pgd_base)	do { } while (0)
 # define set_highmem_pages_init()	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d4941..9db01db6e3c 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte))
+		if (pte_val(*pte)) {
+			pages++;
 			continue;
+		}
 
 		if (0)
 			printk("   pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M))
+			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				pages++;
 				continue;
+			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G))
+			if (page_size_mask & (1 << PG_LEVEL_1G)) {
+				pages++;
 				continue;
+			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}
 
@@ -665,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pos;
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
 	int use_pse, use_gbpages;
 
-	printk(KERN_INFO "init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -704,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+	pos = start_pfn << PAGE_SHIFT;
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (2M) range*/
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
 			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (1G) range */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (1G) alignment */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (2M) alignment */
-	start_pfn = end_pfn;
+	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
@@ -831,12 +853,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
-	WARN_ON(1);
+	WARN_ON_ONCE(ret);
 
 	return ret;
 }
@@ -878,6 +900,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 void __init mem_init(void)
 {
 	long codesize, reservedpages, datasize, initsize;
+	unsigned long absent_pages;
 
 	start_periodic_check_for_corruption();
 
@@ -893,8 +916,9 @@ void __init mem_init(void)
 #else
 	totalram_pages = free_all_bootmem();
 #endif
-	reservedpages = max_pfn - totalram_pages -
-					absent_pages_in_range(0, max_pfn);
+
+	absent_pages = absent_pages_in_range(0, max_pfn);
+	reservedpages = max_pfn - totalram_pages - absent_pages;
 	after_bootmem = 1;
 
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -911,10 +935,11 @@ void __init mem_init(void)
 				 VSYSCALL_END - VSYSCALL_START);
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-			 "%ldk reserved, %ldk data, %ldk init)\n",
+			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		max_pfn << (PAGE_SHIFT-10),
 		codesize >> 10,
+		absent_pages << (PAGE_SHIFT-10),
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
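The init_64.c rework above splits `[start, end)` into a 4k head, a 2M run, a 1G middle, a 2M tail and a 4k tail, tracking progress in `pos` and only recording non-empty pieces. A standalone sketch of that splitting logic (hypothetical unaligned range; `emit()` stands in for `save_mr()`):

```c
#include <stdio.h>

#define PMD_SIZE (1UL << 21)	/* 2 MB */
#define PUD_SIZE (1UL << 30)	/* 1 GB */

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned long align_down(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}

/* Mirrors the "if (start_pfn < end_pfn)" guards: empty pieces vanish. */
static unsigned long emit(unsigned long s, unsigned long e, const char *kind)
{
	if (s < e)
		printf("%-3s %#011lx - %#011lx\n", kind, s, e);
	return e > s ? e : s;
}

int main(void)
{
	unsigned long start = 0xa000, end = 0x80003000;	/* unaligned ends */
	unsigned long pos = start, e;

	e = align_up(pos, PMD_SIZE);		/* 4k head up to 2M boundary */
	pos = emit(pos, e > end ? end : e, "4k");

	e = align_up(pos, PUD_SIZE);		/* 2M run up to 1G boundary */
	if (e > align_down(end, PMD_SIZE))
		e = align_down(end, PMD_SIZE);
	pos = emit(pos, e, "2M");

	e = align_down(end, PUD_SIZE);		/* 1G pages in the middle */
	pos = emit(pos, e, "1G");

	e = align_down(end, PMD_SIZE);		/* 2M tail */
	pos = emit(pos, e, "2M");

	emit(pos, end, "4k");			/* 4k tail */
	return 0;
}
```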
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
new file mode 100644
index 00000000000..d0151d8ce45
--- /dev/null
+++ b/arch/x86/mm/iomap_32.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/iomap.h>
+#include <linux/module.h>
+
+/* Map 'pfn' using fixed map 'type' and protections 'prot'
+ */
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	/*
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
+	 */
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+
+	arch_flush_lazy_mmu_mode();
+	pagefault_enable();
+}
+EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ae71e11eb3e..d4c4307ff3e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 					unsigned long size)
 {
 	unsigned long flags;
-	void *ret;
+	void __iomem *ret;
 	int err;
 
 	/*
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	if (err < 0)
 		return NULL;
 
-	ret = (void *) __ioremap_caller(phys_addr, size, flags,
-					__builtin_return_address(0));
+	ret = __ioremap_caller(phys_addr, size, flags,
+			       __builtin_return_address(0));
 
 	free_memtype(phys_addr, phys_addr + size);
-	return (void __iomem *)ret;
+	return ret;
 }
 
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size,
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
 	return prev_map[slot];
 }
 
 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
 	unsigned long virt_addr;
 	unsigned long offset;
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size)
 		--idx;
 		--nrpages;
 	}
-	prev_map[slot] = 0;
+	prev_map[slot] = NULL;
 }
 
 void __this_fixmap_does_not_exist(void)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f1dc1b75d16..e89d24815f2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -67,18 +67,18 @@ static void split_page_count(int level)
 
 void arch_report_meminfo(struct seq_file *m)
 {
-	seq_printf(m, "DirectMap4k:  %8lu kB\n",
+	seq_printf(m, "DirectMap4k: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_4K] << 2);
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-	seq_printf(m, "DirectMap2M:  %8lu kB\n",
+	seq_printf(m, "DirectMap2M: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 11);
 #else
-	seq_printf(m, "DirectMap4M:  %8lu kB\n",
+	seq_printf(m, "DirectMap4M: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
 #ifdef CONFIG_X86_64
 	if (direct_gbpages)
-		seq_printf(m, "DirectMap1G:  %8lu kB\n",
+		seq_printf(m, "DirectMap1G: %8lu kB\n",
 			direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 738fd0f2495..eb1bf000d12 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
 	u64 to = from + size;
 	u64 cursor = from;
 
+	if (!pat_enabled)
+		return 1;
+
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
 			printk(KERN_INFO
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7..3f1b81a83e2 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
 static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 static int ppro_check_ctrs(struct pt_regs * const regs,
 			   struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
 	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
+		rdmsrl(msrs->counters[i].addr, val);
+		if (CTR_OVERFLOWED(val)) {
 			oprofile_add_sample(regs, i);
 			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 313947940a1..6dcefba7836 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,4 @@
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c..5e4686d70f6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
+		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-		} else
+		} else {
 			/* make sure there are no stray mappings of
 			   this page */
 			kmap_flush_unused();
-			vm_unmap_aliases();
+		}
 	}
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf..688936044dc 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
 	unsigned int level;
-	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & ~PAGE_MASK;
+	pte_t *pte;
+	unsigned offset;
 
-	BUG_ON(pte == NULL);
+	/*
+	 * if the PFN is in the linear mapped vaddr range, we can just use
+	 * the (quick) virt_to_machine() p2m lookup
+	 */
+	if (virt_addr_valid(vaddr))
+		return virt_to_machine(vaddr);
+
+	/* otherwise we have to do a (slower) full page-table walk */
 
+	pte = lookup_address(address, &level);
+	BUG_ON(pte == NULL);
+	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 	xen_mc_batch();
 
-	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
@@ -840,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	vm_unmap_aliases();
+
 	xen_mc_batch();
 
-	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-		/* re-enable interrupts for kmap_flush_unused */
+	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
+		/* re-enable interrupts for flushing */
 		xen_mc_issue(0);
+
 		kmap_flush_unused();
-		vm_unmap_aliases();
+
 		xen_mc_batch();
 	}
 
@@ -864,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is pinnable */
-	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		     PT_PMD);
 #endif
 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -981,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is unpinned */
-	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		       PT_PMD);
 #endif
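The op_model_ppro.c hunk above widens `CTR_OVERFLOWED()` from `1U` to `1ULL`. With performance counters wider than 32 bits, shifting a 32-bit constant by `counter_width - 1` is undefined behaviour and can never reach the counter's top bit. A standalone sketch of the pitfall (the width 40 is a hypothetical example):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int counter_width = 40;		/* hypothetical wide perfctr */
	uint64_t val = 1ULL << 39;	/* top bit of the counter still set */

	/*
	 * The old mask, 1U << (counter_width - 1), shifts a 32-bit constant
	 * by 39 -- undefined behaviour, and it could never produce bit 39.
	 * Widening the constant keeps the overflow test meaningful:
	 */
	uint64_t mask = 1ULL << (counter_width - 1);

	assert(val & mask);		/* counter still armed: no overflow */
	assert(!((uint64_t)0 & mask));	/* wrapped-to-zero count: overflow */
	return 0;
}
```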