From 4a922a969cb0190ce4580d4b064e2ac35f3ac9bf Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 13 Jan 2009 16:11:00 +0100
Subject: x86, cpufreq: remove leftover cpumask_copy()

Impact: fix potential boot crash on MAXSMP

Remove code left over by:

  50c668d: Revert "cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write"

That cmd.mask cpumask is not allocated anymore. No impact on default
!MAXSMP kernels.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 6f11e029e8c..8f3c95c7e61 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -235,8 +235,6 @@ static u32 get_cur_val(const struct cpumask *mask)
         return 0;
     }

-    cpumask_copy(cmd.mask, mask);
-
     drv_read(&cmd);

     dprintk("get_cur_val = %u\n", cmd.val);
--
cgit v1.2.3

From c8399943bdb70fef78798b97f975506ecc99e039 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 12 Jan 2009 23:01:15 +0100
Subject: x86, generic: mark complex bitops.h inlines as __always_inline

Impact: reduce kernel image size

Hugh Dickins noticed that older gcc versions, when the kernel is built
for code size, didn't inline some of the bitops.

Mark all complex x86 bitops that have more than a single asm statement
or two as __always_inline to avoid this problem. This should probably
be done for other architectures too.

Ingo then found a better fix that only requires a single line change,
but unfortunately it only works on gcc 4.3.

On older gccs the original patch still makes a ~0.3% defconfig
difference with CONFIG_OPTIMIZE_INLINING=y.

With gcc 4.1 and a defconfig-like build:

  6116998 1138540 883788 8139326 7c323e vmlinux-oi-with-patch
  6137043 1138540 883788 8159371 7c808b vmlinux-optimize-inlining

~20k / 0.3% difference.

Signed-off-by: Andi Kleen
Signed-off-by: Andrew Morton
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/bitops.h      | 14 ++++++++++----
 include/asm-generic/bitops/__ffs.h |  2 +-
 include/asm-generic/bitops/__fls.h |  2 +-
 include/asm-generic/bitops/fls.h   |  2 +-
 include/asm-generic/bitops/fls64.h |  4 ++--
 5 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index e02a359d2aa..02b47a603fc 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@

 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */

 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
     if (IS_IMMEDIATE(nr)) {
         asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
     if (IS_IMMEDIATE(nr)) {
         asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
     return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
     return oldbit;
 }

-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
     return ((1UL << (nr % BITS_PER_LONG)) &
         (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
index 9a3274aecf8..937d7c43557 100644
--- a/include/asm-generic/bitops/__ffs.h
+++ b/include/asm-generic/bitops/__ffs.h
@@ -9,7 +9,7 @@
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
     int num = 0;

diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
index be24465403d..a60a7ccb678 100644
--- a/include/asm-generic/bitops/__fls.h
+++ b/include/asm-generic/bitops/__fls.h
@@ -9,7 +9,7 @@
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
     int num = BITS_PER_LONG - 1;

diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index 850859bc506..0576d1f42f4 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -9,7 +9,7 @@
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
     int r = 32;

diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 86d403f8b25..b097cf8444e 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -15,7 +15,7 @@
 * at position 64.
 */
 #if BITS_PER_LONG == 32
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
     __u32 h = x >> 32;
     if (h)
@@ -23,7 +23,7 @@
     return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
     if (x == 0)
         return 0;
--
cgit v1.2.3
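A note for readers outside the kernel tree: the effect above hinges on gcc's inlining heuristics under -Os. A minimal standalone sketch of the pattern follows; the function body mirrors the generic fls() shift cascade visible in the hunks above, while the wrapper macro is an assumption for user-space compilation (the kernel gets __always_inline from its own compiler headers).

    /*
     * Sketch: forcing inlining of a multi-statement helper, as the
     * patch does for the generic bitops. With plain "inline", an older
     * gcc building at -Os may emit an out-of-line copy instead;
     * always_inline removes that freedom.
     */
    #define my_always_inline inline __attribute__((always_inline))

    static my_always_inline int my_fls(int x)
    {
        int r = 32;          /* bit position of the MSB, 1-based */

        if (!x)
            return 0;        /* my_fls(0) == 0, as fls() documents */
        if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
        if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
        if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
        if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
        if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
        return r;            /* my_fls(1) == 1, my_fls(0x80000000) == 32 */
    }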
From 5cca0cf15a94417f49625ce52e23589eed0a1675 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Fri, 9 Jan 2009 14:35:20 -0800
Subject: x86, pat: fix reserve_memtype() for legacy 1MB range

Thierry Vignaud reported:
> http://bugzilla.kernel.org/show_bug.cgi?id=12372
>
> On P4 with an SiS motherboard (video card is a SiS 651)
> X server fails to start with error:
> xf86MapVidMem: Could not mmap framebuffer (0x00000000,0x2000) (Invalid
> argument)

Here X is trying to map the first 8KB of memory using /dev/mem. Existing
code treats the first 0-4KB of memory as non-RAM and 4KB-8KB as RAM.
Recent code changes don't allow mapping memory with different attributes
at the same time.

Fix this by treating the first 1MB legacy region as special, and always
tracking the attribute requests within this region using the linear
linked list (without bothering whether the range is RAM, non-RAM or
mixed).

Reported-and-tested-by: Thierry Vignaud
Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd372..070ee4a3b22 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                           req_type & _PAGE_CACHE_MASK);
     }

-    is_range_ram = pagerange_is_ram(start, end);
-    if (is_range_ram == 1)
-        return reserve_ram_pages_type(start, end, req_type, new_type);
-    else if (is_range_ram < 0)
-        return -EINVAL;
+    /*
+     * For legacy reasons, some parts of the physical address range in the
+     * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+     * the e820 tables). So we will track the memory attributes of this
+     * legacy 1MB region using the linear memtype_list always.
+     */
+    if (end >= ISA_END_ADDRESS) {
+        is_range_ram = pagerange_is_ram(start, end);
+        if (is_range_ram == 1)
+            return reserve_ram_pages_type(start, end, req_type,
+                              new_type);
+        else if (is_range_ram < 0)
+            return -EINVAL;
+    }

     new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
     if (!new)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
     if (is_ISA_range(start, end - 1))
         return 0;

-    is_range_ram = pagerange_is_ram(start, end);
-    if (is_range_ram == 1)
-        return free_ram_pages_type(start, end);
-    else if (is_range_ram < 0)
-        return -EINVAL;
+    /*
+     * For legacy reasons, some parts of the physical address range in the
+     * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+     * the e820 tables). So we will track the memory attributes of this
+     * legacy 1MB region using the linear memtype_list always.
+     */
+    if (end >= ISA_END_ADDRESS) {
+        is_range_ram = pagerange_is_ram(start, end);
+        if (is_range_ram == 1)
+            return free_ram_pages_type(start, end);
+        else if (is_range_ram < 0)
+            return -EINVAL;
+    }

     spin_lock(&memtype_lock);
     list_for_each_entry(entry, &memtype_list, nd) {
--
cgit v1.2.3
From 4a13ad0bd8869bb491c67918662f9b1852595af5 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Wed, 14 Jan 2009 12:28:51 +0000
Subject: x86: avoid early crash in disable_local_APIC()

E.g. when called due to an early panic.

Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/apic.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 0f830e4f567..d19aa3aab62 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
     unsigned int value;

+    /* APIC hasn't been mapped yet */
+    if (!apic_phys)
+        return;
+
     clear_local_APIC();

     /*
--
cgit v1.2.3

From 18c07cf530cf4aa8b7551801f68ed40db5ee4e45 Mon Sep 17 00:00:00 2001
From: Cliff Wickman
Date: Thu, 15 Jan 2009 09:51:20 -0600
Subject: x86, UV: cpu_relax in uv_wait_completion

The function uv_wait_completion() spins on reads of a memory-mapped
register, waiting for completion of BAU hardware replies.

It should call "cpu_relax()" between those reads to improve performance
on hyperthreaded configurations.

Signed-off-by: Cliff Wickman
Acked-by: Jack Steiner
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/tlb_uv.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e..6812b829ed8 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
                 destination_timeouts = 0;
             }
         }
+        cpu_relax();
     }
     return FLUSH_COMPLETE;
 }
--
cgit v1.2.3
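The pattern being fixed is the common MMIO poll loop. A minimal sketch of its general shape, with hypothetical register and mask names (the BAU's actual structures are more involved): cpu_relax() compiles to the x86 PAUSE instruction, which lets a hyperthread yield pipeline resources to its sibling while spinning, and also reduces power draw in the loop.

    #include <asm/processor.h>    /* cpu_relax() */

    /*
     * Sketch of a polite MMIO poll loop. Without cpu_relax(), a tight
     * read loop starves the sibling hyperthread of execution resources
     * for the entire wait.
     */
    static void poll_for_completion(volatile unsigned long *status_reg,
                                    unsigned long done_mask)
    {
        while (!(*status_reg & done_mask))
            cpu_relax();    /* the call this patch adds per iteration */
    }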
From a3c6018e565dc07cf3738ace6bbe412f97b1bba8 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 16 Jan 2009 11:59:33 +0000
Subject: x86: fix assumed to be contiguous leaf page tables for kmap_atomic region (take 2)

Debugging and original patch from Nick Piggin.

The early fixmap pmd entry inserted at the very top of the KVA is causing
the subsequent fixmap mapping code to not provide physically linear pte
pages over the kmap atomic portion of the fixmap (which relies on said
property to calculate pte addresses).

This has caused weird boot failures in kmap_atomic() much later in the
boot process (initial userspace faults) on a 32-bit PAE system with a
larger number of CPUs (smaller CPU counts tend not to run over into the
next page, and so don't expose the problem).

Solve this by attempting to clear out the page table, and copy any of its
entries to the new one. Also, add a BUG() if a nonlinear condition is
encountered that can't be resolved, which might save some hours of
debugging if this fragile scheme ever breaks again...

Once we have such logic, we can also use it to eliminate the early
ioremap trickery around the page table setup for the fixmap area. This
also fixes potential issues with FIX_* entries sharing the leaf page
table with the early ioremap ones getting discarded by
early_ioremap_clear() and not restored by early_ioremap_reset(). It at
once eliminates the temporary (and configuration-dependent, namely on
NR_CPUS) unavailability of early fixed mappings during the time the
fixmap area page tables get constructed.

Finally, also replace the hard-coded calculation of the initial table
space needed for the fixmap area with a proper one, allowing kernels
configured for large CPU counts to actually boot.

Based-on: Nick Piggin
Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/io.h |  1 -
 arch/x86/mm/init_32.c     | 48 ++++++++++++++++++++++++++++++++++++++++++++---
 arch/x86/mm/ioremap.c     | 25 ------------------------
 3 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 05cfed4485f..1dbbdf4be9b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 * A boot-time mapping is currently limited to at most 16 pages.
 */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3b..2cef0507441 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
     return pte_offset_kernel(pmd, 0);
 }

+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+                       unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+    /*
+     * Something (early fixmap) may already have put a pte
+     * page here, which causes the page table allocation
+     * to become nonlinear. Attempt to fix it, and if it
+     * is still nonlinear then we have to bug.
+     */
+    int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+    int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+    if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+        && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+        && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+        && ((__pa(pte) >> PAGE_SHIFT) < table_start
+        || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+        pte_t *newpte;
+        int i;
+
+        BUG_ON(after_init_bootmem);
+        newpte = alloc_low_page();
+        for (i = 0; i < PTRS_PER_PTE; i++)
+            set_pte(newpte + i, pte[i]);
+
+        paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+        set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+        BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+        __flush_tlb_all();
+
+        paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+        pte = newpte;
+    }
+    BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+           && vaddr > fix_to_virt(FIX_KMAP_END)
+           && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+    return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
     unsigned long vaddr;
     pgd_t *pgd;
     pmd_t *pmd;
+    pte_t *pte = NULL;

     vaddr = start;
     pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
         pmd = pmd + pmd_index(vaddr);
         for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
              pmd++, pmd_idx++) {
-            one_page_table_init(pmd);
+            pte = page_table_kmap_check(one_page_table_init(pmd),
+                            pmd, vaddr, pte);

             vaddr += PMD_SIZE;
         }
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
      * Fixed mappings, only the page table structure has to be
      * created - mappings will be set by set_fixmap():
      */
-    early_ioremap_clear();
     vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
     end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
     page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
     tables += PAGE_ALIGN(ptes * sizeof(pte_t));

     /* for fixmap */
-    tables += PAGE_SIZE * 2;
+    tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

     /*
      * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e..af750ab973b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
     }
 }

-void __init early_ioremap_clear(void)
-{
-    pmd_t *pmd;
-
-    if (early_ioremap_debug)
-        printk(KERN_INFO "early_ioremap_clear()\n");
-
-    pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-    pmd_clear(pmd);
-    paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-    __flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-    enum fixed_addresses idx;
-    unsigned long addr, phys;
-    pte_t *pte;
-
     after_paging_init = 1;
-    for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-        addr = fix_to_virt(idx);
-        pte = early_ioremap_pte(addr);
-        if (pte_present(*pte)) {
-            phys = pte_val(*pte) & PAGE_MASK;
-            set_fixmap(idx, phys);
-        }
-    }
 }

 static void __init __early_set_fixmap(enum fixed_addresses idx,
--
cgit v1.2.3
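Why the physical linearity of these pte pages matters: the 32-bit kmap_atomic() fast path does no page-table walk per mapping, it derives the pte address by pointer arithmetic from a single base pte captured at boot. A sketch of that fast path follows (compare the nearly identical lines in iomap_atomic_prot_pfn() later in this series); treat the surrounding details as approximate, the point is the kmap_pte - idx arithmetic.

    /*
     * Sketch of the 32-bit highmem kmap_atomic() fast path: the pte
     * for slot 'idx' is found by offsetting from one cached base pte
     * (kmap_pte), which silently assumes all kmap fixmap slots live in
     * one physically contiguous run of pte pages - the property this
     * patch restores.
     */
    static void *kmap_atomic_sketch(struct page *page, enum km_type type,
                                    pgprot_t prot)
    {
        enum fixed_addresses idx;
        unsigned long vaddr;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, mk_pte(page, prot)); /* no walk here */

        return (void *)vaddr;
    }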
From 64dec40ddee9d36d7b83f8a0513fdeaffe260a0e Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Fri, 16 Jan 2009 13:23:35 -0500
Subject: x86: define ARCH_WANT_FRAME_POINTERS

Commit da4276b8299a6544dc41ac2485d3ffca5811b3fb changed a dependency
for FRAME_POINTER from X86 to ARCH_WANT_FRAME_POINTERS, but didn't
actually define it.

This patch adds the definition for ARCH_WANT_FRAME_POINTERS. Without
it, FRAME_POINTER can't be enabled on x86.

Signed-off-by: Jeff Mahoney
Signed-off-by: Ingo Molnar
---
 lib/Kconfig.debug | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c9ae6085c7..d30561dda90 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -570,6 +570,15 @@ config DEBUG_NOTIFIERS
       This is a relatively cheap check but if you care about maximum
       performance, say N.

+#
+# Select this config option from the architecture Kconfig, if it
+# is preferred to always offer frame pointers as a config
+# option on the architecture (regardless of KERNEL_DEBUG):
+#
+config ARCH_WANT_FRAME_POINTERS
+    bool
+    help
+
 config FRAME_POINTER
     bool "Compile the kernel with frame pointers"
     depends on DEBUG_KERNEL && \
--
cgit v1.2.3
From b2b815d80a5c4e5b50be0a98aba8c445ce8f3e1f Mon Sep 17 00:00:00 2001
From: Mike Travis
Date: Fri, 16 Jan 2009 15:22:16 -0800
Subject: x86: put trigger in to detect mismatched apic versions

Impact: add debug warning

Fire off one message if two APICs are discovered with different APIC
versions. (This code is only called during CPU init.)

The goal of this is to pave the way for the removal of the
apic_version[] array. We don't expect any APIC version
incompatibilities in the x86 landscape of systems [if so, we don't
handle them very well and probably never will handle deep APIC version
asymmetries well], but it's prudent to have a debug check for one
kernel cycle nevertheless.

Signed-off-by: Mike Travis
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/apic.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d19aa3aab62..4b6df2469fe 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1837,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
     num_processors++;
     cpu = cpumask_next_zero(-1, cpu_present_mask);

+    if (version != apic_version[boot_cpu_physical_apicid])
+        WARN_ONCE(1,
+            "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+            apic_version[boot_cpu_physical_apicid], cpu, version);
+
     physid_set(apicid, phys_cpu_present_map);
     if (apicid == boot_cpu_physical_apicid) {
         /*
--
cgit v1.2.3
From c7f8562a51c2e5dcc1a00a2bdd232b9965ff960d Mon Sep 17 00:00:00 2001
From: Leonardo Potenza
Date: Sun, 18 Jan 2009 23:03:56 +0100
Subject: x86: fix section mismatch warnings in kernel/setup_percpu.c

The function setup_cpu_local_masks() has been marked __init, in order
to remove the following section mismatch messages:

WARNING: vmlinux.o(.text+0x3c2c7): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var()
The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var().
This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong.

WARNING: vmlinux.o(.text+0x3c2d3): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var()
The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var().
This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong.

WARNING: vmlinux.o(.text+0x3c2df): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var()
The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var().
This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong.

WARNING: vmlinux.o(.text+0x3c2eb): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var()
The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var().
This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong.

Signed-off-by: Leonardo Potenza
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/setup_percpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 55c46074eba..01161077a49 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void)
 #ifdef CONFIG_X86_64

 /* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
 {
     alloc_bootmem_cpumask_var(&cpu_initialized_mask);
     alloc_bootmem_cpumask_var(&cpu_callin_mask);
--
cgit v1.2.3

From 31ad9081200c06ccc350625d41d1f8b2d1cef29f Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Fri, 16 Jan 2009 15:31:15 -0800
Subject: work_on_cpu: don't try to get_online_cpus() in work_on_cpu.

Impact: remove potential circular lock dependency with cpu hotplug lock

This has caused more problems than it solved, with a pile of cpu
hotplug locking issues.

Followup patches will call get_online_cpus() in callers that need it,
but if they don't do it they're no worse off than before, when they
were using set_cpus_allowed() without locking.

Signed-off-by: Rusty Russell
Signed-off-by: Mike Travis
Signed-off-by: Ingo Molnar
---
 kernel/workqueue.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae3..a35afdbc016 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -991,8 +991,8 @@ static void do_work_for_cpu(struct work_struct *w)
 * @fn: the function to run
 * @arg: the function arg
 *
- * This will return -EINVAL in the cpu is not online, or the return value
- * of @fn otherwise.
+ * This will return the value @fn returns.
+ * It is up to the caller to ensure that the cpu doesn't go offline.
 */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
@@ -1001,14 +1001,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
     INIT_WORK(&wfc.work, do_work_for_cpu);
     wfc.fn = fn;
     wfc.arg = arg;
-    get_online_cpus();
-    if (unlikely(!cpu_online(cpu)))
-        wfc.ret = -EINVAL;
-    else {
-        schedule_work_on(cpu, &wfc.work);
-        flush_work(&wfc.work);
-    }
-    put_online_cpus();
+    schedule_work_on(cpu, &wfc.work);
+    flush_work(&wfc.work);

     return wfc.ret;
 }
--
cgit v1.2.3
From 8ccad40df8d314f786fdb06bdbedd4f43f3257cd Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Fri, 16 Jan 2009 15:31:15 -0800
Subject: work_on_cpu: Use our own workqueue.

Impact: remove potential clashes with generic kevent workqueue

Annoyingly, some places we want to use work_on_cpu are already in
workqueues. As per Ingo's suggestion, we create a different workqueue
for work_on_cpu.

Signed-off-by: Rusty Russell
Signed-off-by: Mike Travis
Signed-off-by: Ingo Molnar
---
 kernel/workqueue.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a35afdbc016..1f0c509b40d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
 }

 #ifdef CONFIG_SMP
+static struct workqueue_struct *work_on_cpu_wq __read_mostly;
+
 struct work_for_cpu {
     struct work_struct work;
     long (*fn)(void *);
@@ -1001,7 +1003,7 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
     INIT_WORK(&wfc.work, do_work_for_cpu);
     wfc.fn = fn;
     wfc.arg = arg;
-    schedule_work_on(cpu, &wfc.work);
+    queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
     flush_work(&wfc.work);

     return wfc.ret;
@@ -1019,4 +1021,8 @@ void __init init_workqueues(void)
     hotcpu_notifier(workqueue_cpu_callback, 0);
     keventd_wq = create_workqueue("events");
     BUG_ON(!keventd_wq);
+#ifdef CONFIG_SMP
+    work_on_cpu_wq = create_workqueue("work_on_cpu");
+    BUG_ON(!work_on_cpu_wq);
+#endif
 }
--
cgit v1.2.3
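Taken together, the two work_on_cpu() patches leave callers with this contract: the function runs on a dedicated workqueue on the target CPU, its return value is marshalled back, and the caller is now responsible for keeping the CPU online. A hedged usage sketch with made-up function names and values:

    #include <linux/cpu.h>
    #include <linux/workqueue.h>

    /* Runs on the target CPU; work_on_cpu() returns this value. */
    static long read_local_value(void *arg)
    {
        unsigned long *out = arg;

        *out = 42;    /* stand-in for a per-CPU MSR/register read */
        return 0;
    }

    static long query_cpu(unsigned int cpu)
    {
        unsigned long val = 0;
        long ret;

        get_online_cpus();    /* the caller pins the CPU online now */
        ret = work_on_cpu(cpu, read_local_value, &val);
        put_online_cpus();

        return ret ? ret : (long)val;
    }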
From 72859081851af2bda04117ca3d64206ffa199e5e Mon Sep 17 00:00:00 2001
From: Mike Travis
Date: Fri, 16 Jan 2009 15:31:15 -0800
Subject: cpufreq: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write

Impact: use new work_on_cpu function to reduce stack usage

Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr()
with a work_on_cpu function for drv_read() and drv_write().

Basically converts do_drv_{read,write} into "work_on_cpu" functions
that are now called by drv_read and drv_write.

Note: This patch basically reverts 50c668d6, which reverted 7503bfba,
now that the work_on_cpu() function is more stable.

Signed-off-by: Mike Travis
Acked-by: Rusty Russell
Tested-by: Dieter Ries
Tested-by: Maciej Rutecki
Cc: Dave Jones
Cc:
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8f3c95c7e61..5bf2c834a23 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -150,8 +150,9 @@ struct drv_cmd {
     u32 val;
 };

-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+    struct drv_cmd *cmd = _cmd;
     u32 h;

     switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
     default:
         break;
     }
+    return 0;
 }

-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+    struct drv_cmd *cmd = _cmd;
     u32 lo, hi;

     switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
     default:
         break;
     }
+    return 0;
 }

 static void drv_read(struct drv_cmd *cmd)
 {
-    cpumask_t saved_mask = current->cpus_allowed;
     cmd->val = 0;

-    set_cpus_allowed_ptr(current, cmd->mask);
-    do_drv_read(cmd);
-    set_cpus_allowed_ptr(current, &saved_mask);
+    work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }

 static void drv_write(struct drv_cmd *cmd)
 {
-    cpumask_t saved_mask = current->cpus_allowed;
     unsigned int i;

     for_each_cpu(i, cmd->mask) {
-        set_cpus_allowed_ptr(current, cpumask_of(i));
-        do_drv_write(cmd);
+        work_on_cpu(i, do_drv_write, cmd);
     }
-
-    set_cpus_allowed_ptr(current, &saved_mask);
-    return;
 }

 static u32 get_cur_val(const struct cpumask *mask)
@@ -366,7 +362,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
     return freq;
 }

-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
                 struct acpi_cpufreq_data *data)
 {
     unsigned int cur_freq;
--
cgit v1.2.3
From bfa318ad52a23f1e303d176b44366cdd2bb71ad2 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 15 Jan 2009 15:46:08 +0100
Subject: fix: crash: IP: __bitmap_intersects+0x48/0x73

-tip testing found this crash:

> [   35.258515] calling  acpi_cpufreq_init+0x0/0x127 @ 1
> [   35.264127] BUG: unable to handle kernel NULL pointer dereference at (null)
> [   35.267554] IP: [] __bitmap_intersects+0x48/0x73
> [   35.267554] PGD 0
> [   35.267554] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC

arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c is still broken: there's no
allocation of the variable mask, so we pass an uninitialized cmd.mask
field to drv_read(), which then passes it to the scheduler, which then
crashes ...

Switch it over to the much simpler constant-cpumask-pointers approach.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 5bf2c834a23..4b1c319d30c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,7 +145,7 @@ typedef union {

 struct drv_cmd {
     unsigned int type;
-    cpumask_var_t mask;
+    const struct cpumask *mask;
     drv_addr_union addr;
     u32 val;
 };
@@ -231,6 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
         return 0;
     }

+    cmd.mask = mask;
     drv_read(&cmd);

     dprintk("get_cur_val = %u\n", cmd.val);
@@ -397,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         return -ENODEV;
     }

-    if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
-        return -ENOMEM;
-
     perf = data->acpi_data;
     result = cpufreq_frequency_table_target(policy,
                         data->freq_table,
@@ -444,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,

     /* cpufreq holds the hotplug lock, so we are safe from here on */
     if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-        cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+        cmd.mask = policy->cpus;
     else
-        cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+        cmd.mask = cpumask_of(policy->cpu);

     freqs.old = perf->states[perf->state].core_frequency * 1000;
     freqs.new = data->freq_table[next_state].frequency;
@@ -473,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
     perf->state = next_perf_state;

 out:
-    free_cpumask_var(cmd.mask);
     return result;
 }
--
cgit v1.2.3
From f5495506c3c1300d249d403c36f92de71920dbeb Mon Sep 17 00:00:00 2001
From: Gary Hade
Date: Mon, 19 Jan 2009 13:46:41 -0800
Subject: x86: remove kernel_physical_mapping_init() from init section

Impact: fix crash with memory hotplug enabled

kernel_physical_mapping_init() is called during memory hotplug, so it
does not belong in the init section.

If the kernel is built with CONFIG_DEBUG_SECTION_MISMATCH=y on the make
command line, arch/x86/mm/init_64.c is compiled with the
-fno-inline-functions-called-once gcc option, defeating inlining of
kernel_physical_mapping_init() within init_memory_mapping().

When kernel_physical_mapping_init() is not inlined, it is placed in the
.init.text section according to the __init in its current declaration.
A later call to kernel_physical_mapping_init() during a memory hotplug
operation encounters an int3 trap because the .init.text section memory
has been freed.

This patch eliminates the crash caused by the int3 trap by moving the
non-inlined kernel_physical_mapping_init() from .init.text to
.meminit.text.

Signed-off-by: Gary Hade
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 23f68e77ad1..e6d36b49025 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -596,7 +596,7 @@ static void __init init_gbpages(void)
         direct_gbpages = 0;
 }

-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
                          unsigned long end,
                          unsigned long page_size_mask)
 {
--
cgit v1.2.3
From e0a96129db574d6365e3439d16d88517c437ab33 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Fri, 16 Jan 2009 15:22:11 +0100
Subject: x86: use early clobbers in usercopy*.c

Impact: fix rare (but currently harmless) miscompile with certain
configs and gcc versions

Hugh Dickins noticed that strncpy_from_user() was miscompiled in some
circumstances with gcc 4.3. Thanks to Hugh's excellent analysis it was
easy to track down.

Hugh writes:

> Try building an x86_64 defconfig 2.6.29-rc1 kernel tree,
> except not quite defconfig, switch CONFIG_PREEMPT_NONE=y
> and CONFIG_PREEMPT_VOLUNTARY off (because it expands a
> might_fault() there, which hides the issue): using a
> gcc 4.3.2 (I've checked both openSUSE 11.1 and Fedora 10).
>
> It generates the following:
>
> 0000000000000000 <__strncpy_from_user>:
>    0:  48 89 d1     mov    %rdx,%rcx
>    3:  48 85 c9     test   %rcx,%rcx
>    6:  74 0e        je     16 <__strncpy_from_user+0x16>
>    8:  ac           lods   %ds:(%rsi),%al
>    9:  aa           stos   %al,%es:(%rdi)
>    a:  84 c0        test   %al,%al
>    c:  74 05        je     13 <__strncpy_from_user+0x13>
>    e:  48 ff c9     dec    %rcx
>   11:  75 f5        jne    8 <__strncpy_from_user+0x8>
>   13:  48 29 c9     sub    %rcx,%rcx
>   16:  48 89 c8     mov    %rcx,%rax
>   19:  c3           retq
>
> Observe that "sub %rcx,%rcx; mov %rcx,%rax", whereas gcc 4.2.1
> (and many other configs) say "sub %rcx,%rdx; mov %rdx,%rax".
> Isn't it returning 0 when it ought to be returning strlen?

The asm constraints for the strncpy_from_user() result were missing an
early clobber, which tells gcc that the last output arguments are
written before all input arguments are read.

Also add more early clobbers in the rest of the file and fix 32-bit
usercopy.c in the same way.

Signed-off-by: Andi Kleen
Signed-off-by: H. Peter Anvin
[ since this API is rarely used and no in-kernel user relies on a 'len'
  return value (they only rely on negative return values) this
  miscompile was never noticed in the field. But it's worth fixing
  nevertheless. ]
Signed-off-by: Ingo Molnar
---
 arch/x86/lib/usercopy_32.c | 4 ++--
 arch/x86/lib/usercopy_64.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 4a20b2f9a38..7c8ca91bb9e 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -56,7 +56,7 @@ do { \
         "   jmp 2b\n" \
         ".previous\n" \
         _ASM_EXTABLE(0b,3b) \
-        : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
+        : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
           "=&D" (__d2) \
         : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
         : "memory"); \
@@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n)
         "   .align 4\n"
         "   .long 0b,2b\n"
         ".previous"
-        :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
+        :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
         :"0" (n), "1" (s), "2" (0), "3" (mask)
         :"cc");
     return res & mask;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 64d6c84e635..ec13cb5f17e 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -32,7 +32,7 @@ do { \
         "   jmp 2b\n" \
         ".previous\n" \
         _ASM_EXTABLE(0b,3b) \
-        : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
+        : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
           "=&D" (__d2) \
         : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
         : "memory"); \
@@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
         ".previous\n"
         _ASM_EXTABLE(0b,3b)
         _ASM_EXTABLE(1b,2b)
-        : [size8] "=c"(size), [dst] "=&D" (__d0)
+        : [size8] "=&c"(size), [dst] "=&D" (__d0)
         : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
           [zero] "r" (0UL), [eight] "r" (8UL));
     return size;
--
cgit v1.2.3
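The constraint semantics at issue, reduced to a standalone example: without '&', gcc may allocate an output operand to the same register as an input, which is only correct if the asm consumes every input before writing any output. A small compilable demonstration (not kernel code):

    #include <stdio.h>

    /*
     * This asm writes %0 first and only then reads %1. With "=r", gcc
     * could legally place 'out' and 'in' in the same register; the mov
     * would then zero the input and the function would return 0. The
     * early clobber "=&r" forces distinct registers.
     */
    static inline unsigned long combine(unsigned long in)
    {
        unsigned long out;

        asm("mov $0, %0\n\t"   /* writes the output... */
            "add %1, %0"       /* ...then still reads the input */
            : "=&r" (out)      /* "=r" here would be a latent bug */
            : "r" (in));
        return out;
    }

    int main(void)
    {
        printf("%lu\n", combine(7));    /* prints 7 */
        return 0;
    }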
From 552b8aa4d1edcc1c764ff6f61a7686347a2d1827 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 20 Jan 2009 09:31:49 +0100
Subject: Revert "x86: signal: change type of parameter for sys_rt_sigreturn()"

This reverts commit 4217458dafaa57d8e26a46f5d05ab8c53cf64191.

Justin Madru bisected this commit; it was causing weird Firefox
crashes.

The reason is that GCC mis-optimizes (re-uses) the on-stack parameters
of the calling frame, which corrupts the syscall return pt_regs state
and thus corrupts user-space register state.

So we go back to the slightly less clean but more optimization-safe
method of getting to pt_regs. Also add a comment to explain this.

Resolves: http://bugzilla.kernel.org/show_bug.cgi?id=12505

Reported-and-bisected-by: Justin Madru
Tested-by: Justin Madru
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/syscalls.h |  2 +-
 arch/x86/kernel/signal.c        | 11 +++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 9c6797c3e56..c0b0bda754e 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                  struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(struct pt_regs);
+asmlinkage int sys_rt_sigreturn(unsigned long);

 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 89bb7668041..df0587f24c5 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -632,9 +632,16 @@ badframe:
 }

 #ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly as with tail-call optimization
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-    return do_rt_sigreturn(&regs);
+    struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+    return do_rt_sigreturn(regs);
 }
 #else /* !CONFIG_X86_32 */
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
--
cgit v1.2.3
From a1e46212a410793d575718818e81ddc442a65283 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Tue, 20 Jan 2009 14:20:21 -0800
Subject: x86: fix page attribute corruption with cpa()

Impact: fix sporadic slowdowns and warning messages

This patch fixes a performance issue reported by Linus on his Nehalem
system. While Linus reverted the PAT patch (commit
58dab916dfb57328d50deb0aa9b3fc92efa248ff) which exposed the issue,
existing cpa() code can potentially still cause wrong (page attribute
corruption) behavior.

This patch also fixes the "WARNING: at arch/x86/mm/pageattr.c:560" that
various people reported.

In a 64-bit kernel, the kernel identity mapping might have holes
depending on the available memory and how the e820 tables report the
address range covering the RAM, ACPI and PCI reserved regions. If there
is a 2MB/1GB hole in the address range that is not listed by the e820
entries, the kernel identity mapping will have a corresponding hole in
its 1-1 identity mapping.

If cpa() happens on the kernel identity mapping which falls into these
holes, existing code fails like this:

  __change_page_attr_set_clr()
      __change_page_attr()
          returns 0 because of if (!kpte). But doesn't set
          cpa->numpages and cpa->pfn.
      cpa_process_alias()
          uses uninitialized cpa->pfn (random value)

which can potentially lead to changing the page attribute of kernel
text/data, kernel identity mapping of RAM pages etc. Oops!

This bug was easily exposed by another PAT patch which was doing cpa()
more often on kernel identity mapping holes (the physical range between
max_low_pfn_mapped and 4GB), where it was setting the cache disable
attribute (PCD) for kernel identity mappings as well.

Fix cpa() to handle the kernel identity mapping holes. Retain the WARN()
for cpa() calls to other not-present address ranges (kernel text/data,
ioremap() addresses).

Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
Cc:
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 49 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f2..84ba74820ad 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
     return 0;
 }

+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+                   int primary)
+{
+    /*
+     * Ignore all non primary paths.
+     */
+    if (!primary)
+        return 0;
+
+    /*
+     * Ignore the NULL PTE for kernel identity mapping, as it is expected
+     * to have holes.
+     * Also set numpages to '1' indicating that we processed cpa req for
+     * one virtual address page and its pfn. TBD: numpages can be set based
+     * on the initial value and the level returned by lookup_address().
+     */
+    if (within(vaddr, PAGE_OFFSET,
+           PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+        cpa->numpages = 1;
+        cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+        return 0;
+    } else {
+        WARN(1, KERN_WARNING "CPA: called for zero pte. "
+             "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+             *cpa->vaddr);
+
+        return -EFAULT;
+    }
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
     unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
     kpte = lookup_address(address, &level);
     if (!kpte)
-        return 0;
+        return __cpa_process_fault(cpa, address, primary);

     old_pte = *kpte;
-    if (!pte_val(old_pte)) {
-        if (!primary)
-            return 0;
-        WARN(1, KERN_WARNING "CPA: called for zero pte. "
-             "vaddr = %lx cpa->vaddr = %lx\n", address,
-             *cpa->vaddr);
-        return -EINVAL;
-    }
+    if (!pte_val(old_pte))
+        return __cpa_process_fault(cpa, address, primary);

     if (level == PG_LEVEL_4K) {
         pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
     vaddr = *cpa->vaddr;

     if (!(within(vaddr, PAGE_OFFSET,
-        PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-        || within(vaddr, PAGE_OFFSET + (1UL<<32),
-              PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-    )) {
+        PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

         alias_cpa = *cpa;
         temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
--
cgit v1.2.3
From 731f1872f4e8a0f1eabd49c3548207e79a421202 Mon Sep 17 00:00:00 2001
From: Thomas Renninger
Date: Tue, 20 Jan 2009 10:37:39 +0100
Subject: x86: mtrr fix debug boot parameter

While looking at:

  http://bugzilla.kernel.org/show_bug.cgi?id=11541

I realized that the mtrr.show param cannot work, because the code is
processed much too early.

This patch:
- Declares mtrr.show as an early_param
- Stays consistent with the previous param (which I doubt ever
  worked), so mtrr.show=1 would still work
- Declares mtrr_show as __initdata

Signed-off-by: Thomas Renninger
Acked-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/mtrr/generic.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd..0c0a455fe95 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);

-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+    mtrr_show = 1;
+    return 0;
+}
+early_param("mtrr.show", mtrr_debug);

 /*
  * Returns the effective MTRR type for the region
--
cgit v1.2.3

From 9597134218300c045cf219be3664615e97cb239c Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Tue, 13 Jan 2009 10:21:30 -0800
Subject: x86: fix PTE corruption issue while mapping RAM using /dev/mem

Beschorner Daniel reported:

> hwinfo problem since 2.6.28, showing this in the oops:
> Corrupted page table at address 7fd04de3ec00

Also, the PaX Team reported a regression with this commit:

> commit 9542ada803198e6eba29d3289abb39ea82047b92
> Author: Suresh Siddha
> Date:   Wed Sep 24 08:53:33 2008 -0700
>
>     x86: track memtype for RAM in page struct

This commit breaks mapping any RAM page through /dev/mem, as
reserve_memtype() was not initializing the return attribute type and
was thereby corrupting the PTE entry that was set up with the return
attribute type.

Because of this bug, applications mapping this RAM page through
/dev/mem will die with a "Corrupted page table at address xxxx" message
in the kernel log, and the kernel identity mapping which maps the
underlying RAM page gets converted to UC.

Fix this by initializing the return attribute type before calling
reserve_ram_pages_type().

Reported-by: PaX Team
Reported-and-tested-by: Beschorner Daniel
Tested-and-Acked-by: PaX Team
Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 070ee4a3b22..ffc88cc00fd 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,6 +333,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                           req_type & _PAGE_CACHE_MASK);
     }

+    if (new_type)
+        *new_type = actual_type;
+
     /*
      * For legacy reasons, some parts of the physical address range in the
      * legacy 1MB region is treated as non-RAM (even when listed as RAM in
@@ -356,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
     new->end   = end;
     new->type  = actual_type;

-    if (new_type)
-        *new_type = actual_type;
-
     spin_lock(&memtype_lock);

     if (cached_entry && start >= cached_start)
--
cgit v1.2.3
From bdf21a49bab28f0d9613e8d8724ef9c9168b61b9 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Wed, 21 Jan 2009 15:01:56 -0800
Subject: x86: add MSR_IA32_MISC_ENABLE bits to <asm/msr-index.h>

Impact: None (new bit definitions currently unused)

Add bit definitions for the MSR_IA32_MISC_ENABLE MSRs to
<asm/msr-index.h>.

Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/msr-index.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index cb58643947b..358acc59ae0 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -202,6 +202,35 @@
 #define MSR_IA32_THERM_STATUS       0x0000019c
 #define MSR_IA32_MISC_ENABLE        0x000001a0

+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING        (1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC            (1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON           (1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL        (1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL       (1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP     (1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT          (1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID        (1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE       (1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE         (1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT         (1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1            (1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE     (1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE        (1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK      (1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE       (1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR           (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX     (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2            (1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE       (1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK     (1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT        (1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE       (1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE      (1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE        (1ULL << 39)
+
 /* Intel Model 6 */
 #define MSR_P6_EVNTSEL0         0x00000186
 #define MSR_P6_EVNTSEL1         0x00000187
--
cgit v1.2.3
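A sketch of how such definitions are typically consumed, mirroring what the next patch in this series does in early_init_intel(): a read-modify-write of the MSR using the symbolic bit names instead of magic numbers.

    #include <asm/msr.h>
    #include <asm/msr-index.h>

    /*
     * Clear the CPUID-limit bit if the BIOS left it set. rdmsrl() and
     * wrmsrl() read/write the full 64-bit MSR; the bit mask comes from
     * the definitions added above.
     */
    static void unmask_cpuid_levels(void)
    {
        u64 misc_enable;

        rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

        if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
            misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
            wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
        }
    }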
From 066941bd4eeb159307a5d7d795100d0887c00442 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Wed, 21 Jan 2009 15:04:32 -0800
Subject: x86: unmask CPUID levels on Intel CPUs

Impact: Fixes crashes with misconfigured BIOSes on XSAVE hardware

Avuton Olrich reported early boot crashes with v2.6.28 and bisected it
down to dc1e35c6e95e8923cf1d3510438b63c600fee1e2 ("x86, xsave: enable
xsave/xrstor on cpus with xsave support").

If the CPUID limit bit in MSR_IA32_MISC_ENABLE is set, clear it to make
all CPUID information available. This is required for some features to
work, in particular XSAVE.

Reported-and-bisected-by: Avuton Olrich
Tested-by: Avuton Olrich
Signed-off-by: H. Peter Anvin
---
 arch/x86/kernel/cpu/intel.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974..43c1dcf0bec 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,16 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+    u64 misc_enable;
+
+    /* Unmask CPUID levels if masked */
+    if (!rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_enable) &&
+        (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)) {
+        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+        c->cpuid_level = cpuid_eax(0);
+    }
+
     if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
         (c->x86 == 0x6 && c->x86_model >= 0x0e))
         set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
--
cgit v1.2.3

From 1058a75f07b9bb8323fb5197be5526220f8b75cf Mon Sep 17 00:00:00 2001
From: Dan Magenheimer
Date: Thu, 22 Jan 2009 14:36:08 -0800
Subject: xen: actually release memory when shrinking domain

Fix this:

> It appears that in the upstream balloon driver,
> the call to HYPERVISOR_update_va_mapping is missing
> from decrease_reservation. I think as a result,
> the balloon driver is eating memory but not
> releasing it to Xen, thus rendering the balloon
> driver essentially useless. (Can be observed via xentop.)

Signed-off-by: Dan Magenheimer
Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Ingo Molnar
---
 drivers/xen/balloon.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8dc7109d61b..8069d520c46 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -298,6 +298,11 @@ static int decrease_reservation(unsigned long nr_pages)
         frame_list[i] = pfn_to_mfn(pfn);

         scrub_page(page);
+
+        ret = HYPERVISOR_update_va_mapping(
+            (unsigned long)__va(pfn << PAGE_SHIFT),
+            __pte_ma(0), 0);
+        BUG_ON(ret);
     }

     /* Ensure that ballooned highmem pages don't have kmaps. */
--
cgit v1.2.3
From 42ef73fe134732b2e91c0326df5fd568da17c4b2 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 23 Jan 2009 17:37:49 +0100
Subject: x86, mm: fix pte_free()

On -rt we were seeing spurious bad page states like:

  Bad page state in process 'firefox'
  page:c1bc2380 flags:0x40000000 mapping:c1bc2390 mapcount:0 count:0
  Trying to fix it up, but a reboot is needed
  Backtrace:
  Pid: 503, comm: firefox Not tainted 2.6.26.8-rt13 #3
  [] ? printk+0x14/0x19
  [] bad_page+0x4e/0x79
  [] free_hot_cold_page+0x5b/0x1d3
  [] free_hot_page+0xf/0x11
  [] __free_pages+0x20/0x2b
  [] __pte_alloc+0x87/0x91
  [] handle_mm_fault+0xe4/0x733
  [] ? rt_mutex_down_read_trylock+0x57/0x63
  [] ? rt_mutex_down_read_trylock+0x57/0x63
  [] do_page_fault+0x36f/0x88a

This is the case where a concurrent fault already installed the PTE and
we get to free the newly allocated one.

This is due to pgtable_page_ctor() doing the spin_lock_init(&page->ptl)
which is overlaid with the {private, mapping} struct:

  union {
      struct {
          unsigned long private;
          struct address_space *mapping;
      };
      spinlock_t ptl;
      struct kmem_cache *slab;
      struct page *first_page;
  };

Normally the spinlock is small enough to not stomp on page->mapping,
but PREEMPT_RT=y has huge 'spin'locks. Lockdep kernels should also be
able to trigger this splat, as the lock tracking code grows the
spinlock to cover page->mapping.

The obvious fix is calling pgtable_page_dtor() like the regular pte
free path __pte_free_tlb() does.

It seems all architectures except x86 and mn10300 already do this, and
mn10300 doesn't seem to use pgtable_page_ctor(), which suggests it
doesn't do SMP, or simply doesn't do MMU at all, or something.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
Cc:
---
 arch/x86/include/asm/pgalloc.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index cb7c151a8bf..dd14c54ac71 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)

 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+    pgtable_page_dtor(pte);
     __free_page(pte);
 }
--
cgit v1.2.3

From ff4ce8c332859508dc97826ab8b7f42bb9c212c9 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Fri, 23 Jan 2009 16:26:21 +0000
Subject: xen: handle highmem pages correctly when shrinking a domain

Commit 1058a75f07b9bb8323fb5197be5526220f8b75cf ("xen: actually release
memory when shrinking domain") causes a crash if the page being
released is a highmem page. If a page is highmem then there is no need
to unmap it.

Signed-off-by: Ian Campbell
Acked-by: Jeremy Fitzhardinge
Signed-off-by: Ingo Molnar
---
 drivers/xen/balloon.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8069d520c46..2ba8f95516a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -299,10 +299,13 @@ static int decrease_reservation(unsigned long nr_pages)

         scrub_page(page);

-        ret = HYPERVISOR_update_va_mapping(
-            (unsigned long)__va(pfn << PAGE_SHIFT),
-            __pte_ma(0), 0);
-        BUG_ON(ret);
+        if (!PageHighMem(page)) {
+            ret = HYPERVISOR_update_va_mapping(
+                (unsigned long)__va(pfn << PAGE_SHIFT),
+                __pte_ma(0), 0);
+            BUG_ON(ret);
+        }
+
     }

     /* Ensure that ballooned highmem pages don't have kmaps. */
--
cgit v1.2.3
From e1b4d1143651fb3838be1117785b6e0386fa151f Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 25 Jan 2009 16:57:00 +0100
Subject: x86: use standard PIT frequency

The RDC and ELAN platforms use slightly different PIT clocks, resulting
in a timex.h hack that changes PIT_TICK_RATE during build time. But if
a tester enables any of these platform support .config options, the PIT
will be miscalibrated on standard PC platforms.

So use one frequency - in a subsequent patch we'll add a quirk to allow
x86 platforms to define different PIT frequencies.

Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/timex.h | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index 1287dc1347d..b5c9d45c981 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
 #ifndef _ASM_X86_TIMEX_H
 #define _ASM_X86_TIMEX_H

 #include <asm/processor.h>
 #include <asm/tsc.h>

-#ifdef CONFIG_X86_ELAN
-# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-# define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE       1193182
+
+#define CLOCK_TICK_RATE     PIT_TICK_RATE

 #define ARCH_HAS_READ_CURRENT_TIMER
--
cgit v1.2.3
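For context on why the rate must match the hardware: timer calibration derives the PIT divisor from this constant. The LATCH definition below is quoted from include/linux/jiffies.h of this era (reproduced from memory, so treat it as a sketch):

    /*
     * The divisor programmed into PIT channel 0 for the timer
     * interrupt, rounded to the nearest integer. With the standard
     * 1193182 Hz input clock and HZ=1000 this is 1193, i.e. an actual
     * interrupt rate of ~1000.15 Hz. A platform-specific PIT_TICK_RATE
     * baked in at build time would skew this calibration on every
     * normal PC, which is what this patch avoids.
     */
    #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)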
From ef5fa0ab24b87646c7bc98645acbb4b51fc2acd4 Mon Sep 17 00:00:00 2001
From: Eric Anholt
Date: Fri, 23 Jan 2009 14:14:21 -0800
Subject: x86: work around PAGE_KERNEL_WC not getting WC in iomap_atomic_prot_pfn.

In the absence of PAT, PAGE_KERNEL_WC ends up mapping to a memory type
that gets UC behavior even in the presence of a WC MTRR covering the
area in question. By swapping to PAGE_KERNEL_UC_MINUS, we can get the
actual behavior the caller wanted (WC if you can manage it, UC
otherwise).

This recovers the 40% performance improvement of using WC in the DRM
to upload vertex data.

Signed-off-by: Eric Anholt
Signed-off-by: H. Peter Anvin
---
 arch/x86/mm/iomap_32.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index d0151d8ce45..ca53224fc56 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -17,6 +17,7 @@
 */

 #include <asm/iomap.h>
+#include <asm/pat.h>
 #include <linux/module.h>

 /* Map 'pfn' using fixed map 'type' and protections 'prot'
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)

     pagefault_disable();

+    /*
+     * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
+     * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
+     * MTRR is UC or WC. UC_MINUS gets the real intention, of the
+     * user, which is "WC if the MTRR is WC, UC if you can't do that."
+     */
+    if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
+        prot = PAGE_KERNEL_UC_MINUS;
+
     idx = type + KM_TYPE_NR*smp_processor_id();
     vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
     set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
--
cgit v1.2.3

From 99fb4d349db7e7dacb2099c5cc320a9e2d31c1ef Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 26 Jan 2009 04:30:41 +0100
Subject: x86: unmask CPUID levels on Intel CPUs, fix

Impact: fix boot hang on pre-model-15 Intel CPUs

rdmsrl_safe() does not work in very early bootup code yet, because we
don't have the pagefault handler installed yet, so the exception
section does not get parsed. rdmsrl_safe() will just crash and hang
the bootup.

So limit the MSR_IA32_MISC_ENABLE MSR read to those CPU types that
support it.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/intel.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 43c1dcf0bec..549f2ada55f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,14 +29,17 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-    u64 misc_enable;
-
-    /* Unmask CPUID levels if masked */
-    if (!rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_enable) &&
-        (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)) {
-        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
-        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-        c->cpuid_level = cpuid_eax(0);
+    /* Unmask CPUID levels if masked: */
+    if (c->x86 == 6 && c->x86_model >= 15) {
+        u64 misc_enable;
+
+        rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+        if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+            misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+            wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+            c->cpuid_level = cpuid_eax(0);
+        }
     }

     if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
--
cgit v1.2.3

From 659d2618b38f8a4d91bdb19cfc5c7fb330a4c55a Mon Sep 17 00:00:00 2001
From: Rakib Mullick
Date: Sat, 24 Jan 2009 01:46:03 +0600
Subject: x86: fix section mismatch warning

Here the function vmi_activate() calls init functions, which causes the
following section mismatch warnings:

  LD      arch/x86/kernel/built-in.o
WARNING: arch/x86/kernel/built-in.o(.text+0x13ba9): Section mismatch in reference from the function vmi_activate() to the function .init.text:vmi_time_init()
The function vmi_activate() references the function __init vmi_time_init().
This is often because vmi_activate lacks a __init annotation or the annotation of vmi_time_init is wrong.

WARNING: arch/x86/kernel/built-in.o(.text+0x13bd1): Section mismatch in reference from the function vmi_activate() to the function .devinit.text:vmi_time_bsp_init()
The function vmi_activate() references the function __devinit vmi_time_bsp_init().
This is often because vmi_activate lacks a __devinit annotation or the annotation of vmi_time_bsp_init is wrong.

WARNING: arch/x86/kernel/built-in.o(.text+0x13bdb): Section mismatch in reference from the function vmi_activate() to the function .devinit.text:vmi_time_ap_init()
The function vmi_activate() references the function __devinit vmi_time_ap_init().
This is often because vmi_activate lacks a __devinit annotation or the annotation of vmi_time_ap_init is wrong.

Fix it by marking vmi_activate() as __init too.

Signed-off-by: Rakib Mullick
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/vmi_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 23206ba1687..1d3302cc2dd 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -858,7 +858,7 @@ void __init vmi_init(void)
 #endif
 }

-void vmi_activate(void)
+void __init vmi_activate(void)
 {
     unsigned long flags;
--
cgit v1.2.3
From e88a0faae5baaaa3bdc6f23a55ad6bc7a7b4aa77 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Sat, 24 Jan 2009 08:22:47 +0000
Subject: xen: uninitialised return value in xenbus_write_transaction

The return value of xenbus_write_transaction() can be uninitialised in
the success case, leading to the userspace xenstore utilities failing.

Signed-off-by: Ian Campbell
Signed-off-by: Ingo Molnar
---
 drivers/xen/xenfs/xenbus.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 875a4c59c59..a9592d981b1 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch,
 static int xenbus_write_transaction(unsigned msg_type,
                     struct xenbus_file_priv *u)
 {
-    int rc, ret;
+    int rc;
     void *reply;
     struct xenbus_transaction_holder *trans = NULL;
     LIST_HEAD(staging_q);
@@ -326,15 +326,14 @@ static int xenbus_write_transaction(unsigned msg_type,
     }

     mutex_lock(&u->reply_mutex);
-    ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-    if (!ret)
-        ret = queue_reply(&staging_q, reply, u->u.msg.len);
-    if (!ret) {
+    rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+    if (!rc)
+        rc = queue_reply(&staging_q, reply, u->u.msg.len);
+    if (!rc) {
         list_splice_tail(&staging_q, &u->read_buffers);
         wake_up(&u->read_waitq);
     } else {
         queue_cleanup(&staging_q);
-        rc = ret;
     }
     mutex_unlock(&u->reply_mutex);
--
cgit v1.2.3
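The bug class, reduced to a standalone illustration with hypothetical names (not the xenbus code itself): when success and failure status travel through two variables, the one actually returned can miss initialisation on the success path.

    /*
     * 'rc' is what gets returned, but only the failure branch ever
     * writes it, so a successful call returns stack garbage. The fix
     * above collapses both variables into one.
     */
    static int transaction_sketch(int (*do_work)(void))
    {
        int rc, ret;        /* rc never written on success */

        ret = do_work();
        if (ret)
            rc = ret;       /* only the failure path sets rc */

        return rc;          /* uninitialised when do_work() returns 0 */
    }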