Diffstat (limited to 'arch')
39 files changed, 379 insertions, 149 deletions
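Several of the hunks below (arch/arm/mach-omap2/usb-musb.c, arch/ia64/kernel/pci-swiotlb.c) switch from the fixed DMA_32BIT_MASK / DMA_64BIT_MASK constants to the parameterized DMA_BIT_MASK(n) helper. For reference, a minimal illustrative sketch of what that helper expands to and how a device mask is declared; the in-tree definition lives in include/linux/dma-mapping.h, and the EXAMPLE_ prefix and the variable below are placeholders, not part of the patch:

#include <linux/types.h>

/* Illustrative equivalent of DMA_BIT_MASK(n): all bits below bit n set,
 * with n == 64 special-cased to avoid an undefined 64-bit shift. */
#define EXAMPLE_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* A 32-bit DMA-capable device advertises its addressing limit this way. */
static u64 example_dmamask = EXAMPLE_DMA_BIT_MASK(32);  /* 0x00000000ffffffff */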
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index fc74e913c41..34a56a136ef 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -131,14 +131,14 @@ static struct musb_hdrc_platform_data musb_plat = { .power = 50, /* up to 100 mA */ }; -static u64 musb_dmamask = DMA_32BIT_MASK; +static u64 musb_dmamask = DMA_BIT_MASK(32); static struct platform_device musb_device = { .name = "musb_hdrc", .id = -1, .dev = { .dma_mask = &musb_dmamask, - .coherent_dma_mask = DMA_32BIT_MASK, + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), @@ -146,14 +146,14 @@ static struct platform_device musb_device = { }; #ifdef CONFIG_NOP_USB_XCEIV -static u64 nop_xceiv_dmamask = DMA_32BIT_MASK; +static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32); static struct platform_device nop_xceiv_device = { .name = "nop_usb_xceiv", .id = -1, .dev = { .dma_mask = &nop_xceiv_dmamask, - .coherent_dma_mask = DMA_32BIT_MASK, + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = NULL, }, }; diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 573f02c39a0..285aae8431c 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb); static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - if (dev->coherent_dma_mask != DMA_64BIT_MASK) + if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) gfp |= GFP_DMA; return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); } diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h index 3cb50d17b62..12ee8d51016 100644 --- a/arch/mips/include/asm/mach-rc32434/gpio.h +++ b/arch/mips/include/asm/mach-rc32434/gpio.h @@ -80,6 +80,9 @@ struct rb532_gpio_reg { /* Compact Flash GPIO pin */ #define CF_GPIO_NUM 13 +/* S1 button GPIO (shared with UART0_SIN) */ +#define GPIO_BTN_S1 1 + extern void rb532_gpio_set_ilevel(int bit, unsigned gpio); extern void rb532_gpio_set_istat(int bit, unsigned gpio); extern void rb532_gpio_set_func(unsigned gpio); diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 4a5f05b662a..9f40e1ff9b4 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -200,26 +200,9 @@ static struct platform_device rb532_led = { .id = -1, }; -static struct gpio_keys_button rb532_gpio_btn[] = { - { - .gpio = 1, - .code = BTN_0, - .desc = "S1", - .active_low = 1, - } -}; - -static struct gpio_keys_platform_data rb532_gpio_btn_data = { - .buttons = rb532_gpio_btn, - .nbuttons = ARRAY_SIZE(rb532_gpio_btn), -}; - static struct platform_device rb532_button = { - .name = "gpio-keys", + .name = "rb532-button", .id = -1, - .dev = { - .platform_data = &rb532_gpio_btn_data, - } }; static struct resource rb532_wdt_res[] = { diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h index 414c50e2e88..94942d60ddf 100644 --- a/arch/powerpc/include/asm/parport.h +++ b/arch/powerpc/include/asm/parport.h @@ -29,7 +29,7 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) prop = of_get_property(np, "interrupts", NULL); if (!prop) continue; - if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL) + if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL) count++; } return count; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5e4babecf93..e7390dd0283 100644 --- a/arch/sh/Kconfig +++ 
b/arch/sh/Kconfig @@ -14,6 +14,7 @@ config SUPERH select HAVE_GENERIC_DMA_COHERENT select HAVE_IOREMAP_PROT if MMU select HAVE_ARCH_TRACEHOOK + select HAVE_DMA_API_DEBUG help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast @@ -21,7 +22,7 @@ config SUPERH <http://www.linux-sh.org/>. config SUPERH32 - def_bool !SUPERH64 + def_bool ARCH = "sh" select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_FUNCTION_TRACER @@ -31,7 +32,7 @@ config SUPERH32 select ARCH_HIBERNATION_POSSIBLE if MMU config SUPERH64 - def_bool y if CPU_SH5 + def_bool ARCH = "sh64" config ARCH_DEFCONFIG string @@ -187,6 +188,8 @@ config ARCH_SHMOBILE bool select ARCH_SUSPEND_POSSIBLE +if SUPERH32 + choice prompt "Processor sub-type selection" @@ -408,6 +411,15 @@ config CPU_SUBTYPE_SH7366 select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_CMT +endchoice + +endif + +if SUPERH64 + +choice + prompt "Processor sub-type selection" + # SH-5 Processor Support config CPU_SUBTYPE_SH5_101 @@ -420,6 +432,8 @@ config CPU_SUBTYPE_SH5_103 endchoice +endif + source "arch/sh/mm/Kconfig" source "arch/sh/Kconfig.cpu" diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 912458f666e..39e46919df1 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -349,6 +349,7 @@ static int ov7725_power(struct device *dev, int mode) static struct ov772x_camera_info ov7725_info = { .buswidth = SOCAM_DATAWIDTH_8, .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP, + .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0), .link = { .power = ov7725_power, }, diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c index 8367d1d789c..beb88c4da2c 100644 --- a/arch/sh/boards/board-urquell.c +++ b/arch/sh/boards/board-urquell.c @@ -2,6 +2,8 @@ * Renesas Technology Corp. SH7786 Urquell Support. 
* * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * Based on board-sh7785lcr.c * Copyright (C) 2008 Yoshihiro Shimoda * * This file is subject to the terms and conditions of the GNU General Public @@ -21,6 +23,32 @@ #include <asm/heartbeat.h> #include <asm/sizes.h> +/* + * bit 1234 5678 + *---------------------------- + * SW1 0101 0010 -> Pck 33MHz version + * (1101 0010) Pck 66MHz version + * SW2 0x1x xxxx -> little endian + * 29bit mode + * SW47 0001 1000 -> CS0 : on-board flash + * CS1 : SRAM, registers, LAN, PCMCIA + * 38400 bps for SCIF1 + * + * Address + * 0x00000000 - 0x04000000 (CS0) Nor Flash + * 0x04000000 - 0x04200000 (CS1) SRAM + * 0x05000000 - 0x05800000 (CS1) on board register + * 0x05800000 - 0x06000000 (CS1) LAN91C111 + * 0x06000000 - 0x06400000 (CS1) PCMCIA + * 0x08000000 - 0x10000000 (CS2-CS3) DDR3 + * 0x10000000 - 0x14000000 (CS4) PCIe + * 0x14000000 - 0x14800000 (CS5) Core0 LRAM/URAM + * 0x14800000 - 0x15000000 (CS5) Core1 LRAM/URAM + * 0x18000000 - 0x1C000000 (CS6) ATA/NAND-Flash + * 0x1C000000 - (CS7) SH7786 Control register + */ + +/* HeartBeat */ static struct resource heartbeat_resources[] = { [0] = { .start = BOARDREG(SLEDR), @@ -43,6 +71,7 @@ static struct platform_device heartbeat_device = { .resource = heartbeat_resources, }; +/* LAN91C111 */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; @@ -69,6 +98,7 @@ static struct platform_device smc91x_eth_device = { }, }; +/* Nor Flash */ static struct mtd_partition nor_flash_partitions[] = { { .name = "loader", diff --git a/arch/sh/drivers/pci/ops-sh7785lcr.c b/arch/sh/drivers/pci/ops-sh7785lcr.c index e8b7446a7c2..fb0869f0bef 100644 --- a/arch/sh/drivers/pci/ops-sh7785lcr.c +++ b/arch/sh/drivers/pci/ops-sh7785lcr.c @@ -48,8 +48,13 @@ EXPORT_SYMBOL(board_pci_channels); static struct sh4_pci_address_map sh7785_pci_map = { .window0 = { +#if defined(CONFIG_32BIT) + .base = SH7780_32BIT_DDR_BASE_ADDR, + .size = 0x40000000, +#else .base = SH7780_CS0_BASE_ADDR, .size = 0x20000000, +#endif }, .flags = SH4_PCIC_NO_RESET, diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h index 97b2c98f05c..93adc7119b7 100644 --- a/arch/sh/drivers/pci/pci-sh7780.h +++ b/arch/sh/drivers/pci/pci-sh7780.h @@ -104,6 +104,8 @@ #define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE) #define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE) +#define SH7780_32BIT_DDR_BASE_ADDR 0x40000000 + struct sh4_pci_address_map; /* arch/sh/drivers/pci/pci-sh7780.c */ diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index e36c7b87086..0d6ac7a1db4 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/dma-debug.h> #include <asm/io.h> static int __init pcibios_init(void) @@ -43,6 +44,8 @@ static int __init pcibios_init(void) pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq); + dma_debug_add_bus(&pci_bus_type); + return 0; } subsys_initcall(pcibios_init); diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 627315ecdb5..ea9d4f41c9d 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -3,6 +3,7 @@ #include <linux/mm.h> #include <linux/scatterlist.h> +#include <linux/dma-debug.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm-generic/dma-coherent.h> @@ -38,16 +39,26 @@ static inline 
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) { + dma_addr_t addr = virt_to_phys(ptr); + #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) if (dev->bus == &pci_bus_type) - return virt_to_phys(ptr); + return addr; #endif dma_cache_sync(dev, ptr, size, dir); - return virt_to_phys(ptr); + debug_dma_map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, addr, true); + + return addr; } -#define dma_unmap_single(dev, addr, size, dir) do { } while (0) +static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + debug_dma_unmap_page(dev, addr, size, dir, true); +} static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) @@ -59,12 +70,19 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); #endif sg[i].dma_address = sg_phys(&sg[i]); + sg[i].dma_length = sg[i].length; } + debug_dma_map_sg(dev, sg, nents, i, dir); + return nents; } -#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + debug_dma_unmap_sg(dev, sg, nents, dir); +} static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, @@ -111,6 +129,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); #endif sg[i].dma_address = sg_phys(&sg[i]); + sg[i].dma_length = sg[i].length; } } @@ -119,6 +138,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, enum dma_data_direction dir) { dma_sync_single(dev, dma_handle, size, dir); + debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir); } static inline void dma_sync_single_for_device(struct device *dev, @@ -127,6 +147,7 @@ static inline void dma_sync_single_for_device(struct device *dev, enum dma_data_direction dir) { dma_sync_single(dev, dma_handle, size, dir); + debug_dma_sync_single_for_device(dev, dma_handle, size, dir); } static inline void dma_sync_single_range_for_cpu(struct device *dev, @@ -136,6 +157,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, enum dma_data_direction direction) { dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); + debug_dma_sync_single_range_for_cpu(dev, dma_handle, + offset, size, direction); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -145,6 +168,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev, enum dma_data_direction direction) { dma_sync_single_for_device(dev, dma_handle+offset, size, direction); + debug_dma_sync_single_range_for_device(dev, dma_handle, + offset, size, direction); } @@ -153,6 +178,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, enum dma_data_direction dir) { dma_sync_sg(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); } static inline void dma_sync_sg_for_device(struct device *dev, @@ -160,9 +186,9 @@ static inline void dma_sync_sg_for_device(struct device *dev, enum dma_data_direction dir) { dma_sync_sg(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); } - static inline int dma_get_cache_alignment(void) { /* diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h index 2084d037369..c693d268a41 
100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -5,12 +5,13 @@ struct scatterlist {
 #ifdef CONFIG_DEBUG_SG
-        unsigned long sg_magic;
+        unsigned long   sg_magic;
 #endif
-        unsigned long page_link;
-        unsigned int offset;/* for highmem, page offset */
-        dma_addr_t dma_address;
-        unsigned int length;
+        unsigned long   page_link;
+        unsigned int    offset;         /* for highmem, page offset */
+        unsigned int    length;
+        dma_addr_t      dma_address;
+        unsigned int    dma_length;
 };
 #define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index a3f23954589..8489a0905a8 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -37,8 +37,11 @@
 #define pcibus_to_node(bus) ((void)(bus), -1)
 #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
                                        CPU_MASK_ALL : \
-                                       node_to_cpumask(pcibus_to_node(bus)) \
-                               )
+                                       node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+                                       CPU_MASK_ALL_PTR : \
+                                       cpumask_of_node(pcibus_to_node(bus)))
+
 #endif
 #include <asm-generic/topology.h>
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index d52c000cf92..2efb819e2db 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -341,8 +341,10 @@
 #define __NR_dup3 330
 #define __NR_pipe2 331
 #define __NR_inotify_init1 332
+#define __NR_preadv 333
+#define __NR_pwritev 334
-#define NR_syscalls 333
+#define NR_syscalls 335
 #ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 7c54e91753c..6eb9d2934c0 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -381,10 +381,12 @@
 #define __NR_dup3 358
 #define __NR_pipe2 359
 #define __NR_inotify_init1 360
+#define __NR_preadv 361
+#define __NR_pwritev 362
 #ifdef __KERNEL__
-#define NR_syscalls 361
+#define NR_syscalls 363
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 5a47e1cf442..90e8cfff55f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -143,14 +143,14 @@ static void __init sh7786_usb_setup(void)
          * Set the PHY and PLL enable bit
          */
         __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
-        while (i-- &&
-               ((__raw_readl(USBST) & ACT_PLL_STATUS) != ACT_PLL_STATUS))
+        while (i--) {
+                if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
+                        /* Set the PHY RST bit */
+                        __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
+                        printk(KERN_INFO "sh7786 usb setup done\n");
+                        break;
+                }
                 cpu_relax();
-
-        if (i) {
-                /* Set the PHY RST bit */
-                __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
-                printk(KERN_INFO "sh7786 usb setup done\n");
         }
 }
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index e67c1733e1b..05202edd8e2 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -349,3 +349,5 @@ ENTRY(sys_call_table)
         .long sys_dup3                  /* 330 */
         .long sys_pipe2
         .long sys_inotify_init1
+        .long sys_preadv
+        .long sys_pwritev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 557cb91f5ca..a083609f928 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -387,3 +387,5 @@ sys_call_table:
         .long sys_dup3
         .long sys_pipe2
         .long sys_inotify_init1         /* 360 */
+        .long sys_preadv
+        .long sys_pwritev
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index
edcd5fbf965..e098ec158dd 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -10,11 +10,22 @@ * for more details. */ #include <linux/mm.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/dma-debug.h> +#include <linux/io.h> #include <asm/cacheflush.h> #include <asm/addrspace.h> -#include <asm/io.h> + +#define PREALLOC_DMA_DEBUG_ENTRIES 4096 + +static int __init dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +fs_initcall(dma_init); void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) @@ -45,6 +56,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); *dma_handle = virt_to_phys(ret); + + debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache); + return ret_nocache; } EXPORT_SYMBOL(dma_alloc_coherent); @@ -56,12 +70,15 @@ void dma_free_coherent(struct device *dev, size_t size, unsigned long pfn = dma_handle >> PAGE_SHIFT; int k; - if (!dma_release_from_coherent(dev, order, vaddr)) { - WARN_ON(irqs_disabled()); /* for portability */ - for (k = 0; k < (1 << order); k++) - __free_pages(pfn_to_page(pfn + k), 0); - iounmap(vaddr); - } + WARN_ON(irqs_disabled()); /* for portability */ + + if (dma_release_from_coherent(dev, order, vaddr)) + return; + + debug_dma_free_coherent(dev, size, vaddr, dma_handle); + for (k = 0; k < (1 << order); k++) + __free_pages(pfn_to_page(pfn + k), 0); + iounmap(vaddr); } EXPORT_SYMBOL(dma_free_coherent); diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index dff3f0253aa..ff9ead640c4 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id if (!strcmp(parent->name, "dma")) { p = parport_pc_probe_port(base, base + 0x400, op->irqs[0], PARPORT_DMA_NOFIFO, - op->dev.parent->parent); + op->dev.parent->parent, 0); if (!p) return -ENOMEM; dev_set_drvdata(&op->dev, p); @@ -168,7 +168,8 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id p = parport_pc_probe_port(base, base + 0x400, op->irqs[0], slot, - op->dev.parent); + op->dev.parent, + 0); err = -ENOMEM; if (!p) goto out_disable_irq; diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index e5383e3d2f8..73739322b6d 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); */ extern void early_ioremap_init(void); extern void early_ioremap_reset(void); -extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); -extern void __iomem *early_memremap(unsigned long offset, unsigned long size); +extern void __iomem *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void __iomem *early_memremap(resource_size_t phys_addr, + unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); #define IO_SPACE_LIMIT 0xffff diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 34c52370f2f..fcf4d92e7e0 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -352,6 +352,11 @@ struct i387_soft_struct { u32 entry_eip; }; +struct ymmh_struct { + /* 16 * 16 bytes for each YMMH-reg = 256 bytes */ + u32 ymmh_space[64]; +}; + struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2]; @@ 
-361,6 +366,7 @@ struct xsave_hdr_struct { struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; + struct ymmh_struct ymmh; /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index d5cd6c58688..a4737dddfd5 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -50,7 +50,7 @@ #ifdef CONFIG_X86_64 #define NEED_PSE 0 #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) -#define NEED_PGE (1<<(X86_FEATURE_PGE & 31)) +#define NEED_PGE 0 #define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) #define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) #define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index ec666491aaa..72e5a449166 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -269,6 +269,11 @@ struct _xsave_hdr { __u64 reserved2[5]; }; +struct _ymmh_state { + /* 16 * 16 bytes for each YMMH-reg */ + __u32 ymmh_space[64]; +}; + /* * Extended state pointed by the fpstate pointer in the sigcontext. * In addition to the fpstate, information encoded in the xstate_hdr @@ -278,6 +283,7 @@ struct _xsave_hdr { struct _xstate { struct _fpstate fpstate; struct _xsave_hdr xstate_hdr; + struct _ymmh_state ymmh; /* new processor state extensions go here */ }; diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 1a918dde46b..018a0a40079 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) -#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) +#define virt_to_pfn(v) (PFN_DOWN(__pa(v))) +#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) static inline unsigned long pte_mfn(pte_t pte) diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 08e9a1ac07a..727acc15234 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -7,6 +7,7 @@ #define XSTATE_FP 0x1 #define XSTATE_SSE 0x2 +#define XSTATE_YMM 0x4 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) @@ -15,7 +16,7 @@ /* * These are the features that the OS can handle currently. 
*/ -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE) +#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) #ifdef CONFIG_X86_64 #define REX_PREFIX "0x48, " diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 767fe7e46d6..a2789e42e16 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp) static inline void irq_complete_move(struct irq_desc **descp) {} #endif -#ifdef CONFIG_X86_X2APIC static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) { int apic, pin; @@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc) spin_unlock_irqrestore(&ioapic_lock, flags); } +#ifdef CONFIG_X86_X2APIC static void ack_x2apic_level(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); @@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq) */ ack_APIC_irq(); + if (irq_remapped(irq)) + eoi_ioapic_irq(desc); + /* Now we can move and renable the irq */ if (unlikely(do_unmask_irq)) { /* Only migrate the irq if the ack has been received. diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 9d3af380c6b..837c2c4cc20 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -153,7 +153,8 @@ struct drv_cmd { u32 val; }; -static long do_drv_read(void *_cmd) +/* Called via smp_call_function_single(), on the target CPU */ +static void do_drv_read(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 h; @@ -170,10 +171,10 @@ static long do_drv_read(void *_cmd) default: break; } - return 0; } -static long do_drv_write(void *_cmd) +/* Called via smp_call_function_many(), on the target CPUs */ +static void do_drv_write(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 lo, hi; @@ -192,23 +193,18 @@ static long do_drv_write(void *_cmd) default: break; } - return 0; } static void drv_read(struct drv_cmd *cmd) { cmd->val = 0; - work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); + smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1); } static void drv_write(struct drv_cmd *cmd) { - unsigned int i; - - for_each_cpu(i, cmd->mask) { - work_on_cpu(i, do_drv_write, cmd); - } + smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); } static u32 get_cur_val(const struct cpumask *mask) @@ -252,15 +248,13 @@ struct perf_pair { } aperf, mperf; }; - -static long read_measured_perf_ctrs(void *_cur) +/* Called via smp_call_function_single(), on the target CPU */ +static void read_measured_perf_ctrs(void *_cur) { struct perf_pair *cur = _cur; rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi); rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi); - - return 0; } /* @@ -283,7 +277,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, unsigned int perf_percent; unsigned int retval; - if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin)) + if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1)) return 0; cur.aperf.whole = readin.aperf.whole - diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index dce99dca6cf..70fd7e414c1 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -679,7 +679,7 @@ void __init get_smp_config(void) __get_smp_config(0); } -static void smp_reserve_bootmem(struct mpf_intel *mpf) +static void __init smp_reserve_bootmem(struct mpf_intel *mpf) { unsigned long size = get_mpc_size(mpf->physptr); #ifdef CONFIG_X86_32 @@ -838,7 +838,7 @@ static int __init 
get_MP_intsrc_index(struct mpc_intsrc *m) static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; -static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) +static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) { int i; @@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) } } #else /* CONFIG_X86_IO_APIC */ -static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} +static +inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} #endif /* CONFIG_X86_IO_APIC */ static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 2b54fe002e9..0a5b04aa98f 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void) } /* - * for now OS knows only about FP/SSE + * Support only the state known to OS. */ pcntxt_mask = pcntxt_mask & XCNTXT_MASK; xsave_init(); diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index be54176e9eb..6340cef6798 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } +/** + * get_user_pages_fast() - pin user pages in memory + * @start: starting user address + * @nr_pages: number of pages from start to pin + * @write: whether pages will be written to + * @pages: array that receives pointers to the pages pinned. + * Should be at least nr_pages long. + * + * Attempt to pin user pages in memory without taking mm->mmap_sem. + * If not successful, it will fall back to taking the lock and + * calling get_user_pages(). + * + * Returns number of pages pinned. This may be fewer than the number + * requested. If nr_pages is 0 or negative, returns 0. If no pages + * were pinned, returns -errno. 
+ */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0dfa09d69e8..09daebfdb11 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -547,7 +547,7 @@ void __init early_ioremap_reset(void) } static void __init __early_set_fixmap(enum fixed_addresses idx, - unsigned long phys, pgprot_t flags) + phys_addr_t phys, pgprot_t flags) { unsigned long addr = __fix_to_virt(idx); pte_t *pte; @@ -566,7 +566,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx, } static inline void __init early_set_fixmap(enum fixed_addresses idx, - unsigned long phys, pgprot_t prot) + phys_addr_t phys, pgprot_t prot) { if (after_paging_init) __set_fixmap(idx, phys, prot); @@ -607,9 +607,10 @@ static int __init check_early_ioremap_leak(void) late_initcall(check_early_ioremap_leak); static void __init __iomem * -__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) +__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) { - unsigned long offset, last_addr; + unsigned long offset; + resource_size_t last_addr; unsigned int nrpages; enum fixed_addresses idx0, idx; int i, slot; @@ -625,15 +626,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) } if (slot < 0) { - printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n", - phys_addr, size); + printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n", + (u64)phys_addr, size); WARN_ON(1); return NULL; } if (early_ioremap_debug) { - printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", - phys_addr, size, slot); + printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ", + (u64)phys_addr, size, slot); dump_stack(); } @@ -680,13 +681,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) } /* Remap an IO device */ -void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) +void __init __iomem * +early_ioremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); } /* Remap memory */ -void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) +void __init __iomem * +early_memremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, PAGE_KERNEL); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 640339ee4fb..c009a241d56 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -31,7 +31,7 @@ #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; -void __cpuinit pat_disable(const char *reason) +static inline void pat_disable(const char *reason) { pat_enabled = 0; printk(KERN_INFO "%s\n", reason); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 82cd39a6cbd..f09e8c36ee8 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -42,6 +42,7 @@ #include <asm/xen/hypervisor.h> #include <asm/fixmap.h> #include <asm/processor.h> +#include <asm/proto.h> #include <asm/msr-index.h> #include <asm/setup.h> #include <asm/desc.h> @@ -168,21 +169,23 @@ static void __init xen_banner(void) xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? 
" (preserve-AD)" : ""); } +static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; +static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; + static void xen_cpuid(unsigned int *ax, unsigned int *bx, unsigned int *cx, unsigned int *dx) { + unsigned maskecx = ~0; unsigned maskedx = ~0; /* * Mask out inconvenient features, to try and disable as many * unsupported kernel subsystems as possible. */ - if (*ax == 1) - maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ - (1 << X86_FEATURE_ACPI) | /* disable ACPI */ - (1 << X86_FEATURE_MCE) | /* disable MCE */ - (1 << X86_FEATURE_MCA) | /* disable MCA */ - (1 << X86_FEATURE_ACC)); /* thermal monitoring */ + if (*ax == 1) { + maskecx = cpuid_leaf1_ecx_mask; + maskedx = cpuid_leaf1_edx_mask; + } asm(XEN_EMULATE_PREFIX "cpuid" : "=a" (*ax), @@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, "=c" (*cx), "=d" (*dx) : "0" (*ax), "2" (*cx)); + + *cx &= maskecx; *dx &= maskedx; } +static __init void xen_init_cpuid_mask(void) +{ + unsigned int ax, bx, cx, dx; + + cpuid_leaf1_edx_mask = + ~((1 << X86_FEATURE_MCE) | /* disable MCE */ + (1 << X86_FEATURE_MCA) | /* disable MCA */ + (1 << X86_FEATURE_ACC)); /* thermal monitoring */ + + if (!xen_initial_domain()) + cpuid_leaf1_edx_mask &= + ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ + (1 << X86_FEATURE_ACPI)); /* disable ACPI */ + + ax = 1; + xen_cpuid(&ax, &bx, &cx, &dx); + + /* cpuid claims we support xsave; try enabling it to see what happens */ + if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { + unsigned long cr4; + + set_in_cr4(X86_CR4_OSXSAVE); + + cr4 = read_cr4(); + + if ((cr4 & X86_CR4_OSXSAVE) == 0) + cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); + + clear_in_cr4(X86_CR4_OSXSAVE); + } +} + static void xen_set_debugreg(int reg, unsigned long val) { HYPERVISOR_set_debugreg(reg, val); @@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries) static void xen_load_gdt(const struct desc_ptr *dtr) { - unsigned long *frames; unsigned long va = dtr->address; unsigned int size = dtr->size + 1; unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + unsigned long frames[pages]; int f; - struct multicall_space mcs; /* A GDT can be up to 64k in size, which corresponds to 8192 8-byte entries, or 16 4k pages.. 
*/ @@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr) BUG_ON(size > 65536); BUG_ON(va & ~PAGE_MASK); - mcs = xen_mc_entry(sizeof(*frames) * pages); - frames = mcs.args; - for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { - frames[f] = arbitrary_virt_to_mfn((void *)va); + int level; + pte_t *ptep = lookup_address(va, &level); + unsigned long pfn, mfn; + void *virt; + + BUG_ON(ptep == NULL); + + pfn = pte_pfn(*ptep); + mfn = pfn_to_mfn(pfn); + virt = __va(PFN_PHYS(pfn)); + + frames[f] = mfn; make_lowmem_page_readonly((void *)va); - make_lowmem_page_readonly(mfn_to_virt(frames[f])); + make_lowmem_page_readonly(virt); } - MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); - - xen_mc_issue(PARAVIRT_LAZY_CPU); + if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) + BUG(); } static void load_TLS_descriptor(struct thread_struct *t, @@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, static int cvt_gate_to_trap(int vector, const gate_desc *val, struct trap_info *info) { - if (val->type != 0xf && val->type != 0xe) + if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT) return 0; info->vector = vector; @@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val, info->cs = gate_segment(*val); info->flags = val->dpl; /* interrupt gates clear IF */ - if (val->type == 0xe) - info->flags |= 4; + if (val->type == GATE_INTERRUPT) + info->flags |= 1 << 2; return 1; } @@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = { .emergency_restart = xen_emergency_restart, }; - /* First C function to be called on Xen boot */ asmlinkage void __init xen_start_kernel(void) { @@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void) xen_init_irq_ops(); + xen_init_cpuid_mask(); + #ifdef CONFIG_X86_LOCAL_APIC /* * set up the basic apic ops. @@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void) if (!xen_initial_domain()) __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); +#ifdef CONFIG_X86_64 + /* Work out if we support NX */ + check_efer(); +#endif + /* Don't do the full vcpu_info placement stuff until we have a possible map and a non-dummy shared_info. 
*/ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 2a81838a9ab..9842b121240 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -184,7 +184,7 @@ static inline unsigned p2m_index(unsigned long pfn) } /* Build the parallel p2m_top_mfn structures */ -void xen_setup_mfn_list_list(void) +static void __init xen_build_mfn_list_list(void) { unsigned pfn, idx; @@ -198,7 +198,10 @@ void xen_setup_mfn_list_list(void) unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); } +} +void xen_setup_mfn_list_list(void) +{ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = @@ -218,6 +221,8 @@ void __init xen_build_dynamic_phys_to_machine(void) p2m_top[topidx] = &mfn_list[pfn]; } + + xen_build_mfn_list_list(); } unsigned long get_phys_to_machine(unsigned long pfn) @@ -233,47 +238,74 @@ unsigned long get_phys_to_machine(unsigned long pfn) } EXPORT_SYMBOL_GPL(get_phys_to_machine); -static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) +/* install a new p2m_top page */ +bool install_p2mtop_page(unsigned long pfn, unsigned long *p) { - unsigned long *p; + unsigned topidx = p2m_top_index(pfn); + unsigned long **pfnp, *mfnp; unsigned i; - p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); - BUG_ON(p == NULL); + pfnp = &p2m_top[topidx]; + mfnp = &p2m_top_mfn[topidx]; for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) p[i] = INVALID_P2M_ENTRY; - if (cmpxchg(pp, p2m_missing, p) != p2m_missing) - free_page((unsigned long)p); - else + if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) { *mfnp = virt_to_mfn(p); + return true; + } + + return false; } -void set_phys_to_machine(unsigned long pfn, unsigned long mfn) +static void alloc_p2m(unsigned long pfn) { - unsigned topidx, idx; + unsigned long *p; - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return; - } + p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); + BUG_ON(p == NULL); + + if (!install_p2mtop_page(pfn, p)) + free_page((unsigned long)p); +} + +/* Try to install p2m mapping; fail if intermediate bits missing */ +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + unsigned topidx, idx; if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { BUG_ON(mfn != INVALID_P2M_ENTRY); - return; + return true; } topidx = p2m_top_index(pfn); if (p2m_top[topidx] == p2m_missing) { - /* no need to allocate a page to store an invalid entry */ if (mfn == INVALID_P2M_ENTRY) - return; - alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); + return true; + return false; } idx = p2m_index(pfn); p2m_top[topidx][idx] = mfn; + + return true; +} + +void set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { + BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + return; + } + + if (unlikely(!__set_phys_to_machine(pfn, mfn))) { + alloc_p2m(pfn); + + if (!__set_phys_to_machine(pfn, mfn)) + BUG(); + } } unsigned long arbitrary_virt_to_mfn(void *vaddr) @@ -987,7 +1019,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, return 0; } -void __init xen_mark_init_mm_pinned(void) +static void __init xen_mark_init_mm_pinned(void) { xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); } @@ -1270,8 +1302,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, } *args; struct multicall_space mcs; - BUG_ON(cpumask_empty(cpus)); - 
BUG_ON(!mm); + if (cpumask_empty(cpus)) + return; /* nothing to do */ mcs = xen_mc_entry(sizeof(*args)); args = mcs.args; @@ -1438,6 +1470,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) } #endif +static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ + struct mmuext_op op; + op.cmd = cmd; + op.arg1.mfn = pfn_to_mfn(pfn); + if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) + BUG(); +} + /* Early in boot, while setting up the initial pagetable, assume everything is pinned. */ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) @@ -1446,22 +1487,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) BUG_ON(mem_map); /* should only be used early */ #endif make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); +} + +/* Used for pmd and pud */ +static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) +{ +#ifdef CONFIG_FLATMEM + BUG_ON(mem_map); /* should only be used early */ +#endif + make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); } /* Early release_pte assumes that all pts are pinned, since there's only init_mm and anything attached to that is pinned. */ -static void xen_release_pte_init(unsigned long pfn) +static __init void xen_release_pte_init(unsigned long pfn) { + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } -static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +static __init void xen_release_pmd_init(unsigned long pfn) { - struct mmuext_op op; - op.cmd = cmd; - op.arg1.mfn = pfn_to_mfn(pfn); - if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) - BUG(); + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } /* This needs to make sure the new pte page is pinned iff its being @@ -1773,6 +1821,9 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #ifdef CONFIG_X86_LOCAL_APIC case FIX_APIC_BASE: /* maps dummy local APIC */ #endif + case FIX_TEXT_POKE0: + case FIX_TEXT_POKE1: + /* All local page mappings */ pte = pfn_pte(phys, prot); break; @@ -1819,7 +1870,6 @@ __init void xen_post_allocator_init(void) xen_mark_init_mm_pinned(); } - const struct pv_mmu_ops xen_mmu_ops __initdata = { .pagetable_setup_start = xen_pagetable_setup_start, .pagetable_setup_done = xen_pagetable_setup_done, @@ -1843,9 +1893,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { .alloc_pte = xen_alloc_pte_init, .release_pte = xen_release_pte_init, - .alloc_pmd = xen_alloc_pte_init, + .alloc_pmd = xen_alloc_pmd_init, .alloc_pmd_clone = paravirt_nop, - .release_pmd = xen_release_pte_init, + .release_pmd = xen_release_pmd_init, #ifdef CONFIG_HIGHPTE .kmap_atomic_pte = xen_kmap_atomic_pte, @@ -1883,8 +1933,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_pgd = xen_set_pgd_hyper, - .alloc_pud = xen_alloc_pte_init, - .release_pud = xen_release_pte_init, + .alloc_pud = xen_alloc_pmd_init, + .release_pud = xen_release_pmd_init, #endif /* PAGETABLE_LEVELS == 4 */ .activate_mm = xen_activate_mm, diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 24d1b44a337..da730262489 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h @@ -11,6 +11,9 @@ enum pt_level { }; +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); +bool install_p2mtop_page(unsigned long pfn, unsigned long *p); + void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 
585a6e33083..429834ec168 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
         BUG_ON(rc);
         while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
-                HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                 barrier();
         }
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
         /* Make sure other vcpus get a chance to run if they need to. */
         for_each_cpu(cpu, mask) {
                 if (xen_vcpu_stolen(cpu)) {
-                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                         break;
                 }
         }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea..20139464943 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -57,8 +57,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
 bool xen_vcpu_stolen(int vcpu);
-void xen_mark_init_mm_pinned(void);
-
 void xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
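The SuperH changes above (select HAVE_DMA_API_DEBUG in arch/sh/Kconfig, the dma_debug_init() call in arch/sh/mm/consistent.c, and the debug_dma_* hooks in arch/sh/include/asm/dma-mapping.h) wire the generic DMA-API debugging facility into the sh DMA ops. A rough sketch of the class of driver bug these hooks catch once CONFIG_DMA_API_DEBUG is enabled; the function, dev and buf below are hypothetical, only the DMA API calls themselves are real:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: the unmap size and direction do not match
 * the original mapping, so with CONFIG_DMA_API_DEBUG=y the
 * debug_dma_map_page()/debug_dma_unmap_page() hooks added above make
 * dma-debug warn at unmap time. */
static void example_dma_misuse(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... device performs the transfer ... */

        dma_unmap_single(dev, handle, len / 2, DMA_FROM_DEVICE);  /* mismatch */
}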