Diffstat (limited to 'arch')
241 files changed, 2165 insertions, 1715 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 9fb8aae5c39..443448154f3 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -45,6 +45,14 @@ config GENERIC_CALIBRATE_DELAY bool default y +config GENERIC_TIME + bool + default y + +config ARCH_USES_GETTIMEOFFSET + bool + default y + config ZONE_DMA bool default y diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c index ef183823029..9d0727d18ae 100644 --- a/arch/alpha/boot/tools/objstrip.c +++ b/arch/alpha/boot/tools/objstrip.c @@ -93,7 +93,7 @@ main (int argc, char *argv[]) ofd = 1; if (i < argc) { ofd = open(argv[i++], O_WRONLY | O_CREAT | O_TRUNC, 0666); - if (fd == -1) { + if (ofd == -1) { perror("open"); exit(1); } diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h index 88971460fa6..242c09ba98c 100644 --- a/arch/alpha/include/asm/hardirq.h +++ b/arch/alpha/include/asm/hardirq.h @@ -1,17 +1,9 @@ #ifndef _ALPHA_HARDIRQ_H #define _ALPHA_HARDIRQ_H -#include <linux/threads.h> -#include <linux/cache.h> - - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned long __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include <asm-generic/hardirq.h> #endif /* _ALPHA_HARDIRQ_H */ diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h index 90d7c35d286..99c56d47879 100644 --- a/arch/alpha/include/asm/mman.h +++ b/arch/alpha/include/asm/mman.h @@ -28,6 +28,8 @@ #define MAP_NORESERVE 0x10000 /* don't check for reservations */ #define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x40000 /* do not block on IO */ +#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x100000 /* create a huge page mapping */ #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_SYNC 2 /* synchronous memory sync */ @@ -48,6 +50,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index bfb880af959..d15aedfe606 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -268,11 +268,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, assume it doesn't support sg mapping, and, since we tried to use direct_map above, it now must be considered an error. */ if (! alpha_mv.mv_pci_tbi) { - static int been_here = 0; /* Only print the message once. */ - if (!been_here) { - printk(KERN_WARNING "pci_map_single: no HW sg\n"); - been_here = 1; - } + printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); return 0; } diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index b04e2cbf23a..5d0826654c6 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -408,28 +408,17 @@ time_init(void) * part. So we can't do the "find absolute time in terms of cycles" thing * that the other ports do. 
*/ -void -do_gettimeofday(struct timeval *tv) +u32 arch_gettimeoffset(void) { - unsigned long flags; - unsigned long sec, usec, seq; - unsigned long delta_cycles, delta_usec, partial_tick; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - - delta_cycles = rpcc() - state.last_time; - sec = xtime.tv_sec; - usec = (xtime.tv_nsec / 1000); - partial_tick = state.partial_tick; - - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - #ifdef CONFIG_SMP /* Until and unless we figure out how to get cpu cycle counters in sync and keep them there, we can't use the rpcc tricks. */ - delta_usec = 0; + return 0; #else + unsigned long delta_cycles, delta_usec, partial_tick; + + delta_cycles = rpcc() - state.last_time; + partial_tick = state.partial_tick; /* * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks) * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks) @@ -446,64 +435,10 @@ do_gettimeofday(struct timeval *tv) delta_usec = (delta_cycles * state.scaled_ticks_per_cycle + partial_tick) * 15625; delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; + return delta_usec * 1000; #endif - - usec += delta_usec; - if (usec >= 1000000) { - sec += 1; - usec -= 1000000; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; } -EXPORT_SYMBOL(do_gettimeofday); - -int -do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; - unsigned long delta_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - - /* The offset that is added into time in do_gettimeofday above - must be subtracted out here to keep a coherent view of the - time. Without this, a full-tick error is possible. */ - -#ifdef CONFIG_SMP - delta_nsec = 0; -#else - delta_nsec = rpcc() - state.last_time; - delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle - + state.partial_tick) * 15625; - delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; - delta_nsec *= 1000; -#endif - - nsec -= delta_nsec; - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - - write_sequnlock_irq(&xtime_lock); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - - /* * In order to set the CMOS clock precisely, set_rtc_mmss has to be * called 500 ms after the second nowtime has started, because when diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index af71d38c8e4..a0902c20d67 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -299,7 +299,7 @@ printk_memory_info(void) initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 0eab5574942..10b403554b6 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -349,7 +349,7 @@ void __init mem_init(void) printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, " "%luk data, %luk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), 
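The alpha hunks above drop the architecture's private do_gettimeofday()/do_settimeofday(): with GENERIC_TIME and ARCH_USES_GETTIMEOFFSET selected, the architecture only supplies arch_gettimeoffset(), which returns the nanoseconds elapsed since the last timer tick, and the generic timekeeping core folds that offset into xtime. Below is a simplified sketch of the consumer side, not the kernel's exact code; the function name sketch_getnstimeofday is illustrative only.

#include <linux/time.h>
#include <linux/seqlock.h>

extern u32 arch_gettimeoffset(void);	/* supplied by arch/alpha/kernel/time.c above */

/* Roughly what the generic core does for a gettimeofday-style read. */
static void sketch_getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	u32 offset_ns;

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;				/* wall time at the last tick */
		offset_ns = arch_gettimeoffset();	/* ns accumulated since that tick */
	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, offset_ns);			/* normalizes any tv_nsec overflow */
}
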
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7350557a81e..54661125a8b 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -25,7 +25,7 @@ KBUILD_CFLAGS +=$(call cc-option,-marm,) # Select a platform tht is kept up-to-date KBUILD_DEFCONFIG := versatile_defconfig -# defines filename extension depending memory manement type. +# defines filename extension depending memory management type. ifeq ($(CONFIG_MMU),) MMUEXT := -nommu endif diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig index 672f6db06a5..a1657b73683 100644 --- a/arch/arm/configs/n770_defconfig +++ b/arch/arm/configs/n770_defconfig @@ -875,7 +875,7 @@ CONFIG_FB_OMAP_LCDC_EXTERNAL=y CONFIG_FB_OMAP_LCDC_HWA742=y # CONFIG_FB_OMAP_LCDC_BLIZZARD is not set CONFIG_FB_OMAP_MANUAL_UPDATE=y -# CONFIG_FB_OMAP_LCD_MIPID is not set +CONFIG_FB_OMAP_LCD_MIPID=y # CONFIG_FB_OMAP_BOOTLOADER_INIT is not set CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2 # CONFIG_FB_OMAP_DMA_TUNE is not set diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig index 51c0fa8897c..357d4021e2d 100644 --- a/arch/arm/configs/omap3_beagle_defconfig +++ b/arch/arm/configs/omap3_beagle_defconfig @@ -778,7 +778,33 @@ CONFIG_DAB=y # # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set -# CONFIG_FB is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_FB_OMAP=y +# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set +# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set +CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -791,6 +817,25 @@ CONFIG_DAB=y # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_LOGO is not set + +# +# Sound +# # CONFIG_SOUND is not set # CONFIG_HID_SUPPORT is not set CONFIG_USB_SUPPORT=y diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig index 9a510eab75a..8a4a7e2ba87 100644 --- a/arch/arm/configs/omap_3430sdp_defconfig +++ b/arch/arm/configs/omap_3430sdp_defconfig @@ -1313,8 +1313,33 @@ CONFIG_DVB_ISL6421=m # Graphics support # # CONFIG_VGASTATE is not set -# CONFIG_VIDEO_OUTPUT_CONTROL is not set -# CONFIG_FB is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# 
CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_FB_OMAP=y +# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set +# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set +CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -1331,6 +1356,16 @@ CONFIG_DISPLAY_SUPPORT=y # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y CONFIG_SOUND=y CONFIG_SOUND_OSS_CORE=y CONFIG_SND=y diff --git a/arch/arm/configs/omap_ldp_defconfig b/arch/arm/configs/omap_ldp_defconfig index 679a4a3e265..b9c48919a68 100644 --- a/arch/arm/configs/omap_ldp_defconfig +++ b/arch/arm/configs/omap_ldp_defconfig @@ -690,6 +690,7 @@ CONFIG_GPIOLIB=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +CONFIG_GPIO_TWL4030=y # # PCI GPIO expanders: @@ -742,6 +743,7 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_EGPIO is not set # CONFIG_HTC_PASIC3 is not set +CONFIG_TWL4030_CORE=y # CONFIG_MFD_TMIO is not set # CONFIG_MFD_T7L66XB is not set # CONFIG_MFD_TC6387XB is not set @@ -767,8 +769,46 @@ CONFIG_DAB=y # # CONFIG_VGASTATE is not set CONFIG_VIDEO_OUTPUT_CONTROL=m -# CONFIG_FB is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +CONFIG_FB_OMAP=y +CONFIG_FB_OMAP_LCD_VGA=y +# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set +# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set +CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4 +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_CORGI is not set +# CONFIG_BACKLIGHT_GENERIC is not set # # Display device support @@ -780,6 +820,16 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_SEQUENCER is not set diff --git a/arch/arm/include/asm/mman.h 
b/arch/arm/include/asm/mman.h index fc26976d8e3..8eebf89f5ab 100644 --- a/arch/arm/include/asm/mman.h +++ b/arch/arm/include/asm/mman.h @@ -1,17 +1 @@ -#ifndef __ARM_MMAN_H__ -#define __ARM_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __ARM_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 9122c9ee18f..89f7eade20a 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -390,7 +390,7 @@ #define __NR_preadv (__NR_SYSCALL_BASE+361) #define __NR_pwritev (__NR_SYSCALL_BASE+362) #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) -#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364) +#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index ecfa98954d1..fafce1b5c69 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -373,7 +373,7 @@ CALL(sys_preadv) CALL(sys_pwritev) CALL(sys_rt_tgsigqueueinfo) - CALL(sys_perf_counter_open) + CALL(sys_perf_event_open) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index a24d824c428..e35d54d43e7 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -289,6 +289,13 @@ config MACH_NEOCORE926 help Select this if you are using the Adeneo Neocore 926 board. +config MACH_AT91SAM9G20EK_2MMC + bool "Atmel AT91SAM9G20-EK Evaluation Kit modified for 2 MMC Slots" + depends on ARCH_AT91SAM9G20 + help + Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit + Rev A or B modified for 2 MMC Slots. 
+ endif # ---------------------------------------------------------- diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile index a6ed015d82e..ada440aab0c 100644 --- a/arch/arm/mach-at91/Makefile +++ b/arch/arm/mach-at91/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_MACH_AT91SAM9RLEK) += board-sam9rlek.o # AT91SAM9G20 board-specific support obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o +obj-$(CONFIG_MACH_AT91SAM9G20EK_2MMC) += board-sam9g20ek-2slot-mmc.o obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o # AT91SAM9G45 board-specific support diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index ee4ea0e720c..07eb7b07e44 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -278,6 +278,102 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {} #endif +/* -------------------------------------------------------------------- + * MMC / SD Slot for Atmel MCI Driver + * -------------------------------------------------------------------- */ + +#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) +static u64 mmc_dmamask = DMA_BIT_MASK(32); +static struct mci_platform_data mmc_data; + +static struct resource mmc_resources[] = { + [0] = { + .start = AT91SAM9260_BASE_MCI, + .end = AT91SAM9260_BASE_MCI + SZ_16K - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = AT91SAM9260_ID_MCI, + .end = AT91SAM9260_ID_MCI, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device at91sam9260_mmc_device = { + .name = "atmel_mci", + .id = -1, + .dev = { + .dma_mask = &mmc_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &mmc_data, + }, + .resource = mmc_resources, + .num_resources = ARRAY_SIZE(mmc_resources), +}; + +void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) +{ + unsigned int i; + unsigned int slot_count = 0; + + if (!data) + return; + + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + if (data->slot[i].bus_width) { + /* input/irq */ + if (data->slot[i].detect_pin) { + at91_set_gpio_input(data->slot[i].detect_pin, 1); + at91_set_deglitch(data->slot[i].detect_pin, 1); + } + if (data->slot[i].wp_pin) + at91_set_gpio_input(data->slot[i].wp_pin, 1); + + switch (i) { + case 0: + /* CMD */ + at91_set_A_periph(AT91_PIN_PA7, 1); + /* DAT0, maybe DAT1..DAT3 */ + at91_set_A_periph(AT91_PIN_PA6, 1); + if (data->slot[i].bus_width == 4) { + at91_set_A_periph(AT91_PIN_PA9, 1); + at91_set_A_periph(AT91_PIN_PA10, 1); + at91_set_A_periph(AT91_PIN_PA11, 1); + } + slot_count++; + break; + case 1: + /* CMD */ + at91_set_B_periph(AT91_PIN_PA1, 1); + /* DAT0, maybe DAT1..DAT3 */ + at91_set_B_periph(AT91_PIN_PA0, 1); + if (data->slot[i].bus_width == 4) { + at91_set_B_periph(AT91_PIN_PA5, 1); + at91_set_B_periph(AT91_PIN_PA4, 1); + at91_set_B_periph(AT91_PIN_PA3, 1); + } + slot_count++; + break; + default: + printk(KERN_ERR + "AT91: SD/MMC slot %d not available\n", i); + break; + } + } + } + + if (slot_count) { + /* CLK */ + at91_set_A_periph(AT91_PIN_PA8, 0); + + mmc_data = *data; + platform_device_register(&at91sam9260_mmc_device); + } +} +#else +void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {} +#endif + /* -------------------------------------------------------------------- * NAND / SmartMedia diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c index 
61e52b66bc7..50667bed7cc 100644 --- a/arch/arm/mach-at91/board-afeb-9260v1.c +++ b/arch/arm/mach-at91/board-afeb-9260v1.c @@ -53,7 +53,7 @@ static void __init afeb9260_map_io(void) /* Initialize processor: 18.432 MHz crystal */ at91sam9260_initialize(18432000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c index d3ba29c5d8c..02138af631e 100644 --- a/arch/arm/mach-at91/board-cam60.c +++ b/arch/arm/mach-at91/board-cam60.c @@ -50,7 +50,7 @@ static void __init cam60_map_io(void) /* Initialize processor: 10 MHz crystal */ at91sam9260_initialize(10000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c index 9ba7ba2cc3b..8c0b71c95be 100644 --- a/arch/arm/mach-at91/board-neocore926.c +++ b/arch/arm/mach-at91/board-neocore926.c @@ -56,7 +56,7 @@ static void __init neocore926_map_io(void) /* Initialize processor: 20 MHz crystal */ at91sam9263_initialize(20000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, RTS, CTS) */ diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c index 4cff9a7e61d..664938e8f66 100644 --- a/arch/arm/mach-at91/board-qil-a9260.c +++ b/arch/arm/mach-at91/board-qil-a9260.c @@ -53,7 +53,7 @@ static void __init ek_map_io(void) /* Initialize processor: 12.000 MHz crystal */ at91sam9260_initialize(12000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index 93a0f8b100e..ba9d501b5c5 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -54,7 +54,7 @@ static void __init ek_map_io(void) /* Initialize processor: 18.432 MHz crystal */ at91sam9260_initialize(18432000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index f9b19993a7a..c4c8865d52d 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -61,7 +61,7 @@ static void __init ek_map_io(void) /* Setup the LEDs */ at91_init_leds(AT91_PIN_PA13, AT91_PIN_PA14); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 1bf7bd4cbe1..26f1aa6049a 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -57,7 +57,7 @@ static void __init ek_map_io(void) /* Initialize processor: 16.367 MHz crystal */ at91sam9263_initialize(16367660); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. 
(Rx, Tx, RTS, CTS) */ diff --git a/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c new file mode 100644 index 00000000000..a28e53faf71 --- /dev/null +++ b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c @@ -0,0 +1,277 @@ +/* + * Copyright (C) 2005 SAN People + * Copyright (C) 2008 Atmel + * Copyright (C) 2009 Rob Emanuele + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/at73c213.h> +#include <linux/clk.h> + +#include <mach/hardware.h> +#include <asm/setup.h> +#include <asm/mach-types.h> +#include <asm/irq.h> + +#include <asm/mach/arch.h> +#include <asm/mach/map.h> +#include <asm/mach/irq.h> + +#include <mach/board.h> +#include <mach/gpio.h> +#include <mach/at91sam9_smc.h> + +#include "sam9_smc.h" +#include "generic.h" + + +static void __init ek_map_io(void) +{ + /* Initialize processor: 18.432 MHz crystal */ + at91sam9260_initialize(18432000); + + /* DGBU on ttyS0. (Rx & Tx only) */ + at91_register_uart(0, 0, 0); + + /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ + at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS + | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD + | ATMEL_UART_RI); + + /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ + at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); + + /* set serial console to ttyS0 (ie, DBGU) */ + at91_set_serial_console(0); +} + +static void __init ek_init_irq(void) +{ + at91sam9260_init_interrupts(NULL); +} + + +/* + * USB Host port + */ +static struct at91_usbh_data __initdata ek_usbh_data = { + .ports = 2, +}; + +/* + * USB Device port + */ +static struct at91_udc_data __initdata ek_udc_data = { + .vbus_pin = AT91_PIN_PC5, + .pullup_pin = 0, /* pull-up driven by UDC */ +}; + + +/* + * SPI devices. 
+ */ +static struct spi_board_info ek_spi_devices[] = { +#if !defined(CONFIG_MMC_ATMELMCI) + { /* DataFlash chip */ + .modalias = "mtd_dataflash", + .chip_select = 1, + .max_speed_hz = 15 * 1000 * 1000, + .bus_num = 0, + }, +#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) + { /* DataFlash card */ + .modalias = "mtd_dataflash", + .chip_select = 0, + .max_speed_hz = 15 * 1000 * 1000, + .bus_num = 0, + }, +#endif +#endif +}; + + +/* + * MACB Ethernet device + */ +static struct at91_eth_data __initdata ek_macb_data = { + .phy_irq_pin = AT91_PIN_PC12, + .is_rmii = 1, +}; + + +/* + * NAND flash + */ +static struct mtd_partition __initdata ek_nand_partition[] = { + { + .name = "Bootstrap", + .offset = 0, + .size = 4 * SZ_1M, + }, + { + .name = "Partition 1", + .offset = MTDPART_OFS_NXTBLK, + .size = 60 * SZ_1M, + }, + { + .name = "Partition 2", + .offset = MTDPART_OFS_NXTBLK, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) +{ + *num_partitions = ARRAY_SIZE(ek_nand_partition); + return ek_nand_partition; +} + +/* det_pin is not connected */ +static struct atmel_nand_data __initdata ek_nand_data = { + .ale = 21, + .cle = 22, + .rdy_pin = AT91_PIN_PC13, + .enable_pin = AT91_PIN_PC14, + .partition_info = nand_partitions, +#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16) + .bus_width_16 = 1, +#else + .bus_width_16 = 0, +#endif +}; + +static struct sam9_smc_config __initdata ek_nand_smc_config = { + .ncs_read_setup = 0, + .nrd_setup = 2, + .ncs_write_setup = 0, + .nwe_setup = 2, + + .ncs_read_pulse = 4, + .nrd_pulse = 4, + .ncs_write_pulse = 4, + .nwe_pulse = 4, + + .read_cycle = 7, + .write_cycle = 7, + + .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, + .tdf_cycles = 3, +}; + +static void __init ek_add_device_nand(void) +{ + /* setup bus-width (8 or 16) */ + if (ek_nand_data.bus_width_16) + ek_nand_smc_config.mode |= AT91_SMC_DBW_16; + else + ek_nand_smc_config.mode |= AT91_SMC_DBW_8; + + /* configure chip-select 3 (NAND) */ + sam9_smc_configure(3, &ek_nand_smc_config); + + at91_add_device_nand(&ek_nand_data); +} + + +/* + * MCI (SD/MMC) + * det_pin and wp_pin are not connected + */ +#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) +static struct mci_platform_data __initdata ek_mmc_data = { + .slot[0] = { + .bus_width = 4, + .detect_pin = -ENODEV, + .wp_pin = -ENODEV, + }, + .slot[1] = { + .bus_width = 4, + .detect_pin = -ENODEV, + .wp_pin = -ENODEV, + }, + +}; +#else +static struct amci_platform_data __initdata ek_mmc_data = { +}; +#endif + +/* + * LEDs + */ +static struct gpio_led ek_leds[] = { + { /* "bottom" led, green, userled1 to be defined */ + .name = "ds5", + .gpio = AT91_PIN_PB12, + .active_low = 1, + .default_trigger = "none", + }, + { /* "power" led, yellow */ + .name = "ds1", + .gpio = AT91_PIN_PB13, + .default_trigger = "heartbeat", + } +}; + +static struct i2c_board_info __initdata ek_i2c_devices[] = { + { + I2C_BOARD_INFO("24c512", 0x50), + }, +}; + + +static void __init ek_board_init(void) +{ + /* Serial */ + at91_add_device_serial(); + /* USB Host */ + at91_add_device_usbh(&ek_usbh_data); + /* USB Device */ + at91_add_device_udc(&ek_udc_data); + /* SPI */ + at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); + /* NAND */ + ek_add_device_nand(); + /* Ethernet */ + at91_add_device_eth(&ek_macb_data); + /* MMC */ + at91_add_device_mci(0, &ek_mmc_data); + /* I2C */ + at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); + /* LEDs */ + 
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); + /* PCK0 provides MCLK to the WM8731 */ + at91_set_B_periph(AT91_PIN_PC1, 0); + /* SSC (for WM8731) */ + at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX); +} + +MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod") + /* Maintainer: Rob Emanuele */ + .phys_io = AT91_BASE_SYS, + .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc, + .boot_params = AT91_SDRAM_BASE + 0x100, + .timer = &at91sam926x_timer, + .map_io = ek_map_io, + .init_irq = ek_init_irq, + .init_machine = ek_board_init, +MACHINE_END diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index ca470d504ea..29cf8317748 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c @@ -50,7 +50,7 @@ static void __init ek_map_io(void) /* Initialize processor: 18.432 MHz crystal */ at91sam9260_initialize(18432000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c index 9d07679efce..94ffb5c103b 100644 --- a/arch/arm/mach-at91/board-sam9rlek.c +++ b/arch/arm/mach-at91/board-sam9rlek.c @@ -43,7 +43,7 @@ static void __init ek_map_io(void) /* Initialize processor: 12.000 MHz crystal */ at91sam9rl_initialize(12000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) */ diff --git a/arch/arm/mach-at91/board-usb-a9260.c b/arch/arm/mach-at91/board-usb-a9260.c index d13304c0bc4..905d6ef7680 100644 --- a/arch/arm/mach-at91/board-usb-a9260.c +++ b/arch/arm/mach-at91/board-usb-a9260.c @@ -53,7 +53,7 @@ static void __init ek_map_io(void) /* Initialize processor: 12.000 MHz crystal */ at91sam9260_initialize(12000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ diff --git a/arch/arm/mach-at91/board-usb-a9263.c b/arch/arm/mach-at91/board-usb-a9263.c index d96405b7d57..b6a3480383e 100644 --- a/arch/arm/mach-at91/board-usb-a9263.c +++ b/arch/arm/mach-at91/board-usb-a9263.c @@ -52,7 +52,7 @@ static void __init ek_map_io(void) /* Initialize processor: 12.00 MHz crystal */ at91sam9263_initialize(12000000); - /* DGBU on ttyS0. (Rx & Tx only) */ + /* DBGU on ttyS0. 
(Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h index 13f27a4b882..583f38a38df 100644 --- a/arch/arm/mach-at91/include/mach/board.h +++ b/arch/arm/mach-at91/include/mach/board.h @@ -37,6 +37,7 @@ #include <linux/leds.h> #include <linux/spi/spi.h> #include <linux/usb/atmel_usba_udc.h> +#include <linux/atmel-mci.h> #include <sound/atmel-ac97c.h> /* USB Device */ @@ -64,6 +65,7 @@ struct at91_cf_data { extern void __init at91_add_device_cf(struct at91_cf_data *data); /* MMC / SD */ + /* at91_mci platform config */ struct at91_mmc_data { u8 det_pin; /* card detect IRQ */ unsigned slot_b:1; /* uses Slot B */ @@ -73,6 +75,9 @@ struct at91_mmc_data { }; extern void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data); + /* atmel-mci platform config */ +extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data); + /* Ethernet (EMAC & MACB) */ struct at91_eth_data { u32 phy_mask; diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 3dd0e2a2309..dda19cd7619 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -37,7 +37,7 @@ struct clk { static unsigned long get_uart_rate(struct clk *clk); static int set_keytchclk_rate(struct clk *clk, unsigned long rate); - +static int set_div_rate(struct clk *clk, unsigned long rate); static struct clk clk_uart1 = { .sw_locked = 1, @@ -76,6 +76,13 @@ static struct clk clk_pwm = { .rate = EP93XX_EXT_CLK_RATE, }; +static struct clk clk_video = { + .sw_locked = 1, + .enable_reg = EP93XX_SYSCON_VIDCLKDIV, + .enable_mask = EP93XX_SYSCON_CLKDIV_ENABLE, + .set_rate = set_div_rate, +}; + /* DMA Clocks */ static struct clk clk_m2p0 = { .enable_reg = EP93XX_SYSCON_PWRCNT, @@ -140,6 +147,7 @@ static struct clk_lookup clocks[] = { INIT_CK(NULL, "pll2", &clk_pll2), INIT_CK("ep93xx-ohci", NULL, &clk_usb_host), INIT_CK("ep93xx-keypad", NULL, &clk_keypad), + INIT_CK("ep93xx-fb", NULL, &clk_video), INIT_CK(NULL, "pwm_clk", &clk_pwm), INIT_CK(NULL, "m2p0", &clk_m2p0), INIT_CK(NULL, "m2p1", &clk_m2p1), @@ -236,6 +244,84 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate) return 0; } +static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel, + int *pdiv, int *div) +{ + unsigned long max_rate, best_rate = 0, + actual_rate = 0, mclk_rate = 0, rate_err = -1; + int i, found = 0, __div = 0, __pdiv = 0; + + /* Don't exceed the maximum rate */ + max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4), + (unsigned long)EP93XX_EXT_CLK_RATE / 4); + rate = min(rate, max_rate); + + /* + * Try the two pll's and the external clock + * Because the valid predividers are 2, 2.5 and 3, we multiply + * all the clocks by 2 to avoid floating point math. 
+ * + * This is based on the algorithm in the ep93xx raster guide: + * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf + * + */ + for (i = 0; i < 3; i++) { + if (i == 0) + mclk_rate = EP93XX_EXT_CLK_RATE * 2; + else if (i == 1) + mclk_rate = clk_pll1.rate * 2; + else if (i == 2) + mclk_rate = clk_pll2.rate * 2; + + /* Try each predivider value */ + for (__pdiv = 4; __pdiv <= 6; __pdiv++) { + __div = mclk_rate / (rate * __pdiv); + if (__div < 2 || __div > 127) + continue; + + actual_rate = mclk_rate / (__pdiv * __div); + + if (!found || abs(actual_rate - rate) < rate_err) { + *pdiv = __pdiv - 3; + *div = __div; + *psel = (i == 2); + *esel = (i != 0); + best_rate = actual_rate; + rate_err = abs(actual_rate - rate); + found = 1; + } + } + } + + if (!found) + return 0; + + return best_rate; +} + +static int set_div_rate(struct clk *clk, unsigned long rate) +{ + unsigned long actual_rate; + int psel = 0, esel = 0, pdiv = 0, div = 0; + u32 val; + + actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div); + if (actual_rate == 0) + return -EINVAL; + clk->rate = actual_rate; + + /* Clear the esel, psel, pdiv and div bits */ + val = __raw_readl(clk->enable_reg); + val &= ~0x7fff; + + /* Set the new esel, psel, pdiv and div bits for the new clock rate */ + val |= (esel ? EP93XX_SYSCON_CLKDIV_ESEL : 0) | + (psel ? EP93XX_SYSCON_CLKDIV_PSEL : 0) | + (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div; + ep93xx_syscon_swlocked_write(val, clk->enable_reg); + return 0; +} + int clk_set_rate(struct clk *clk, unsigned long rate) { if (clk->set_rate) diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 16b92c37ec9..f7ebed942f6 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -30,6 +30,7 @@ #include <linux/i2c-gpio.h> #include <mach/hardware.h> +#include <mach/fb.h> #include <asm/mach/map.h> #include <asm/mach/time.h> @@ -682,6 +683,37 @@ void ep93xx_pwm_release_gpio(struct platform_device *pdev) EXPORT_SYMBOL(ep93xx_pwm_release_gpio); +/************************************************************************* + * EP93xx video peripheral handling + *************************************************************************/ +static struct ep93xxfb_mach_info ep93xxfb_data; + +static struct resource ep93xx_fb_resource[] = { + { + .start = EP93XX_RASTER_PHYS_BASE, + .end = EP93XX_RASTER_PHYS_BASE + 0x800 - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ep93xx_fb_device = { + .name = "ep93xx-fb", + .id = -1, + .dev = { + .platform_data = &ep93xxfb_data, + .coherent_dma_mask = DMA_BIT_MASK(32), + .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask, + }, + .num_resources = ARRAY_SIZE(ep93xx_fb_resource), + .resource = ep93xx_fb_resource, +}; + +void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data) +{ + ep93xxfb_data = *data; + platform_device_register(&ep93xx_fb_device); +} + extern void ep93xx_gpio_init(void); void __init ep93xx_init_devices(void) diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h index ea78e908fc8..0fbf87b1633 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h @@ -70,6 +70,7 @@ #define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000) #define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000) +#define EP93XX_RASTER_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00030000) #define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000) #define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000) @@ 
-207,6 +208,11 @@ #define EP93XX_SYSCON_DEVCFG_ADCPD (1<<2) #define EP93XX_SYSCON_DEVCFG_KEYS (1<<1) #define EP93XX_SYSCON_DEVCFG_SHENA (1<<0) +#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84) +#define EP93XX_SYSCON_CLKDIV_ENABLE (1<<15) +#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14) +#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13) +#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8 #define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90) #define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN (1<<31) #define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV (1<<16) diff --git a/arch/arm/mach-ep93xx/include/mach/fb.h b/arch/arm/mach-ep93xx/include/mach/fb.h new file mode 100644 index 00000000000..d5ae11d7c45 --- /dev/null +++ b/arch/arm/mach-ep93xx/include/mach/fb.h @@ -0,0 +1,56 @@ +/* + * arch/arm/mach-ep93xx/include/mach/fb.h + */ + +#ifndef __ASM_ARCH_EP93XXFB_H +#define __ASM_ARCH_EP93XXFB_H + +struct platform_device; +struct fb_videomode; +struct fb_info; + +#define EP93XXFB_USE_MODEDB 0 + +/* VideoAttributes flags */ +#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0) +#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1) +#define EP93XXFB_VSYNC_ENABLE (1 << 2) +#define EP93XXFB_PIXEL_DATA_ENABLE (1 << 3) +#define EP93XXFB_COMPOSITE_SYNC (1 << 4) +#define EP93XXFB_SYNC_VERT_HIGH (1 << 5) +#define EP93XXFB_SYNC_HORIZ_HIGH (1 << 6) +#define EP93XXFB_SYNC_BLANK_HIGH (1 << 7) +#define EP93XXFB_PCLK_FALLING (1 << 8) +#define EP93XXFB_ENABLE_AC (1 << 9) +#define EP93XXFB_ENABLE_LCD (1 << 10) +#define EP93XXFB_ENABLE_CCIR (1 << 12) +#define EP93XXFB_USE_PARALLEL_INTERFACE (1 << 13) +#define EP93XXFB_ENABLE_INTERRUPT (1 << 14) +#define EP93XXFB_USB_INTERLACE (1 << 16) +#define EP93XXFB_USE_EQUALIZATION (1 << 17) +#define EP93XXFB_USE_DOUBLE_HORZ (1 << 18) +#define EP93XXFB_USE_DOUBLE_VERT (1 << 19) +#define EP93XXFB_USE_BLANK_PIXEL (1 << 20) +#define EP93XXFB_USE_SDCSN0 (0 << 21) +#define EP93XXFB_USE_SDCSN1 (1 << 21) +#define EP93XXFB_USE_SDCSN2 (2 << 21) +#define EP93XXFB_USE_SDCSN3 (3 << 21) + +#define EP93XXFB_ENABLE (EP93XXFB_STATE_MACHINE_ENABLE | \ + EP93XXFB_PIXEL_CLOCK_ENABLE | \ + EP93XXFB_VSYNC_ENABLE | \ + EP93XXFB_PIXEL_DATA_ENABLE) + +struct ep93xxfb_mach_info { + unsigned int num_modes; + const struct fb_videomode *modes; + const struct fb_videomode *default_mode; + int bpp; + unsigned int flags; + + int (*setup)(struct platform_device *pdev); + void (*teardown)(struct platform_device *pdev); + void (*blank)(int blank_mode, struct fb_info *info); +}; + +#endif /* __ASM_ARCH_EP93XXFB_H */ diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index 5f5fa6574d3..01a0f0838e5 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h @@ -6,6 +6,7 @@ struct i2c_board_info; struct platform_device; +struct ep93xxfb_mach_info; struct ep93xx_eth_data { @@ -33,6 +34,7 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits) void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr); void ep93xx_register_i2c(struct i2c_board_info *devices, int num); +void ep93xx_register_fb(struct ep93xxfb_mach_info *data); void ep93xx_register_pwm(int pwm0, int pwm1); int ep93xx_pwm_acquire_gpio(struct platform_device *pdev); void ep93xx_pwm_release_gpio(struct platform_device *pdev); diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index e70baa79901..e6e8290b782 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -19,6 +19,7 
@@ #include <linux/delay.h> #include <linux/regulator/machine.h> #include <linux/gpio.h> +#include <linux/mmc/host.h> #include <mach/mcspi.h> #include <mach/mux.h> @@ -102,6 +103,7 @@ static struct twl4030_hsmmc_info mmc[] = { .cover_only = true, .gpio_cd = 160, .gpio_wp = -EINVAL, + .power_saving = true, }, { .name = "internal", @@ -109,6 +111,8 @@ static struct twl4030_hsmmc_info mmc[] = { .wires = 8, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, + .nonremovable = true, + .power_saving = true, }, {} /* Terminator */ }; diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index a2e915639b7..bcfcfc7fdb9 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -257,6 +257,11 @@ static inline void omap_init_sti(void) {} #define OMAP2_MCSPI3_BASE 0x480b8000 #define OMAP2_MCSPI4_BASE 0x480ba000 +#define OMAP4_MCSPI1_BASE 0x48098100 +#define OMAP4_MCSPI2_BASE 0x4809a100 +#define OMAP4_MCSPI3_BASE 0x480b8100 +#define OMAP4_MCSPI4_BASE 0x480ba100 + static struct omap2_mcspi_platform_config omap2_mcspi1_config = { .num_cs = 4, }; @@ -301,7 +306,8 @@ static struct platform_device omap2_mcspi2 = { }, }; -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ + defined(CONFIG_ARCH_OMAP4) static struct omap2_mcspi_platform_config omap2_mcspi3_config = { .num_cs = 2, }; @@ -325,7 +331,7 @@ static struct platform_device omap2_mcspi3 = { }; #endif -#ifdef CONFIG_ARCH_OMAP3 +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) static struct omap2_mcspi_platform_config omap2_mcspi4_config = { .num_cs = 1, }; @@ -351,14 +357,25 @@ static struct platform_device omap2_mcspi4 = { static void omap_init_mcspi(void) { + if (cpu_is_omap44xx()) { + omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE; + omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff; + omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE; + omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff; + omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE; + omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff; + omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE; + omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff; + } platform_device_register(&omap2_mcspi1); platform_device_register(&omap2_mcspi2); -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) - if (cpu_is_omap2430() || cpu_is_omap343x()) +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ + defined(CONFIG_ARCH_OMAP4) + if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx()) platform_device_register(&omap2_mcspi3); #endif -#ifdef CONFIG_ARCH_OMAP3 - if (cpu_is_omap343x()) +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) + if (cpu_is_omap343x() || cpu_is_omap44xx()) platform_device_register(&omap2_mcspi4); #endif } @@ -397,7 +414,7 @@ static inline void omap_init_sha1_md5(void) { } /*-------------------------------------------------------------------------*/ -#ifdef CONFIG_ARCH_OMAP3 +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) #define MMCHS_SYSCONFIG 0x0010 #define MMCHS_SYSCONFIG_SWRESET (1 << 1) @@ -424,8 +441,8 @@ static struct platform_device dummy_pdev = { **/ static void __init omap_hsmmc_reset(void) { - u32 i, nr_controllers = cpu_is_omap34xx() ? OMAP34XX_NR_MMC : - OMAP24XX_NR_MMC; + u32 i, nr_controllers = cpu_is_omap44xx() ? OMAP44XX_NR_MMC : + (cpu_is_omap34xx() ? 
OMAP34XX_NR_MMC : OMAP24XX_NR_MMC); for (i = 0; i < nr_controllers; i++) { u32 v, base = 0; @@ -442,8 +459,21 @@ static void __init omap_hsmmc_reset(void) case 2: base = OMAP3_MMC3_BASE; break; + case 3: + if (!cpu_is_omap44xx()) + return; + base = OMAP4_MMC4_BASE; + break; + case 4: + if (!cpu_is_omap44xx()) + return; + base = OMAP4_MMC5_BASE; + break; } + if (cpu_is_omap44xx()) + base += OMAP4_MMC_REG_OFFSET; + dummy_pdev.id = i; dev_set_name(&dummy_pdev.dev, "mmci-omap-hs.%d", i); iclk = clk_get(dev, "ick"); @@ -581,11 +611,23 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, irq = INT_24XX_MMC2_IRQ; break; case 2: - if (!cpu_is_omap34xx()) + if (!cpu_is_omap44xx() && !cpu_is_omap34xx()) return; base = OMAP3_MMC3_BASE; irq = INT_34XX_MMC3_IRQ; break; + case 3: + if (!cpu_is_omap44xx()) + return; + base = OMAP4_MMC4_BASE + OMAP4_MMC_REG_OFFSET; + irq = INT_44XX_MMC4_IRQ; + break; + case 4: + if (!cpu_is_omap44xx()) + return; + base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET; + irq = INT_44XX_MMC5_IRQ; + break; default: continue; } @@ -593,8 +635,15 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data, if (cpu_is_omap2420()) { size = OMAP2420_MMC_SIZE; name = "mmci-omap"; + } else if (cpu_is_omap44xx()) { + if (i < 3) { + base += OMAP4_MMC_REG_OFFSET; + irq += IRQ_GIC_START; + } + size = OMAP4_HSMMC_SIZE; + name = "mmci-omap-hs"; } else { - size = HSMMC_SIZE; + size = OMAP3_HSMMC_SIZE; name = "mmci-omap-hs"; } omap_mmc_add(name, i, base, size, irq, mmc_data[i]); diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c index 3c04c2f1b23..c9c59a2db4e 100644 --- a/arch/arm/mach-omap2/mmc-twl4030.c +++ b/arch/arm/mach-omap2/mmc-twl4030.c @@ -198,6 +198,18 @@ static int twl_mmc_resume(struct device *dev, int slot) #define twl_mmc_resume NULL #endif +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) + +static int twl4030_mmc_get_context_loss(struct device *dev) +{ + /* FIXME: PM DPS not implemented yet */ + return 0; +} + +#else +#define twl4030_mmc_get_context_loss NULL +#endif + static int twl_mmc1_set_power(struct device *dev, int slot, int power_on, int vdd) { @@ -328,6 +340,61 @@ static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int v return ret; } +static int twl_mmc1_set_sleep(struct device *dev, int slot, int sleep, int vdd, + int cardsleep) +{ + struct twl_mmc_controller *c = &hsmmc[0]; + int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; + + return regulator_set_mode(c->vcc, mode); +} + +static int twl_mmc23_set_sleep(struct device *dev, int slot, int sleep, int vdd, + int cardsleep) +{ + struct twl_mmc_controller *c = NULL; + struct omap_mmc_platform_data *mmc = dev->platform_data; + int i, err, mode; + + for (i = 1; i < ARRAY_SIZE(hsmmc); i++) { + if (mmc == hsmmc[i].mmc) { + c = &hsmmc[i]; + break; + } + } + + if (c == NULL) + return -ENODEV; + + /* + * If we don't see a Vcc regulator, assume it's a fixed + * voltage always-on regulator. + */ + if (!c->vcc) + return 0; + + mode = sleep ? 
REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; + + if (!c->vcc_aux) + return regulator_set_mode(c->vcc, mode); + + if (cardsleep) { + /* VCC can be turned off if card is asleep */ + struct regulator *vcc_aux = c->vcc_aux; + + c->vcc_aux = NULL; + if (sleep) + err = twl_mmc23_set_power(dev, slot, 0, 0); + else + err = twl_mmc23_set_power(dev, slot, 1, vdd); + c->vcc_aux = vcc_aux; + } else + err = regulator_set_mode(c->vcc, mode); + if (err) + return err; + return regulator_set_mode(c->vcc_aux, mode); +} + static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata; void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) @@ -390,6 +457,9 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) } else mmc->slots[0].switch_pin = -EINVAL; + mmc->get_context_loss_count = + twl4030_mmc_get_context_loss; + /* write protect normally uses an OMAP gpio */ if (gpio_is_valid(c->gpio_wp)) { gpio_request(c->gpio_wp, "mmc_wp"); @@ -400,6 +470,12 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) } else mmc->slots[0].gpio_wp = -EINVAL; + if (c->nonremovable) + mmc->slots[0].nonremovable = 1; + + if (c->power_saving) + mmc->slots[0].power_saving = 1; + /* NOTE: MMC slots should have a Vcc regulator set up. * This may be from a TWL4030-family chip, another * controllable regulator, or a fixed supply. @@ -412,6 +488,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) case 1: /* on-chip level shifting via PBIAS0/PBIAS1 */ mmc->slots[0].set_power = twl_mmc1_set_power; + mmc->slots[0].set_sleep = twl_mmc1_set_sleep; break; case 2: if (c->ext_clock) @@ -422,6 +499,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) case 3: /* off-chip level shifting, or none */ mmc->slots[0].set_power = twl_mmc23_set_power; + mmc->slots[0].set_sleep = twl_mmc23_set_sleep; break; default: pr_err("MMC%d configuration not supported!\n", c->mmc); diff --git a/arch/arm/mach-omap2/mmc-twl4030.h b/arch/arm/mach-omap2/mmc-twl4030.h index 3807c45c9a6..a47e68563fb 100644 --- a/arch/arm/mach-omap2/mmc-twl4030.h +++ b/arch/arm/mach-omap2/mmc-twl4030.h @@ -12,6 +12,8 @@ struct twl4030_hsmmc_info { bool transceiver; /* MMC-2 option */ bool ext_clock; /* use external pin for input clock */ bool cover_only; /* No card detect - just cover switch */ + bool nonremovable; /* Nonremovable e.g. eMMC */ + bool power_saving; /* Try to sleep or power off when possible */ int gpio_cd; /* or -EINVAL */ int gpio_wp; /* or -EINVAL */ char *name; /* or NULL for default */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ea36186f32c..f982606d7bf 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -596,8 +596,8 @@ void __init mem_init(void) printk(KERN_NOTICE "Memory: %luKB available (%dK code, " "%dK data, %dK init, %luK highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - codesize >> 10, datasize >> 10, initsize >> 10, + nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, + datasize >> 10, initsize >> 10, (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { diff --git a/arch/arm/plat-mxc/include/mach/spi.h b/arch/arm/plat-mxc/include/mach/spi.h new file mode 100644 index 00000000000..08be445e8eb --- /dev/null +++ b/arch/arm/plat-mxc/include/mach/spi.h @@ -0,0 +1,27 @@ + +#ifndef __MACH_SPI_H_ +#define __MACH_SPI_H_ + +/* + * struct spi_imx_master - device.platform_data for SPI controller devices. + * @chipselect: Array of chipselects for this master. 
Numbers >= 0 mean gpio + * pins, numbers < 0 mean internal CSPI chipselects according + * to MXC_SPI_CS(). Normally you want to use gpio based chip + * selects as the CSPI module tries to be intelligent about + * when to assert the chipselect: The CSPI module deasserts the + * chipselect once it runs out of input data. The other problem + * is that it is not possible to mix between high active and low + * active chipselects on one single bus using the internal + * chipselects. Unfortunately Freescale decided to put some + * chipselects on dedicated pins which are not usable as gpios, + * so we have to support the internal chipselects. + * @num_chipselect: ARRAY_SIZE(chipselect) + */ +struct spi_imx_master { + int *chipselect; + int num_chipselect; +}; + +#define MXC_SPI_CS(no) ((no) - 32) + +#endif /* __MACH_SPI_H_*/ diff --git a/arch/arm/plat-omap/include/mach/irqs.h b/arch/arm/plat-omap/include/mach/irqs.h index fb7cb772399..28a165058b6 100644 --- a/arch/arm/plat-omap/include/mach/irqs.h +++ b/arch/arm/plat-omap/include/mach/irqs.h @@ -503,6 +503,7 @@ #define INT_44XX_FPKA_READY_IRQ (50 + IRQ_GIC_START) #define INT_44XX_SHA1MD51_IRQ (51 + IRQ_GIC_START) #define INT_44XX_RNG_IRQ (52 + IRQ_GIC_START) +#define INT_44XX_MMC5_IRQ (59 + IRQ_GIC_START) #define INT_44XX_I2C3_IRQ (61 + IRQ_GIC_START) #define INT_44XX_FPKA_ERROR_IRQ (64 + IRQ_GIC_START) #define INT_44XX_PBIAS_IRQ (75 + IRQ_GIC_START) @@ -511,6 +512,7 @@ #define INT_44XX_TLL_IRQ (78 + IRQ_GIC_START) #define INT_44XX_PARTHASH_IRQ (79 + IRQ_GIC_START) #define INT_44XX_MMC3_IRQ (94 + IRQ_GIC_START) +#define INT_44XX_MMC4_IRQ (96 + IRQ_GIC_START) /* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730/850) and diff --git a/arch/arm/plat-omap/include/mach/lcd_mipid.h b/arch/arm/plat-omap/include/mach/lcd_mipid.h index f8fbc4801e5..8e52c657228 100644 --- a/arch/arm/plat-omap/include/mach/lcd_mipid.h +++ b/arch/arm/plat-omap/include/mach/lcd_mipid.h @@ -16,7 +16,12 @@ enum mipid_test_result { struct mipid_platform_data { int nreset_gpio; int data_lines; + void (*shutdown)(struct mipid_platform_data *pdata); + void (*set_bklight_level)(struct mipid_platform_data *pdata, + int level); + int (*get_bklight_level)(struct mipid_platform_data *pdata); + int (*get_bklight_max)(struct mipid_platform_data *pdata); }; #endif diff --git a/arch/arm/plat-omap/include/mach/mmc.h b/arch/arm/plat-omap/include/mach/mmc.h index 81d5b36534b..7229b959330 100644 --- a/arch/arm/plat-omap/include/mach/mmc.h +++ b/arch/arm/plat-omap/include/mach/mmc.h @@ -25,11 +25,18 @@ #define OMAP24XX_NR_MMC 2 #define OMAP34XX_NR_MMC 3 +#define OMAP44XX_NR_MMC 5 #define OMAP2420_MMC_SIZE OMAP1_MMC_SIZE -#define HSMMC_SIZE 0x200 +#define OMAP3_HSMMC_SIZE 0x200 +#define OMAP4_HSMMC_SIZE 0x1000 #define OMAP2_MMC1_BASE 0x4809c000 #define OMAP2_MMC2_BASE 0x480b4000 #define OMAP3_MMC3_BASE 0x480ad000 +#define OMAP4_MMC4_BASE 0x480d1000 +#define OMAP4_MMC5_BASE 0x480d5000 +#define OMAP4_MMC_REG_OFFSET 0x100 +#define HSMMC5 (1 << 4) +#define HSMMC4 (1 << 3) #define HSMMC3 (1 << 2) #define HSMMC2 (1 << 1) #define HSMMC1 (1 << 0) @@ -59,6 +66,9 @@ struct omap_mmc_platform_data { int (*suspend)(struct device *dev, int slot); int (*resume)(struct device *dev, int slot); + /* Return context loss count due to PM states changing */ + int (*get_context_loss_count)(struct device *dev); + u64 dma_mask; struct omap_mmc_slot_data { @@ -80,12 +90,20 @@ struct omap_mmc_platform_data { /* use the internal clock */ unsigned internal_clock:1; + /* nonremovable e.g. 
eMMC */ + unsigned nonremovable:1; + + /* Try to sleep or power off when possible */ + unsigned power_saving:1; + int switch_pin; /* gpio (card detect) */ int gpio_wp; /* gpio (write protect) */ int (* set_bus_mode)(struct device *dev, int slot, int bus_mode); int (* set_power)(struct device *dev, int slot, int power_on, int vdd); int (* get_ro)(struct device *dev, int slot); + int (*set_sleep)(struct device *dev, int slot, int sleep, + int vdd, int cardsleep); /* return MMC cover switch state, can be NULL if not supported. * diff --git a/arch/arm/plat-omap/include/mach/omapfb.h b/arch/arm/plat-omap/include/mach/omapfb.h index 7b74d1255e0..b226bdf4573 100644 --- a/arch/arm/plat-omap/include/mach/omapfb.h +++ b/arch/arm/plat-omap/include/mach/omapfb.h @@ -276,8 +276,8 @@ typedef int (*omapfb_notifier_callback_t)(struct notifier_block *, void *fbi); struct omapfb_mem_region { - dma_addr_t paddr; - void *vaddr; + u32 paddr; + void __iomem *vaddr; unsigned long size; u8 type; /* OMAPFB_PLANE_MEM_* */ unsigned alloc:1; /* allocated by the driver */ diff --git a/arch/avr32/include/asm/mman.h b/arch/avr32/include/asm/mman.h index 9a92b15f6a6..8eebf89f5ab 100644 --- a/arch/avr32/include/asm/mman.h +++ b/arch/avr32/include/asm/mman.h @@ -1,17 +1 @@ -#ifndef __ASM_AVR32_MMAN_H__ -#define __ASM_AVR32_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __ASM_AVR32_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index e819fa69a90..376f18c4a6c 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -141,7 +141,7 @@ void __init mem_init(void) printk ("Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), + nr_free_pages() << (PAGE_SHIFT - 10), totalram_pages << (PAGE_SHIFT - 10), codesize >> 10, reservedpages << (PAGE_SHIFT - 10), diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h index e7fd0ecd73f..ae4dae1e370 100644 --- a/arch/blackfin/include/asm/sections.h +++ b/arch/blackfin/include/asm/sections.h @@ -1,9 +1,6 @@ #ifndef _BLACKFIN_SECTIONS_H #define _BLACKFIN_SECTIONS_H -/* nothing to see, move along */ -#include <asm-generic/sections.h> - /* only used when MTD_UCLINUX */ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; @@ -15,4 +12,39 @@ extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], _ebss_l2[], _l2_lma_start[]; +#include <asm/mem_map.h> + +/* Blackfin systems have discontinuous memory map and no virtualized memory */ +static inline int arch_is_kernel_text(unsigned long addr) +{ + return + (L1_CODE_LENGTH && + addr >= (unsigned long)_stext_l1 && + addr < (unsigned long)_etext_l1) + || + (L2_LENGTH && + addr >= (unsigned long)_stext_l2 && + addr < (unsigned long)_etext_l2); +} +#define arch_is_kernel_text(addr) arch_is_kernel_text(addr) + +static inline int arch_is_kernel_data(unsigned long addr) +{ 
+ return + (L1_DATA_A_LENGTH && + addr >= (unsigned long)_sdata_l1 && + addr < (unsigned long)_ebss_l1) + || + (L1_DATA_B_LENGTH && + addr >= (unsigned long)_sdata_b_l1 && + addr < (unsigned long)_ebss_b_l1) + || + (L2_LENGTH && + addr >= (unsigned long)_sdata_l2 && + addr < (unsigned long)_ebss_l2); +} +#define arch_is_kernel_data(addr) arch_is_kernel_data(addr) + +#include <asm-generic/sections.h> + #endif diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index c8e7ee4768c..02b1529dad5 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -381,7 +381,7 @@ #define __NR_preadv 366 #define __NR_pwritev 367 #define __NR_rt_tgsigqueueinfo 368 -#define __NR_perf_counter_open 369 +#define __NR_perf_event_open 369 #define __NR_syscall 370 #define NR_syscalls __NR_syscall diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h index bdc330cd0e1..1c58914a874 100644 --- a/arch/blackfin/mach-bf538/include/mach/defBF539.h +++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h @@ -2325,7 +2325,7 @@ #define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */ #define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */ #define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */ -#define CDPRIO 0x0100 /* DMA has priority over core for for external accesses */ +#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */ /* EBIU_AMGCTL Bit Positions */ #define AMCKEN_P 0x0000 /* Enable CLKOUT */ diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 01af24cde36..1e7cac23e25 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table) .long _sys_preadv .long _sys_pwritev .long _sys_rt_tgsigqueueinfo - .long _sys_perf_counter_open + .long _sys_perf_event_open .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall diff --git a/arch/cris/include/asm/mman.h b/arch/cris/include/asm/mman.h index b7f0afba3ce..8eebf89f5ab 100644 --- a/arch/cris/include/asm/mman.h +++ b/arch/cris/include/asm/mman.h @@ -1,19 +1 @@ -#ifndef __CRIS_MMAN_H__ -#define __CRIS_MMAN_H__ - -/* verbatim copy of asm-i386/ version */ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __CRIS_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index 514f46a4b23..ff68b9f516a 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -54,7 +54,7 @@ mem_init(void) printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, " "%dk init)\n" , - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index b86e19c9b5b..4b5830bcbe2 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -7,7 +7,7 @@ 
config FRV default y select HAVE_IDE select HAVE_ARCH_TRACEHOOK - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config ZONE_DMA bool diff --git a/arch/frv/include/asm/mman.h b/arch/frv/include/asm/mman.h index 58c1d11e2ac..8eebf89f5ab 100644 --- a/arch/frv/include/asm/mman.h +++ b/arch/frv/include/asm/mman.h @@ -1,18 +1 @@ -#ifndef __ASM_MMAN_H__ -#define __ASM_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __ASM_MMAN_H__ */ - +#include <asm-generic/mman.h> diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_event.h index ccf726e61b2..a69e0155d14 100644 --- a/arch/frv/include/asm/perf_counter.h +++ b/arch/frv/include/asm/perf_event.h @@ -1,4 +1,4 @@ -/* FRV performance counter support +/* FRV performance event support * * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -9,9 +9,9 @@ * 2 of the Licence, or (at your option) any later version. */ -#ifndef _ASM_PERF_COUNTER_H -#define _ASM_PERF_COUNTER_H +#ifndef _ASM_PERF_EVENT_H +#define _ASM_PERF_EVENT_H -#define PERF_COUNTER_INDEX_OFFSET 0 +#define PERF_EVENT_INDEX_OFFSET 0 -#endif /* _ASM_PERF_COUNTER_H */ +#endif /* _ASM_PERF_EVENT_H */ diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index 4a8fb427ce0..be6ef0f5cd4 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h @@ -342,7 +342,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #ifdef __KERNEL__ diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index fde1e446b44..189397ec012 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -1525,6 +1525,6 @@ sys_call_table: .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open syscall_table_size = (. - sys_call_table) diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index 0a377210c89..f4709756d0d 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile @@ -5,4 +5,4 @@ lib-y := \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o diff --git a/arch/frv/lib/cache.S b/arch/frv/lib/cache.S index 0e10ad8dc46..0c4fb204911 100644 --- a/arch/frv/lib/cache.S +++ b/arch/frv/lib/cache.S @@ -1,4 +1,4 @@ -/* cache.S: cache managment routines +/* cache.S: cache management routines * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_event.c index 2000feecd57..9ac5acfd2e9 100644 --- a/arch/frv/lib/perf_counter.c +++ b/arch/frv/lib/perf_event.c @@ -1,4 +1,4 @@ -/* Performance counter handling +/* Performance event handling * * Copyright (C) 2009 Red Hat, Inc. 
All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -9,11 +9,11 @@ * 2 of the Licence, or (at your option) any later version. */ -#include <linux/perf_counter.h> +#include <linux/perf_event.h> /* - * mark the performance counter as pending + * mark the performance event as pending */ -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { } diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h index 9d7f7a7462b..c2e1aa0f0d1 100644 --- a/arch/h8300/include/asm/hardirq.h +++ b/arch/h8300/include/asm/hardirq.h @@ -1,18 +1,7 @@ #ifndef __H8300_HARDIRQ_H #define __H8300_HARDIRQ_H -#include <linux/kernel.h> -#include <linux/threads.h> -#include <linux/interrupt.h> -#include <linux/irq.h> - -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - -extern void ack_bad_irq(unsigned int irq); +#include <asm/irq.h> #define HARDIRQ_BITS 8 @@ -25,4 +14,6 @@ extern void ack_bad_irq(unsigned int irq); # error HARDIRQ_BITS is too low! #endif +#include <asm-generic/hardirq.h> + #endif diff --git a/arch/h8300/include/asm/mman.h b/arch/h8300/include/asm/mman.h index cf35f0a6f12..8eebf89f5ab 100644 --- a/arch/h8300/include/asm/mman.h +++ b/arch/h8300/include/asm/mman.h @@ -1,17 +1 @@ -#ifndef __H8300_MMAN_H__ -#define __H8300_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __H8300_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 74f8dd7b34d..5c913d47211 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -81,11 +81,6 @@ struct irq_chip h8300irq_chip = { .end = h8300_end_irq, }; -void ack_bad_irq(unsigned int irq) -{ - printk("unexpected IRQ trap at vector %02x\n", irq); -} - #if defined(CONFIG_RAMKERNEL) static unsigned long __init *get_vector_address(void) { diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c index e7c6e614a75..2193a2e2859 100644 --- a/arch/h8300/kernel/timer/tpu.c +++ b/arch/h8300/kernel/timer/tpu.c @@ -7,7 +7,6 @@ * */ -#include <linux/config.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 011a1cdf0eb..6851e52ed5a 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -500,6 +500,10 @@ config HAVE_ARCH_NODEDATA_EXTENSION def_bool y depends on NUMA +config ARCH_PROC_KCORE_TEXT + def_bool y + depends on PROC_KCORE + config IA32_SUPPORT bool "Support for Linux/x86 binaries" help diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 16ef61a91d9..625ed8f76fc 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1270,7 +1270,7 @@ putreg (struct task_struct *child, int regno, unsigned int value) case PT_CS: if (value != __USER_CS) printk(KERN_ERR - "ia32.putreg: attempt to to set invalid segment register %d = %x\n", + "ia32.putreg: attempt to set invalid segment register 
%d = %x\n", regno, value); break; default: diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h index 48cf8b98a0b..4459028e5aa 100644 --- a/arch/ia64/include/asm/mman.h +++ b/arch/ia64/include/asm/mman.h @@ -8,19 +8,9 @@ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co */ -#include <asm-generic/mman-common.h> +#include <asm-generic/mman.h> -#define MAP_GROWSDOWN 0x00100 /* stack-like segment */ -#define MAP_GROWSUP 0x00200 /* register stack-like segment */ -#define MAP_DENYWRITE 0x00800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */ -#define MAP_LOCKED 0x02000 /* pages are locked */ -#define MAP_NORESERVE 0x04000 /* don't check for reservations */ -#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ +#define MAP_GROWSUP 0x0200 /* register stack-like segment */ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b115b3bbf04..1857766a63c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -617,7 +617,6 @@ mem_init (void) long reserved_pages, codesize, datasize, initsize; pg_data_t *pgdat; int i; - static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); @@ -639,10 +638,6 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); - kclist_add(&kcore_kernel, _stext, _end - _stext); - for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); @@ -655,7 +650,7 @@ mem_init (void) initsize = (unsigned long) __init_end - (unsigned long) __init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, " - "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), + "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10), num_physpages << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index cabba332cc4..c41234f1b82 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -41,6 +41,12 @@ config HZ int default 100 +config GENERIC_TIME + def_bool y + +config ARCH_USES_GETTIMEOFFSET + def_bool y + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h index cb8aa762f23..4c31c0ae215 100644 --- a/arch/m32r/include/asm/hardirq.h +++ b/arch/m32r/include/asm/hardirq.h @@ -2,14 +2,7 @@ #ifndef __ASM_HARDIRQ_H #define __ASM_HARDIRQ_H -#include <linux/threads.h> -#include <linux/irq.h> - -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +#include <asm/irq.h> #if NR_IRQS > 256 #define HARDIRQ_BITS 9 @@ -26,11 +19,7 @@ typedef struct { # error HARDIRQ_BITS is too low! 
#endif -static inline void ack_bad_irq(int irq) -{ - printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); - BUG(); -} +#include <asm-generic/hardirq.h> #endif /* __ASM_HARDIRQ_H */ #endif /* __KERNEL__ */ diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h index 04a5f40aa40..8eebf89f5ab 100644 --- a/arch/m32r/include/asm/mman.h +++ b/arch/m32r/include/asm/mman.h @@ -1,17 +1 @@ -#ifndef __M32R_MMAN_H__ -#define __M32R_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __M32R_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 98b8feb12ed..98682bba0ed 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, struct user * dummy = NULL; #endif - if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3)) + if ((off & 3) || off > sizeof(struct user) - 3) return -EIO; off >>= 2; @@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off, struct user * dummy = NULL; #endif - if ((off & 3) || off < 0 || - off > sizeof(struct user) - 3) + if ((off & 3) || off > sizeof(struct user) - 3) return -EIO; off >>= 2; diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index 2547d6c4a82..655ea1c47a0 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (!physid_isset(phys_id, phys_cpu_present_map)) continue; - if ((max_cpus >= 0) && (max_cpus <= cpucount + 1)) + if (max_cpus <= cpucount + 1) continue; do_boot_cpu(phys_id); diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index cada3ba4b99..ba61c4c7320 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -48,7 +48,7 @@ extern void smp_local_timer_interrupt(void); static unsigned long latch; -static unsigned long do_gettimeoffset(void) +u32 arch_gettimeoffset(void) { unsigned long elapsed_time = 0; /* [us] */ @@ -93,79 +93,10 @@ static unsigned long do_gettimeoffset(void) #error no chip configuration #endif - return elapsed_time; + return elapsed_time * 1000; } /* - * This version of gettimeofday has near microsecond resolution. - */ -void do_gettimeofday(struct timeval *tv) -{ - unsigned long seq; - unsigned long usec, sec; - unsigned long max_ntp_tick = tick_usec - tickadj; - - do { - seq = read_seqbegin(&xtime_lock); - - usec = do_gettimeoffset(); - - /* - * If time_adjust is negative then NTP is slowing the clock - * so make sure not to go into next possible interval. - * Better to lose some accuracy than have time go backwards.. 
- */ - if (unlikely(time_adjust < 0)) - usec = min(usec, max_ntp_tick); - - sec = xtime.tv_sec; - usec += (xtime.tv_nsec / 1000); - } while (read_seqretry(&xtime_lock, seq)); - - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -EXPORT_SYMBOL(do_gettimeofday); - -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - /* - * This is revolting. We need to set "xtime" correctly. However, the - * value in this location is the value at the most recent update of - * wall time. Discover what correction gettimeofday() would have - * made, and then undo it! - */ - nsec -= do_gettimeoffset() * NSEC_PER_USEC; - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - write_sequnlock_irq(&xtime_lock); - clock_was_set(); - - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - -/* * In order to set the CMOS clock precisely, set_rtc_mmss has to be * called 500 ms after the second nowtime has started, because when * nowtime is written into the registers of the CMOS clock, it will @@ -192,6 +123,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) #ifndef CONFIG_SMP profile_tick(CPU_PROFILING); #endif + /* XXX FIXME. Uh, the xtime_lock should be held here, no? */ do_timer(1); #ifndef CONFIG_SMP diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index 24d429f9358..9f581df3952 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c @@ -171,7 +171,7 @@ void __init mem_init(void) printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index fb87c08c6b5..29dd8489ffe 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -58,6 +58,12 @@ config HZ int default 100 +config GENERIC_TIME + def_bool y + +config ARCH_USES_GETTIMEOFFSET + def_bool y + mainmenu "Linux/68k Kernel Configuration" source "init/Kconfig" diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h index 394ee946015..554f65b6cd3 100644 --- a/arch/m68k/include/asm/hardirq_mm.h +++ b/arch/m68k/include/asm/hardirq_mm.h @@ -1,16 +1,8 @@ #ifndef __M68K_HARDIRQ_H #define __M68K_HARDIRQ_H -#include <linux/threads.h> -#include <linux/cache.h> - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - #define HARDIRQ_BITS 8 +#include <asm-generic/hardirq.h> + #endif diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h index 9f5c4c4b3c7..8eebf89f5ab 100644 --- a/arch/m68k/include/asm/mman.h +++ b/arch/m68k/include/asm/mman.h @@ -1,17 +1 @@ -#ifndef __M68K_MMAN_H__ -#define __M68K_MMAN_H__ - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are 
locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __M68K_MMAN_H__ */ +#include <asm-generic/mman.h> diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 946d8691f2b..48b87f5ced5 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -335,7 +335,7 @@ #define __NR_preadv 329 #define __NR_pwritev 330 #define __NR_rt_tgsigqueueinfo 331 -#define __NR_perf_counter_open 332 +#define __NR_perf_event_open 332 #ifdef __KERNEL__ diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 922f52e7ed1..c5b33634c98 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -756,5 +756,5 @@ sys_call_table: .long sys_preadv .long sys_pwritev /* 330 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 54d980795fc..17dc2a31a7c 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -91,77 +91,11 @@ void __init time_init(void) mach_sched_init(timer_interrupt); } -/* - * This version of gettimeofday has near microsecond resolution. - */ -void do_gettimeofday(struct timeval *tv) +u32 arch_gettimeoffset(void) { - unsigned long flags; - unsigned long seq; - unsigned long usec, sec; - unsigned long max_ntp_tick = tick_usec - tickadj; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - - usec = mach_gettimeoffset(); - - /* - * If time_adjust is negative then NTP is slowing the clock - * so make sure not to go into next possible interval. - * Better to lose some accuracy than have time go backwards.. - */ - if (unlikely(time_adjust < 0)) - usec = min(usec, max_ntp_tick); - - sec = xtime.tv_sec; - usec += xtime.tv_nsec/1000; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - - - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -EXPORT_SYMBOL(do_gettimeofday); - -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - /* This is revolting. We need to set the xtime.tv_nsec - * correctly. However, the value in this location is - * is value at the last tick. - * Discover what correction gettimeofday - * would have done, and then undo it! 
- */ - nsec -= 1000 * mach_gettimeoffset(); - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - write_sequnlock_irq(&xtime_lock); - clock_was_set(); - return 0; + return mach_gettimeoffset() * 1000; } -EXPORT_SYMBOL(do_settimeofday); - - static int __init rtc_init(void) { struct platform_device *pdev; diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 0007b2adf3a..774549accd2 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -126,7 +126,7 @@ void __init mem_init(void) #endif printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), totalram_pages << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10), diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index 0ae123e0898..23535cc415a 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S @@ -350,7 +350,7 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev /* 330 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 0b852327c0e..cb05a07e55e 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -381,7 +381,7 @@ #define __NR_preadv 363 /* new */ #define __NR_pwritev 364 /* new */ #define __NR_rt_tgsigqueueinfo 365 /* new */ -#define __NR_perf_counter_open 366 /* new */ +#define __NR_perf_event_open 366 /* new */ #define __NR_syscalls 367 diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 457216097df..ecec1915513 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -370,4 +370,4 @@ ENTRY(sys_call_table) .long sys_ni_syscall .long sys_ni_syscall .long sys_rt_tgsigqueueinfo /* 365 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index f207f1a94db..1110784eb3f 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -204,7 +204,7 @@ void __init mem_init(void) totalram_pages += free_all_bootmem(); printk(KERN_INFO "Memory: %luk/%luk available\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10)); #ifdef CONFIG_MMU mem_init_done = 1; diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h index e4d6f1fb1cf..a2250f390a2 100644 --- a/arch/mips/include/asm/mman.h +++ b/arch/mips/include/asm/mman.h @@ -46,6 +46,8 @@ #define MAP_LOCKED 0x8000 /* pages are locked */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ /* * Flags for msync @@ -71,6 +73,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical 
pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 1a9f9b25755..d6eb6134abe 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -76,6 +76,16 @@ extern unsigned long zero_page_mask; #define ZERO_PAGE(vaddr) \ (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) +#define is_zero_pfn is_zero_pfn +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + extern void paging_init(void); /* diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index e753a777949..8c9dfa9e901 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -353,7 +353,7 @@ #define __NR_preadv (__NR_Linux + 330) #define __NR_pwritev (__NR_Linux + 331) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) -#define __NR_perf_counter_open (__NR_Linux + 333) +#define __NR_perf_event_open (__NR_Linux + 333) #define __NR_accept4 (__NR_Linux + 334) /* @@ -664,7 +664,7 @@ #define __NR_preadv (__NR_Linux + 289) #define __NR_pwritev (__NR_Linux + 290) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) -#define __NR_perf_counter_open (__NR_Linux + 292) +#define __NR_perf_event_open (__NR_Linux + 292) #define __NR_accept4 (__NR_Linux + 293) /* @@ -979,7 +979,7 @@ #define __NR_preadv (__NR_Linux + 293) #define __NR_pwritev (__NR_Linux + 294) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) -#define __NR_perf_counter_open (__NR_Linux + 296) +#define __NR_perf_event_open (__NR_Linux + 296) #define __NR_accept4 (__NR_Linux + 297) /* diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7c2de4f091c..fd2a9bb620d 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -581,7 +581,7 @@ einval: li v0, -ENOSYS sys sys_preadv 6 /* 4330 */ sys sys_pwritev 6 sys sys_rt_tgsigqueueinfo 4 - sys sys_perf_counter_open 5 + sys sys_perf_event_open 5 sys sys_accept4 4 .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index b97b993846d..18bf7f32c5e 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -418,6 +418,6 @@ sys_call_table: PTR sys_preadv PTR sys_pwritev /* 5390 */ PTR sys_rt_tgsigqueueinfo - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 1a6ae124635..6ebc0797669 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -416,6 +416,6 @@ EXPORT(sysn32_call_table) PTR sys_preadv PTR sys_pwritev PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index cd31087a651..9bbf9775e0b 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -536,6 +536,6 @@ sys_call_table: PTR compat_sys_preadv /* 4330 */ PTR compat_sys_pwritev PTR compat_sys_rt_tgsigqueueinfo - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 38c79c55b06..15aa1902a78 100644 --- 
a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -352,7 +352,6 @@ void __init paging_init(void) free_area_init_nodes(max_zone_pfns); } -static struct kcore_list kcore_mem, kcore_vmalloc; #ifdef CONFIG_64BIT static struct kcore_list kcore_kseg0; #endif @@ -409,15 +408,13 @@ void __init mem_init(void) if ((unsigned long) &_text > (unsigned long) CKSEG0) /* The -4 is a hack so that user tools don't have to handle the overflow. */ - kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4); + kclist_add(&kcore_kseg0, (void *) CKSEG0, + 0x80000000 - 4, KCORE_TEXT); #endif - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); - kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), ram << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h index 2db746a251f..1a55d61f0d0 100644 --- a/arch/mn10300/include/asm/cacheflush.h +++ b/arch/mn10300/include/asm/cacheflush.h @@ -17,7 +17,7 @@ #include <linux/mm.h> /* - * virtually-indexed cache managment (our cache is physically indexed) + * virtually-indexed cache management (our cache is physically indexed) */ #define flush_cache_all() do {} while (0) #define flush_cache_mm(mm) do {} while (0) @@ -31,7 +31,7 @@ #define flush_dcache_mmap_unlock(mapping) do {} while (0) /* - * physically-indexed cache managment + * physically-indexed cache management */ #ifndef CONFIG_MN10300_CACHE_DISABLED diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h index d04fac1da5a..8eebf89f5ab 100644 --- a/arch/mn10300/include/asm/mman.h +++ b/arch/mn10300/include/asm/mman.h @@ -1,28 +1 @@ -/* MN10300 Constants for mmap and co. - * - * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd. - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * - Derived from asm-x86/mman.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ -#ifndef _ASM_MMAN_H -#define _ASM_MMAN_H - -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* _ASM_MMAN_H */ +#include <asm-generic/mman.h> diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index fad68616af3..2a983931c11 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h @@ -347,7 +347,7 @@ #define __NR_preadv 334 #define __NR_pwritev 335 #define __NR_rt_tgsigqueueinfo 336 -#define __NR_perf_counter_open 337 +#define __NR_perf_event_open 337 #ifdef __KERNEL__ diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index e0d2563af4f..a94e7ea3faa 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -723,7 +723,7 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev /* 335 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open nr_syscalls=(.-sys_call_table)/4 diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index 79890edfd67..3f24c298a3a 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c @@ -285,7 +285,7 @@ static void c_stop(struct seq_file *m, void *v) { } -struct seq_operations cpuinfo_op = { +const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index 8cee387a24f..ec1420562dc 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -112,7 +112,7 @@ void __init mem_init(void) "Memory: %luk/%luk available" " (%dk kernel code, %dk reserved, %dk data, %dk init," " %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), + nr_free_pages() << (PAGE_SHIFT - 10), max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, reservedpages << (PAGE_SHIFT - 10), diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 06f8d5b5b0f..f388dc68f60 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -16,7 +16,7 @@ config PARISC select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE select BUG - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index defe752cc99..9749c8afe83 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -22,6 +22,8 @@ #define MAP_GROWSDOWN 0x8000 /* stack-like segment */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ @@ -54,6 +56,9 @@ #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ +#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 
66 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 #define MAP_VARIABLE 0 diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h deleted file mode 100644 index dc9e829f701..00000000000 --- a/arch/parisc/include/asm/perf_counter.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_PARISC_PERF_COUNTER_H -#define __ASM_PARISC_PERF_COUNTER_H - -/* parisc only supports software counters through this interface. */ -static inline void set_perf_counter_pending(void) { } - -#endif /* __ASM_PARISC_PERF_COUNTER_H */ diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h new file mode 100644 index 00000000000..cc146427d8f --- /dev/null +++ b/arch/parisc/include/asm/perf_event.h @@ -0,0 +1,7 @@ +#ifndef __ASM_PARISC_PERF_EVENT_H +#define __ASM_PARISC_PERF_EVENT_H + +/* parisc only supports software events through this interface. */ +static inline void set_perf_event_pending(void) { } + +#endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index f3d3b8b012c..cda158318c6 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -810,9 +810,9 @@ #define __NR_preadv (__NR_Linux + 315) #define __NR_pwritev (__NR_Linux + 316) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) -#define __NR_perf_counter_open (__NR_Linux + 318) +#define __NR_perf_event_open (__NR_Linux + 318) -#define __NR_Linux_syscalls (__NR_perf_counter_open + 1) +#define __NR_Linux_syscalls (__NR_perf_event_open + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index cf145eb026b..843f423dec6 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -416,7 +416,7 @@ ENTRY_COMP(preadv) /* 315 */ ENTRY_COMP(pwritev) ENTRY_COMP(rt_tgsigqueueinfo) - ENTRY_SAME(perf_counter_open) + ENTRY_SAME(perf_event_open) /* Nothing yet */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index b0831d9e35c..d5aca31fddb 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -506,7 +506,7 @@ void __init mem_init(void) #endif printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8250902265c..4fd479059d6 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -129,7 +129,7 @@ config PPC select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config EARLY_PRINTK bool diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts index f32c2811c6d..855782c5e5e 100644 --- a/arch/powerpc/boot/dts/mpc8377_mds.dts +++ b/arch/powerpc/boot/dts/mpc8377_mds.dts @@ -159,6 +159,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <0>; }; diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts index 28e022ac417..9e2264b1000 100644 --- a/arch/powerpc/boot/dts/mpc8377_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts @@ -173,6 +173,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + 
sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <111111111>; }; diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts index 3febc4e91b1..9a603695723 100644 --- a/arch/powerpc/boot/dts/mpc8377_wlan.dts +++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts @@ -150,6 +150,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; clock-frequency = <133333333>; }; }; diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts index f720ab9af30..f70cf600083 100644 --- a/arch/powerpc/boot/dts/mpc8378_mds.dts +++ b/arch/powerpc/boot/dts/mpc8378_mds.dts @@ -159,6 +159,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <0>; }; diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts index a11ead8214b..4e6a1a407bb 100644 --- a/arch/powerpc/boot/dts/mpc8378_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts @@ -173,6 +173,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <111111111>; }; diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts index 4fa221fd9bd..645ec51cc6e 100644 --- a/arch/powerpc/boot/dts/mpc8379_mds.dts +++ b/arch/powerpc/boot/dts/mpc8379_mds.dts @@ -157,6 +157,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <0>; }; diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts index e35dfba587c..72336d50452 100644 --- a/arch/powerpc/boot/dts/mpc8379_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts @@ -171,6 +171,7 @@ reg = <0x2e000 0x1000>; interrupts = <42 0x8>; interrupt-parent = <&ipic>; + sdhci,wp-inverted; /* Filled in by U-Boot */ clock-frequency = <111111111>; }; diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index e73d554538d..abbc2aaaced 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags) */ struct irq_chip; -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PPC64 -static inline unsigned long test_perf_counter_pending(void) +static inline unsigned long test_perf_event_pending(void) { unsigned long x; asm volatile("lbz %0,%1(13)" : "=r" (x) - : "i" (offsetof(struct paca_struct, perf_counter_pending))); + : "i" (offsetof(struct paca_struct, perf_event_pending))); return x; } -static inline void set_perf_counter_pending(void) +static inline void set_perf_event_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (1), - "i" (offsetof(struct paca_struct, perf_counter_pending))); + "i" (offsetof(struct paca_struct, perf_event_pending))); } -static inline void clear_perf_counter_pending(void) +static inline void clear_perf_event_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (0), - "i" (offsetof(struct paca_struct, perf_counter_pending))); + "i" (offsetof(struct paca_struct, perf_event_pending))); } #endif /* CONFIG_PPC64 */ -#else /* CONFIG_PERF_COUNTERS */ +#else /* CONFIG_PERF_EVENTS */ -static inline unsigned long test_perf_counter_pending(void) +static inline unsigned long test_perf_event_pending(void) { return 0; } -static inline void clear_perf_counter_pending(void) {} -#endif /* CONFIG_PERF_COUNTERS */ 
+static inline void clear_perf_event_pending(void) {} +#endif /* CONFIG_PERF_EVENTS */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 7b1c49811a2..d4a7f645c5d 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -25,6 +25,8 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #ifdef __KERNEL__ #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b634456ea89..7d8514cecea 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -122,7 +122,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ - u8 perf_counter_pending; /* PM interrupt while soft-disabled */ + u8 perf_event_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h index 0ea0639fcf7..3288ce3997e 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -1,5 +1,5 @@ /* - * Performance counter support - PowerPC-specific definitions. + * Performance event support - PowerPC-specific definitions. * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * @@ -12,7 +12,7 @@ #include <asm/hw_irq.h> -#define MAX_HWCOUNTERS 8 +#define MAX_HWEVENTS 8 #define MAX_EVENT_ALTERNATIVES 8 #define MAX_LIMITED_HWCOUNTERS 2 @@ -28,12 +28,12 @@ struct power_pmu { unsigned long test_adder; int (*compute_mmcr)(u64 events[], int n_ev, unsigned int hwc[], unsigned long mmcr[]); - int (*get_constraint)(u64 event, unsigned long *mskp, + int (*get_constraint)(u64 event_id, unsigned long *mskp, unsigned long *valp); - int (*get_alternatives)(u64 event, unsigned int flags, + int (*get_alternatives)(u64 event_id, unsigned int flags, u64 alt[]); void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); - int (*limited_pmc_event)(u64 event); + int (*limited_pmc_event)(u64 event_id); u32 flags; int n_generic; int *generic_events; @@ -61,10 +61,10 @@ struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -#define PERF_COUNTER_INDEX_OFFSET 1 +#define PERF_EVENT_INDEX_OFFSET 1 /* - * Only override the default definitions in include/linux/perf_counter.h + * Only override the default definitions in include/linux/perf_event.h * if we have hardware PMU support. */ #ifdef CONFIG_PPC_PERF_CTRS @@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); /* * The power_pmu.get_constraint function returns a 32/64-bit value and - * a 32/64-bit mask that express the constraints between this event and + * a 32/64-bit mask that express the constraints between this event_id and * other events. * * The value and mask are divided up into (non-overlapping) bitfields * of three different types: * * Select field: this expresses the constraint that some set of bits - * in MMCR* needs to be set to a specific value for this event. 
For a + * in MMCR* needs to be set to a specific value for this event_id. For a * select field, the mask contains 1s in every bit of the field, and * the value contains a unique value for each possible setting of the * MMCR* bits. The constraint checking code will ensure that two events @@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); * possible.) For N classes, the field is N+1 bits wide, and each class * is assigned one bit from the least-significant N bits. The mask has * only the most-significant bit set, and the value has only the bit - * for the event's class set. The test_adder has the least significant + * for the event_id's class set. The test_adder has the least significant * bit set in the field. * - * If an event is not subject to the constraint expressed by a particular + * If an event_id is not subject to the constraint expressed by a particular * field, then it will have 0 in both the mask and value for that field. */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index ed24bd92fe4..c7d671a7d9a 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) -SYSCALL_SPU(perf_counter_open) +SYSCALL_SPU(perf_event_open) COMPAT_SYS_SPU(preadv) COMPAT_SYS_SPU(pwritev) COMPAT_SYS(rt_tgsigqueueinfo) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index cef080bfc60..f6ca7617676 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -341,7 +341,7 @@ #define __NR_dup3 316 #define __NR_pipe2 317 #define __NR_inotify_init1 318 -#define __NR_perf_counter_open 319 +#define __NR_perf_event_open 319 #define __NR_preadv 320 #define __NR_pwritev 321 #define __NR_rt_tgsigqueueinfo 322 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 569f79ccd31..b23664a0b86 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o -obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o +obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ power5+-pmu.o power6-pmu.o power7-pmu.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f0df285f0f8..0812b0f414b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -133,7 +133,7 @@ int main(void) DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); - DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); + DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); #ifdef CONFIG_PPC_MM_SLICES DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 66bcda34a6b..900e0eea009 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 2: TRACE_AND_RESTORE_IRQ(r5); -#ifdef CONFIG_PERF_COUNTERS - /* 
check paca->perf_counter_pending if we're enabling ints */ +#ifdef CONFIG_PERF_EVENTS + /* check paca->perf_event_pending if we're enabling ints */ lbz r3,PACAPERFPEND(r13) and. r3,r3,r5 beq 27f - bl .perf_counter_do_pending + bl .perf_event_do_pending 27: -#endif /* CONFIG_PERF_COUNTERS */ +#endif /* CONFIG_PERF_EVENTS */ /* extract EE bit and use it to restore paca->hard_enabled */ ld r3,_MSR(r1) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f7f376ea7b1..e5d12117798 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -53,7 +53,7 @@ #include <linux/bootmem.h> #include <linux/pci.h> #include <linux/debugfs.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en) } #endif /* CONFIG_PPC_STD_MMU_64 */ - if (test_perf_counter_pending()) { - clear_perf_counter_pending(); - perf_counter_do_pending(); + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); } /* diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index cc466d039af..09d72028f31 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index f74b62c6751..0a03cf70d24 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c @@ -10,7 +10,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/uaccess.h> #include <linux/mm.h> diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_event.c index 7ceefaf3a7f..bbcbae183e9 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1,5 +1,5 @@ /* - * Performance counter support - powerpc architecture code + * Performance event support - powerpc architecture code * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 
* @@ -10,7 +10,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/reg.h> @@ -19,24 +19,24 @@ #include <asm/firmware.h> #include <asm/ptrace.h> -struct cpu_hw_counters { - int n_counters; +struct cpu_hw_events { + int n_events; int n_percpu; int disabled; int n_added; int n_limited; u8 pmcs_enabled; - struct perf_counter *counter[MAX_HWCOUNTERS]; - u64 events[MAX_HWCOUNTERS]; - unsigned int flags[MAX_HWCOUNTERS]; + struct perf_event *event[MAX_HWEVENTS]; + u64 events[MAX_HWEVENTS]; + unsigned int flags[MAX_HWEVENTS]; unsigned long mmcr[3]; - struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; + struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; - u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; - unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; - unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; + u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; + unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; + unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; }; -DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); struct power_pmu *ppmu; @@ -47,7 +47,7 @@ struct power_pmu *ppmu; * where the hypervisor bit is forced to 1 (as on Apple G5 processors), * then we need to use the FCHV bit to ignore kernel events. */ -static unsigned int freeze_counters_kernel = MMCR0_FCS; +static unsigned int freeze_events_kernel = MMCR0_FCS; /* * 32-bit doesn't have MMCRA but does have an MMCR2, @@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs) if (ppmu->flags & PPMU_ALT_SIPR) { if (mmcra & POWER6_MMCRA_SIHV) - return PERF_EVENT_MISC_HYPERVISOR; + return PERF_RECORD_MISC_HYPERVISOR; return (mmcra & POWER6_MMCRA_SIPR) ? - PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; + PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; } if (mmcra & MMCRA_SIHV) - return PERF_EVENT_MISC_HYPERVISOR; - return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER : - PERF_EVENT_MISC_KERNEL; + return PERF_RECORD_MISC_HYPERVISOR; + return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER : + PERF_RECORD_MISC_KERNEL; } /* @@ -152,9 +152,9 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs) #endif /* CONFIG_PPC64 */ -static void perf_counter_interrupt(struct pt_regs *regs); +static void perf_event_interrupt(struct pt_regs *regs); -void perf_counter_print_debug(void) +void perf_event_print_debug(void) { } @@ -240,15 +240,15 @@ static void write_pmc(int idx, unsigned long val) * Check if a set of events can all go on the PMU at once. * If they can't, this will look at alternative codes for the events * and see if any combination of alternative codes is feasible. - * The feasible set is returned in event[]. + * The feasible set is returned in event_id[]. 
*/ -static int power_check_constraints(struct cpu_hw_counters *cpuhw, - u64 event[], unsigned int cflags[], +static int power_check_constraints(struct cpu_hw_events *cpuhw, + u64 event_id[], unsigned int cflags[], int n_ev) { unsigned long mask, value, nv; - unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; - int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; + unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; + int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; int i, j; unsigned long addf = ppmu->add_fields; unsigned long tadd = ppmu->test_adder; @@ -259,12 +259,12 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw, /* First see if the events will go on as-is */ for (i = 0; i < n_ev; ++i) { if ((cflags[i] & PPMU_LIMITED_PMC_REQD) - && !ppmu->limited_pmc_event(event[i])) { - ppmu->get_alternatives(event[i], cflags[i], + && !ppmu->limited_pmc_event(event_id[i])) { + ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); - event[i] = cpuhw->alternatives[i][0]; + event_id[i] = cpuhw->alternatives[i][0]; } - if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0], + if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], &cpuhw->avalues[i][0])) return -1; } @@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw, return -1; for (i = 0; i < n_ev; ++i) { choice[i] = 0; - n_alt[i] = ppmu->get_alternatives(event[i], cflags[i], + n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); for (j = 1; j < n_alt[i]; ++j) ppmu->get_constraint(cpuhw->alternatives[i][j], @@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw, j = choice[i]; } /* - * See if any alternative k for event i, + * See if any alternative k for event_id i, * where k > j, will satisfy the constraints. */ while (++j < n_alt[i]) { @@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw, if (j >= n_alt[i]) { /* * No feasible alternative, backtrack - * to event i-1 and continue enumerating its + * to event_id i-1 and continue enumerating its * alternatives from where we got up to. */ if (--i < 0) return -1; } else { /* - * Found a feasible alternative for event i, - * remember where we got up to with this event, - * go on to the next event, and start with + * Found a feasible alternative for event_id i, + * remember where we got up to with this event_id, + * go on to the next event_id, and start with * the first alternative for it. */ choice[i] = j; @@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw, /* OK, we have a feasible combination, tell the caller the solution */ for (i = 0; i < n_ev; ++i) - event[i] = cpuhw->alternatives[i][choice[i]]; + event_id[i] = cpuhw->alternatives[i][choice[i]]; return 0; } /* - * Check if newly-added counters have consistent settings for + * Check if newly-added events have consistent settings for * exclude_{user,kernel,hv} with each other and any previously - * added counters. + * added events. 
*/ -static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], +static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; int i, n, first; - struct perf_counter *counter; + struct perf_event *event; n = n_prev + n_new; if (n <= 1) @@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], cflags[i] &= ~PPMU_LIMITED_PMC_REQD; continue; } - counter = ctrs[i]; + event = ctrs[i]; if (first) { - eu = counter->attr.exclude_user; - ek = counter->attr.exclude_kernel; - eh = counter->attr.exclude_hv; + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; first = 0; - } else if (counter->attr.exclude_user != eu || - counter->attr.exclude_kernel != ek || - counter->attr.exclude_hv != eh) { + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { return -EAGAIN; } } @@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], return 0; } -static void power_pmu_read(struct perf_counter *counter) +static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; - if (!counter->hw.idx) + if (!event->hw.idx) return; /* * Performance monitor interrupts come even when interrupts @@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter) * Therefore we treat them like NMIs. */ do { - prev = atomic64_read(&counter->hw.prev_count); + prev = atomic64_read(&event->hw.prev_count); barrier(); - val = read_pmc(counter->hw.idx); - } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev); + val = read_pmc(event->hw.idx); + } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); /* The counters are only 32 bits wide */ delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); - atomic64_sub(delta, &counter->hw.period_left); + atomic64_add(delta, &event->count); + atomic64_sub(delta, &event->hw.period_left); } /* * On some machines, PMC5 and PMC6 can't be written, don't respect * the freeze conditions, and don't generate interrupts. This tells - * us if `counter' is using such a PMC. + * us if `event' is using such a PMC. */ static int is_limited_pmc(int pmcnum) { @@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum) && (pmcnum == 5 || pmcnum == 6); } -static void freeze_limited_counters(struct cpu_hw_counters *cpuhw, +static void freeze_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { - struct perf_counter *counter; + struct perf_event *event; u64 val, prev, delta; int i; for (i = 0; i < cpuhw->n_limited; ++i) { - counter = cpuhw->limited_counter[i]; - if (!counter->hw.idx) + event = cpuhw->limited_counter[i]; + if (!event->hw.idx) continue; - val = (counter->hw.idx == 5) ? pmc5 : pmc6; - prev = atomic64_read(&counter->hw.prev_count); - counter->hw.idx = 0; + val = (event->hw.idx == 5) ? 
pmc5 : pmc6; + prev = atomic64_read(&event->hw.prev_count); + event->hw.idx = 0; delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); + atomic64_add(delta, &event->count); } } -static void thaw_limited_counters(struct cpu_hw_counters *cpuhw, +static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { - struct perf_counter *counter; + struct perf_event *event; u64 val; int i; for (i = 0; i < cpuhw->n_limited; ++i) { - counter = cpuhw->limited_counter[i]; - counter->hw.idx = cpuhw->limited_hwidx[i]; - val = (counter->hw.idx == 5) ? pmc5 : pmc6; - atomic64_set(&counter->hw.prev_count, val); - perf_counter_update_userpage(counter); + event = cpuhw->limited_counter[i]; + event->hw.idx = cpuhw->limited_hwidx[i]; + val = (event->hw.idx == 5) ? pmc5 : pmc6; + atomic64_set(&event->hw.prev_count, val); + perf_event_update_userpage(event); } } /* - * Since limited counters don't respect the freeze conditions, we + * Since limited events don't respect the freeze conditions, we * have to read them immediately after freezing or unfreezing the - * other counters. We try to keep the values from the limited - * counters as consistent as possible by keeping the delay (in + * other events. We try to keep the values from the limited + * events as consistent as possible by keeping the delay (in * cycles and instructions) between freezing/unfreezing and reading - * the limited counters as small and consistent as possible. - * Therefore, if any limited counters are in use, we read them + * the limited events as small and consistent as possible. + * Therefore, if any limited events are in use, we read them * both, and always in the same order, to minimize variability, * and do it inside the same asm that writes MMCR0. */ -static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) +static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) { unsigned long pmc5, pmc6; @@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) * Write MMCR0, then read PMC5 and PMC6 immediately. * To ensure we don't get a performance monitor interrupt * between writing MMCR0 and freezing/thawing the limited - * counters, we first write MMCR0 with the counter overflow + * events, we first write MMCR0 with the event overflow * interrupt enable bits turned off. */ asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" @@ -500,7 +500,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) thaw_limited_counters(cpuhw, pmc5, pmc6); /* - * Write the full MMCR0 including the counter overflow interrupt + * Write the full MMCR0 including the event overflow interrupt * enable bits, if necessary. */ if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) @@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) } /* - * Disable all counters to prevent PMU interrupts and to allow - * counters to be added or removed. + * Disable all events to prevent PMU interrupts and to allow + * events to be added or removed. */ void hw_perf_disable(void) { - struct cpu_hw_counters *cpuhw; + struct cpu_hw_events *cpuhw; unsigned long flags; if (!ppmu) return; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_counters); + cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; @@ -545,7 +545,7 @@ void hw_perf_disable(void) /* * Set the 'freeze counters' bit. 
* The barrier is to make sure the mtspr has been - * executed and the PMU has frozen the counters + * executed and the PMU has frozen the events * before we return. */ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); @@ -555,26 +555,26 @@ void hw_perf_disable(void) } /* - * Re-enable all counters if disable == 0. - * If we were previously disabled and counters were added, then + * Re-enable all events if disable == 0. + * If we were previously disabled and events were added, then * put the new config on the PMU. */ void hw_perf_enable(void) { - struct perf_counter *counter; - struct cpu_hw_counters *cpuhw; + struct perf_event *event; + struct cpu_hw_events *cpuhw; unsigned long flags; long i; unsigned long val; s64 left; - unsigned int hwc_index[MAX_HWCOUNTERS]; + unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; if (!ppmu) return; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_counters); + cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { local_irq_restore(flags); return; @@ -582,23 +582,23 @@ void hw_perf_enable(void) cpuhw->disabled = 0; /* - * If we didn't change anything, or only removed counters, + * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings - * (possibly updated for removal of counters). + * (possibly updated for removal of events). */ if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - if (cpuhw->n_counters == 0) + if (cpuhw->n_events == 0) ppc_set_pmu_inuse(0); goto out_enable; } /* - * Compute MMCR* values for the new set of counters + * Compute MMCR* values for the new set of events */ - if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, + if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, cpuhw->mmcr)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); @@ -607,22 +607,22 @@ void hw_perf_enable(void) /* * Add in MMCR0 freeze bits corresponding to the - * attr.exclude_* bits for the first counter. - * We have already checked that all counters have the - * same values for these bits as the first counter. + * attr.exclude_* bits for the first event. + * We have already checked that all events have the + * same values for these bits as the first event. */ - counter = cpuhw->counter[0]; - if (counter->attr.exclude_user) + event = cpuhw->event[0]; + if (event->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; - if (counter->attr.exclude_kernel) - cpuhw->mmcr[0] |= freeze_counters_kernel; - if (counter->attr.exclude_hv) + if (event->attr.exclude_kernel) + cpuhw->mmcr[0] |= freeze_events_kernel; + if (event->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* * Write the new configuration to MMCR* with the freeze - * bit set and set the hardware counters to their initial values. - * Then unfreeze the counters. + * bit set and set the hardware events to their initial values. + * Then unfreeze the events. */ ppc_set_pmu_inuse(1); mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); @@ -631,43 +631,43 @@ void hw_perf_enable(void) | MMCR0_FC); /* - * Read off any pre-existing counters that need to move + * Read off any pre-existing events that need to move * to another PMC. 
*/ - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { - power_pmu_read(counter); - write_pmc(counter->hw.idx, 0); - counter->hw.idx = 0; + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { + power_pmu_read(event); + write_pmc(event->hw.idx, 0); + event->hw.idx = 0; } } /* - * Initialize the PMCs for all the new and moved counters. + * Initialize the PMCs for all the new and moved events. */ cpuhw->n_limited = n_lim = 0; - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (counter->hw.idx) + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (event->hw.idx) continue; idx = hwc_index[i] + 1; if (is_limited_pmc(idx)) { - cpuhw->limited_counter[n_lim] = counter; + cpuhw->limited_counter[n_lim] = event; cpuhw->limited_hwidx[n_lim] = idx; ++n_lim; continue; } val = 0; - if (counter->hw.sample_period) { - left = atomic64_read(&counter->hw.period_left); + if (event->hw.sample_period) { + left = atomic64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } - atomic64_set(&counter->hw.prev_count, val); - counter->hw.idx = idx; + atomic64_set(&event->hw.prev_count, val); + event->hw.idx = idx; write_pmc(idx, val); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; @@ -688,85 +688,85 @@ void hw_perf_enable(void) local_irq_restore(flags); } -static int collect_events(struct perf_counter *group, int max_count, - struct perf_counter *ctrs[], u64 *events, +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *ctrs[], u64 *events, unsigned int *flags) { int n = 0; - struct perf_counter *counter; + struct perf_event *event; - if (!is_software_counter(group)) { + if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; - flags[n] = group->hw.counter_base; + flags[n] = group->hw.event_base; events[n++] = group->hw.config; } - list_for_each_entry(counter, &group->sibling_list, list_entry) { - if (!is_software_counter(counter) && - counter->state != PERF_COUNTER_STATE_OFF) { + list_for_each_entry(event, &group->sibling_list, group_entry) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; - ctrs[n] = counter; - flags[n] = counter->hw.counter_base; - events[n++] = counter->hw.config; + ctrs[n] = event; + flags[n] = event->hw.event_base; + events[n++] = event->hw.config; } } return n; } -static void counter_sched_in(struct perf_counter *counter, int cpu) +static void event_sched_in(struct perf_event *event, int cpu) { - counter->state = PERF_COUNTER_STATE_ACTIVE; - counter->oncpu = cpu; - counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; - if (is_software_counter(counter)) - counter->pmu->enable(counter); + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = cpu; + event->tstamp_running += event->ctx->time - event->tstamp_stopped; + if (is_software_event(event)) + event->pmu->enable(event); } /* - * Called to enable a whole group of counters. + * Called to enable a whole group of events. * Returns 1 if the group was enabled, or -EAGAIN if it could not be. * Assumes the caller has disabled interrupts and has * frozen the PMU with hw_perf_save_disable. 
*/ -int hw_perf_group_sched_in(struct perf_counter *group_leader, +int hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, int cpu) + struct perf_event_context *ctx, int cpu) { - struct cpu_hw_counters *cpuhw; + struct cpu_hw_events *cpuhw; long i, n, n0; - struct perf_counter *sub; + struct perf_event *sub; if (!ppmu) return 0; - cpuhw = &__get_cpu_var(cpu_hw_counters); - n0 = cpuhw->n_counters; + cpuhw = &__get_cpu_var(cpu_hw_events); + n0 = cpuhw->n_events; n = collect_events(group_leader, ppmu->n_counter - n0, - &cpuhw->counter[n0], &cpuhw->events[n0], + &cpuhw->event[n0], &cpuhw->events[n0], &cpuhw->flags[n0]); if (n < 0) return -EAGAIN; - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n)) + if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) return -EAGAIN; i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); if (i < 0) return -EAGAIN; - cpuhw->n_counters = n0 + n; + cpuhw->n_events = n0 + n; cpuhw->n_added += n; /* - * OK, this group can go on; update counter states etc., - * and enable any software counters + * OK, this group can go on; update event states etc., + * and enable any software events */ for (i = n0; i < n0 + n; ++i) - cpuhw->counter[i]->hw.config = cpuhw->events[i]; + cpuhw->event[i]->hw.config = cpuhw->events[i]; cpuctx->active_oncpu += n; n = 1; - counter_sched_in(group_leader, cpu); - list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { - if (sub->state != PERF_COUNTER_STATE_OFF) { - counter_sched_in(sub, cpu); + event_sched_in(group_leader, cpu); + list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { + if (sub->state != PERF_EVENT_STATE_OFF) { + event_sched_in(sub, cpu); ++n; } } @@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, } /* - * Add a counter to the PMU. - * If all counters are not already frozen, then we disable and + * Add a event to the PMU. + * If all events are not already frozen, then we disable and * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. */ -static int power_pmu_enable(struct perf_counter *counter) +static int power_pmu_enable(struct perf_event *event) { - struct cpu_hw_counters *cpuhw; + struct cpu_hw_events *cpuhw; unsigned long flags; int n0; int ret = -EAGAIN; @@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter) perf_disable(); /* - * Add the counter to the list (if there is room) + * Add the event to the list (if there is room) * and check whether the total set is still feasible. */ - cpuhw = &__get_cpu_var(cpu_hw_counters); - n0 = cpuhw->n_counters; + cpuhw = &__get_cpu_var(cpu_hw_events); + n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; - cpuhw->counter[n0] = counter; - cpuhw->events[n0] = counter->hw.config; - cpuhw->flags[n0] = counter->hw.counter_base; - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1)) + cpuhw->event[n0] = event; + cpuhw->events[n0] = event->hw.config; + cpuhw->flags[n0] = event->hw.event_base; + if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) goto out; - counter->hw.config = cpuhw->events[n0]; - ++cpuhw->n_counters; + event->hw.config = cpuhw->events[n0]; + ++cpuhw->n_events; ++cpuhw->n_added; ret = 0; @@ -819,36 +819,36 @@ static int power_pmu_enable(struct perf_counter *counter) } /* - * Remove a counter from the PMU. + * Remove a event from the PMU. 
*/ -static void power_pmu_disable(struct perf_counter *counter) +static void power_pmu_disable(struct perf_event *event) { - struct cpu_hw_counters *cpuhw; + struct cpu_hw_events *cpuhw; long i; unsigned long flags; local_irq_save(flags); perf_disable(); - power_pmu_read(counter); - - cpuhw = &__get_cpu_var(cpu_hw_counters); - for (i = 0; i < cpuhw->n_counters; ++i) { - if (counter == cpuhw->counter[i]) { - while (++i < cpuhw->n_counters) - cpuhw->counter[i-1] = cpuhw->counter[i]; - --cpuhw->n_counters; - ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); - if (counter->hw.idx) { - write_pmc(counter->hw.idx, 0); - counter->hw.idx = 0; + power_pmu_read(event); + + cpuhw = &__get_cpu_var(cpu_hw_events); + for (i = 0; i < cpuhw->n_events; ++i) { + if (event == cpuhw->event[i]) { + while (++i < cpuhw->n_events) + cpuhw->event[i-1] = cpuhw->event[i]; + --cpuhw->n_events; + ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); + if (event->hw.idx) { + write_pmc(event->hw.idx, 0); + event->hw.idx = 0; } - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); break; } } for (i = 0; i < cpuhw->n_limited; ++i) - if (counter == cpuhw->limited_counter[i]) + if (event == cpuhw->limited_counter[i]) break; if (i < cpuhw->n_limited) { while (++i < cpuhw->n_limited) { @@ -857,8 +857,8 @@ static void power_pmu_disable(struct perf_counter *counter) } --cpuhw->n_limited; } - if (cpuhw->n_counters == 0) { - /* disable exceptions if no counters are running */ + if (cpuhw->n_events == 0) { + /* disable exceptions if no events are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } @@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter) } /* - * Re-enable interrupts on a counter after they were throttled + * Re-enable interrupts on a event after they were throttled * because they were coming too fast. */ -static void power_pmu_unthrottle(struct perf_counter *counter) +static void power_pmu_unthrottle(struct perf_event *event) { s64 val, left; unsigned long flags; - if (!counter->hw.idx || !counter->hw.sample_period) + if (!event->hw.idx || !event->hw.sample_period) return; local_irq_save(flags); perf_disable(); - power_pmu_read(counter); - left = counter->hw.sample_period; - counter->hw.last_period = left; + power_pmu_read(event); + left = event->hw.sample_period; + event->hw.last_period = left; val = 0; if (left < 0x80000000L) val = 0x80000000L - left; - write_pmc(counter->hw.idx, val); - atomic64_set(&counter->hw.prev_count, val); - atomic64_set(&counter->hw.period_left, left); - perf_counter_update_userpage(counter); + write_pmc(event->hw.idx, val); + atomic64_set(&event->hw.prev_count, val); + atomic64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); perf_enable(); local_irq_restore(flags); } @@ -901,29 +901,29 @@ struct pmu power_pmu = { }; /* - * Return 1 if we might be able to put counter on a limited PMC, + * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. - * A counter can only go on a limited PMC if it counts something + * A event can only go on a limited PMC if it counts something * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. 
*/ -static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, +static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, unsigned int flags) { int n; u64 alt[MAX_EVENT_ALTERNATIVES]; - if (counter->attr.exclude_user - || counter->attr.exclude_kernel - || counter->attr.exclude_hv - || counter->attr.sample_period) + if (event->attr.exclude_user + || event->attr.exclude_kernel + || event->attr.exclude_hv + || event->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) return 1; /* - * The requested event isn't on a limited PMC already; + * The requested event_id isn't on a limited PMC already; * see if any alternative code goes on a limited PMC. */ if (!ppmu->get_alternatives) @@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, } /* - * Find an alternative event that goes on a normal PMC, if possible, - * and return the event code, or 0 if there is no such alternative. - * (Note: event code 0 is "don't count" on all machines.) + * Find an alternative event_id that goes on a normal PMC, if possible, + * and return the event_id code, or 0 if there is no such alternative. + * (Note: event_id code 0 is "don't count" on all machines.) */ static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { @@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags) return alt[0]; } -/* Number of perf_counters counting hardware events */ -static atomic_t num_counters; +/* Number of perf_events counting hardware events */ +static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); /* - * Release the PMU if this is the last perf_counter. + * Release the PMU if this is the last perf_event. */ -static void hw_perf_counter_destroy(struct perf_counter *counter) +static void hw_perf_event_destroy(struct perf_event *event) { - if (!atomic_add_unless(&num_counters, -1, 1)) { + if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_dec_return(&num_counters) == 0) + if (atomic_dec_return(&num_events) == 0) release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } /* - * Translate a generic cache event config to a raw event code. + * Translate a generic cache event_id config to a raw event_id code. 
*/ static int hw_perf_cache_event(u64 config, u64 *eventp) { @@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) +const struct pmu *hw_perf_event_init(struct perf_event *event) { u64 ev; unsigned long flags; - struct perf_counter *ctrs[MAX_HWCOUNTERS]; - u64 events[MAX_HWCOUNTERS]; - unsigned int cflags[MAX_HWCOUNTERS]; + struct perf_event *ctrs[MAX_HWEVENTS]; + u64 events[MAX_HWEVENTS]; + unsigned int cflags[MAX_HWEVENTS]; int n; int err; - struct cpu_hw_counters *cpuhw; + struct cpu_hw_events *cpuhw; if (!ppmu) return ERR_PTR(-ENXIO); - switch (counter->attr.type) { + switch (event->attr.type) { case PERF_TYPE_HARDWARE: - ev = counter->attr.config; + ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return ERR_PTR(-EOPNOTSUPP); ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: - err = hw_perf_cache_event(counter->attr.config, &ev); + err = hw_perf_cache_event(event->attr.config, &ev); if (err) return ERR_PTR(err); break; case PERF_TYPE_RAW: - ev = counter->attr.config; + ev = event->attr.config; break; default: return ERR_PTR(-EINVAL); } - counter->hw.config_base = ev; - counter->hw.idx = 0; + event->hw.config_base = ev; + event->hw.idx = 0; /* * If we are not running on a hypervisor, force the @@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) - counter->attr.exclude_hv = 0; + event->attr.exclude_hv = 0; /* - * If this is a per-task counter, then we can use + * If this is a per-task event, then we can use * PM_RUN_* events interchangeably with their non RUN_* * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. * XXX we should check if the task is an idle task. */ flags = 0; - if (counter->ctx->task) + if (event->ctx->task) flags |= PPMU_ONLY_COUNT_RUN; /* - * If this machine has limited counters, check whether this - * event could go on a limited counter. + * If this machine has limited events, check whether this + * event_id could go on a limited event. */ if (ppmu->flags & PPMU_LIMITED_PMC5_6) { - if (can_go_on_limited_pmc(counter, ev, flags)) { + if (can_go_on_limited_pmc(event, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { /* - * The requested event is on a limited PMC, + * The requested event_id is on a limited PMC, * but we can't use a limited PMC; see if any * alternative goes on a normal PMC. */ @@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) /* * If this is in a group, check if it can go on with all the - * other hardware counters in the group. We assume the counter + * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. 
*/ n = 0; - if (counter->group_leader != counter) { - n = collect_events(counter->group_leader, ppmu->n_counter - 1, + if (event->group_leader != event) { + n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) return ERR_PTR(-EINVAL); } events[n] = ev; - ctrs[n] = counter; + ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) return ERR_PTR(-EINVAL); - cpuhw = &get_cpu_var(cpu_hw_counters); + cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); - put_cpu_var(cpu_hw_counters); + put_cpu_var(cpu_hw_events); if (err) return ERR_PTR(-EINVAL); - counter->hw.config = events[n]; - counter->hw.counter_base = cflags[n]; - counter->hw.last_period = counter->hw.sample_period; - atomic64_set(&counter->hw.period_left, counter->hw.last_period); + event->hw.config = events[n]; + event->hw.event_base = cflags[n]; + event->hw.last_period = event->hw.sample_period; + atomic64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. - * If no counters are currently in use, then we have to take a + * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; - if (!atomic_inc_not_zero(&num_counters)) { + if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_counters) == 0 && - reserve_pmc_hardware(perf_counter_interrupt)) + if (atomic_read(&num_events) == 0 && + reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else - atomic_inc(&num_counters); + atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } - counter->destroy = hw_perf_counter_destroy; + event->destroy = hw_perf_event_destroy; if (err) return ERR_PTR(err); @@ -1128,24 +1128,24 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter) * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ -static void record_and_restart(struct perf_counter *counter, unsigned long val, +static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs, int nmi) { - u64 period = counter->hw.sample_period; + u64 period = event->hw.sample_period; s64 prev, delta, left; int record = 0; /* we don't have to worry about interrupts here */ - prev = atomic64_read(&counter->hw.prev_count); + prev = atomic64_read(&event->hw.prev_count); delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); + atomic64_add(delta, &event->count); /* - * See if the total period for this counter has expired, + * See if the total period for this event has expired, * and update for the next period. 
*/ val = 0; - left = atomic64_read(&counter->hw.period_left) - delta; + left = atomic64_read(&event->hw.period_left) - delta; if (period) { if (left <= 0) { left += period; @@ -1162,20 +1162,19 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val, */ if (record) { struct perf_sample_data data = { - .regs = regs, .addr = 0, - .period = counter->hw.last_period, + .period = event->hw.last_period, }; - if (counter->attr.sample_type & PERF_SAMPLE_ADDR) + if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); - if (perf_counter_overflow(counter, nmi, &data)) { + if (perf_event_overflow(event, nmi, &data, regs)) { /* * Interrupts are coming too fast - throttle them - * by setting the counter to 0, so it will be + * by setting the event to 0, so it will be * at least 2^30 cycles until the next interrupt - * (assuming each counter counts at most 2 counts + * (assuming each event counts at most 2 counts * per cycle). */ val = 0; @@ -1183,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val, } } - write_pmc(counter->hw.idx, val); - atomic64_set(&counter->hw.prev_count, val); - atomic64_set(&counter->hw.period_left, left); - perf_counter_update_userpage(counter); + write_pmc(event->hw.idx, val); + atomic64_set(&event->hw.prev_count, val); + atomic64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); } /* * Called from generic code to get the misc flags (i.e. processor mode) - * for an event. + * for an event_id. */ unsigned long perf_misc_flags(struct pt_regs *regs) { @@ -1199,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs) if (flags) return flags; - return user_mode(regs) ? PERF_EVENT_MISC_USER : - PERF_EVENT_MISC_KERNEL; + return user_mode(regs) ? PERF_RECORD_MISC_USER : + PERF_RECORD_MISC_KERNEL; } /* * Called from generic code to get the instruction pointer - * for an event. + * for an event_id. */ unsigned long perf_instruction_pointer(struct pt_regs *regs) { @@ -1221,11 +1220,11 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) /* * Performance monitor interrupt stuff */ -static void perf_counter_interrupt(struct pt_regs *regs) +static void perf_event_interrupt(struct pt_regs *regs) { int i; - struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); - struct perf_counter *counter; + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct perf_event *event; unsigned long val; int found = 0; int nmi; @@ -1242,21 +1241,21 @@ static void perf_counter_interrupt(struct pt_regs *regs) else irq_enter(); - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; - val = read_pmc(counter->hw.idx); + val = read_pmc(event->hw.idx); if ((int)val < 0) { - /* counter has overflowed */ + /* event has overflowed */ found = 1; - record_and_restart(counter, val, regs, nmi); + record_and_restart(event, val, regs, nmi); } } /* - * In case we didn't find and reset the counter that caused - * the interrupt, scan all counters and reset any that are + * In case we didn't find and reset the event that caused + * the interrupt, scan all events and reset any that are * negative, to avoid getting continual interrupts. * Any that we processed in the previous loop will not be negative. 
*/ @@ -1274,7 +1273,7 @@ static void perf_counter_interrupt(struct pt_regs *regs) * Reset MMCR0 to its normal value. This will set PMXE and * clear FC (freeze counters) and PMAO (perf mon alert occurred) * and thus allow interrupts to occur again. - * XXX might want to use MSR.PM to keep the counters frozen until + * XXX might want to use MSR.PM to keep the events frozen until * we get back out of this interrupt. */ write_mmcr0(cpuhw, cpuhw->mmcr[0]); @@ -1285,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs) irq_exit(); } -void hw_perf_counter_setup(int cpu) +void hw_perf_event_setup(int cpu) { - struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); if (!ppmu) return; @@ -1309,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu) * Use FCHV to ignore kernel events if MSR.HV is set. */ if (mfmsr() & MSR_HV) - freeze_counters_kernel = MMCR0_FCHV; + freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ return 0; diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 3c90a3d9173..2a361cdda63 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 31918af3e35..0f4c1c73a6a 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 867f6f66396..c351b3a57fb 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index fa21890531d..ca399ba5034 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 018d094d92f..28a4daacdc0 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 75dccb71a04..479574413a9 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. 
*/ #include <linux/string.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/reg.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 02fed27af7f..1d5570a1e45 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -328,7 +328,7 @@ static void c_stop(struct seq_file *m, void *v) { } -struct seq_operations cpuinfo_op = { +const struct seq_operations cpuinfo_op = { .start =c_start, .next = c_next, .stop = c_stop, diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 465e498bcb3..df45a7449a6 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,7 +53,7 @@ #include <linux/posix-timers.h> #include <linux/irq.h> #include <linux/delay.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/io.h> #include <asm/processor.h> @@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void) } #endif /* CONFIG_PPC_ISERIES */ -#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32) -DEFINE_PER_CPU(u8, perf_counter_pending); +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) +DEFINE_PER_CPU(u8, perf_event_pending); -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { - get_cpu_var(perf_counter_pending) = 1; + get_cpu_var(perf_event_pending) = 1; set_dec(1); - put_cpu_var(perf_counter_pending); + put_cpu_var(perf_event_pending); } -#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending) -#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0 +#define test_perf_event_pending() __get_cpu_var(perf_event_pending) +#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 -#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ +#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ -#define test_perf_counter_pending() 0 -#define clear_perf_counter_pending() +#define test_perf_event_pending() 0 +#define clear_perf_event_pending() -#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ +#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ /* * For iSeries shared processors, we have to let the hypervisor @@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs) set_dec(DECREMENTER_MAX); #ifdef CONFIG_PPC32 - if (test_perf_counter_pending()) { - clear_perf_counter_pending(); - perf_counter_do_pending(); + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); } if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index acb74a17bbb..b4b167b3364 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for for NS16550 compatable serial ports + * udbg for NS16550 compatable serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 830bef0a113..e7dae82c128 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -29,7 +29,7 @@ #include <linux/module.h> #include <linux/kprobes.h> #include <linux/kdebug.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/firmware.h> #include <asm/page.h> @@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* When 
running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -312,7 +312,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { @@ -323,7 +323,7 @@ good_area: #endif } else { current->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3ef5084b90c..9ddcfb4dc13 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -242,39 +242,3 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -#ifdef CONFIG_PROC_KCORE -static struct kcore_list kcore_vmem; - -static int __init setup_kcore(void) -{ - int i; - - for (i = 0; i < lmb.memory.cnt; i++) { - unsigned long base; - unsigned long size; - struct kcore_list *kcore_mem; - - base = lmb.memory.region[i].base; - size = lmb.memory.region[i].size; - - kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC); - if (!kcore_mem) - panic("%s: kmalloc failed\n", __func__); - - /* must stay under 32 bits */ - if ( 0xfffffffful - (unsigned long)__va(base) < size) { - size = 0xfffffffful - (unsigned long)(__va(base)); - printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n", - size); - } - - kclist_add(kcore_mem, __va(base), size); - } - - kclist_add(&kcore_vmem, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); - - return 0; -} -module_init(setup_kcore); -#endif diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 31582329cd6..335c578b9cc 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -109,35 +109,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -#ifdef CONFIG_PROC_KCORE -static struct kcore_list kcore_vmem; - -static int __init setup_kcore(void) -{ - int i; - - for (i=0; i < lmb.memory.cnt; i++) { - unsigned long base, size; - struct kcore_list *kcore_mem; - - base = lmb.memory.region[i].base; - size = lmb.memory.region[i].size; - - /* GFP_ATOMIC to avoid might_sleep warnings during boot */ - kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC); - if (!kcore_mem) - panic("%s: kmalloc failed\n", __func__); - - kclist_add(kcore_mem, __va(base), size); - } - - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); - - return 0; -} -module_init(setup_kcore); -#endif - static void pgd_ctor(void *addr) { memset(addr, 0, PGD_TABLE_SIZE); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 579382c163a..59736317bf0 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -143,8 +143,8 @@ int arch_add_memory(int nid, u64 start, u64 size) * memory regions, find holes and callback for contiguous regions. 
*/ int -walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, - int (*func)(unsigned long, unsigned long, void *)) +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct lmb_property res; unsigned long pfn, len; @@ -166,7 +166,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, } return ret; } -EXPORT_SYMBOL_GPL(walk_memory_resource); +EXPORT_SYMBOL_GPL(walk_system_ram_range); /* * Initialize the bootmem system and give it all the memory we @@ -372,7 +372,7 @@ void __init mem_init(void) printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " "%luk reserved, %luk data, %luk bss, %luk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9efc8bda01b..e382cae678b 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT config PPC_PERF_CTRS def_bool y - depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT + depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT help - This enables the powerpc-specific perf_counter back-end. + This enables the powerpc-specific perf_event back-end. config SMP depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index ab8aef9bb8e..8f079b865ad 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -29,7 +29,6 @@ #include <linux/poll.h> #include <linux/ptrace.h> #include <linux/seq_file.h> -#include <linux/marker.h> #include <asm/io.h> #include <asm/time.h> diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 24b30b6909c..fc1b1c42b1d 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -119,7 +119,7 @@ spufs_new_file(struct super_block *sb, struct dentry *dentry, const struct file_operations *fops, int mode, size_t size, struct spu_context *ctx) { - static struct inode_operations spufs_file_iops = { + static const struct inode_operations spufs_file_iops = { .setattr = spufs_setattr, }; struct inode *inode; @@ -773,7 +773,7 @@ static int spufs_fill_super(struct super_block *sb, void *data, int silent) { struct spufs_sb_info *info; - static struct super_operations s_ops = { + static const struct super_operations s_ops = { .alloc_inode = spufs_alloc_inode, .destroy_inode = spufs_destroy_inode, .statfs = simple_statfs, diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index bb5b77c66d0..4678078fede 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -39,7 +39,6 @@ #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/marker.h> #include <asm/io.h> #include <asm/mmu_context.h> diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 572771fd846..9490157da62 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -1,5 +1,5 @@ /* - * udbg for for zilog scc ports as found on Apple PowerMacs + * udbg for zilog scc ports as 
found on Apple PowerMacs * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index eae51ef9af2..3631a4f277e 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -71,7 +71,7 @@ static int hc_show(struct seq_file *m, void *p) return 0; } -static struct seq_operations hcall_inst_seq_ops = { +static const struct seq_operations hcall_inst_seq_ops = { .start = hc_start, .next = hc_next, .stop = hc_stop, diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index a4779912a5c..88f4ae78783 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -165,7 +165,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector, return 0; } -static struct block_device_operations axon_ram_devops = { +static const struct block_device_operations axon_ram_devops = { .owner = THIS_MODULE, .direct_access = axon_ram_direct_access }; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1c866efd217..43c0acad716 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -94,7 +94,7 @@ config S390 select HAVE_KVM if 64BIT select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config SCHED_OMIT_FRAME_POINTER bool diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index bd9914b8948..341aff2687a 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -41,7 +41,7 @@ struct hypfs_sb_info { static const struct file_operations hypfs_file_ops; static struct file_system_type hypfs_type; -static struct super_operations hypfs_s_ops; +static const struct super_operations hypfs_s_ops; /* start of list of all dentries, which have to be deleted on update */ static struct dentry *hypfs_last_dentry; @@ -472,7 +472,7 @@ static struct file_system_type hypfs_type = { .kill_sb = hypfs_kill_super }; -static struct super_operations hypfs_s_ops = { +static const struct super_operations hypfs_s_ops = { .statfs = simple_statfs, .drop_inode = hypfs_drop_inode, .show_options = hypfs_show_options, @@ -496,7 +496,7 @@ static int __init hypfs_init(void) } s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); if (!s390_kobj) { - rc = -ENOMEM;; + rc = -ENOMEM; goto fail_sysfs; } rc = register_filesystem(&hypfs_type); diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index f63fe7b431e..4e9c8ae0a63 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -9,18 +9,7 @@ #ifndef __S390_MMAN_H__ #define __S390_MMAN_H__ -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ +#include <asm-generic/mman.h> #if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) int s390_mmap_check(unsigned long addr, unsigned long len); diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h deleted file mode 100644 index 7015188c2cc..00000000000 --- a/arch/s390/include/asm/perf_counter.h +++ 
/dev/null @@ -1,10 +0,0 @@ -/* - * Performance counter support - s390 specific definitions. - * - * Copyright 2009 Martin Schwidefsky, IBM Corporation. - */ - -static inline void set_perf_counter_pending(void) {} -static inline void clear_perf_counter_pending(void) {} - -#define PERF_COUNTER_INDEX_OFFSET 0 diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h new file mode 100644 index 00000000000..3840cbe7763 --- /dev/null +++ b/arch/s390/include/asm/perf_event.h @@ -0,0 +1,10 @@ +/* + * Performance event support - s390 specific definitions. + * + * Copyright 2009 Martin Schwidefsky, IBM Corporation. + */ + +static inline void set_perf_event_pending(void) {} +static inline void clear_perf_event_pending(void) {} + +#define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index c80602d7c88..cb5232df151 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -268,7 +268,7 @@ #define __NR_preadv 328 #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 -#define __NR_perf_counter_open 331 +#define __NR_perf_event_open 331 #define NR_syscalls 332 /* diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 88a83366819..624790042d4 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper: llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call - .globl sys_perf_counter_open_wrapper -sys_perf_counter_open_wrapper: - llgtr %r2,%r2 # const struct perf_counter_attr * + .globl sys_perf_event_open_wrapper +sys_perf_event_open_wrapper: + llgtr %r2,%r2 # const struct perf_event_attr * lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int lgfr %r5,%r5 # int llgfr %r6,%r6 # unsigned long - jg sys_perf_counter_open # branch to system call + jg sys_perf_event_open # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index ad1acd20038..0b5083681e7 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ -SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) +SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2c2f9835341..43486c2408e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -478,7 +478,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) if (!inti) return -ENOMEM; - inti->type = KVM_S390_PROGRAM_INT;; + inti->type = KVM_S390_PROGRAM_INT; inti->pgm.code = code; VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1abbadd497e..6d507462967 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -10,7 +10,7 @@ * Copyright (C) 1995 Linus Torvalds */ -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) * interrupts again and then search the VMAs */ 
local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; @@ -366,11 +366,11 @@ good_area: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c634dfbe92e..76564795222 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -105,7 +105,7 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h index ee5821042fc..d92a5a2d36d 100644 --- a/arch/score/include/asm/page.h +++ b/arch/score/include/asm/page.h @@ -2,10 +2,11 @@ #define _ASM_SCORE_PAGE_H #include <linux/pfn.h> +#include <linux/const.h> /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT (12) -#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #ifdef __KERNEL__ diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h index 3a112288552..55939992c27 100644 --- a/arch/score/include/asm/thread_info.h +++ b/arch/score/include/asm/thread_info.h @@ -7,6 +7,15 @@ #define KU_USER 0x08 #define KU_KERN 0x00 +#include <asm/page.h> +#include <linux/const.h> + +/* thread information allocation */ +#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_MASK (THREAD_SIZE - _AC(1,UL)) +#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR + #ifndef __ASSEMBLY__ #include <asm/processor.h> @@ -62,12 +71,6 @@ struct thread_info { register struct thread_info *__current_thread_info __asm__("r28"); #define current_thread_info() __current_thread_info -/* thread information allocation */ -#define THREAD_SIZE_ORDER (1) -#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) -#define THREAD_MASK (THREAD_SIZE - 1UL) -#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) #define free_thread_info(info) kfree(info) diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S index f85569831d5..eebcbaa4e97 100644 --- a/arch/score/kernel/vmlinux.lds.S +++ b/arch/score/kernel/vmlinux.lds.S @@ -24,6 +24,8 @@ */ #include <asm-generic/vmlinux.lds.h> +#include <asm/thread_info.h> +#include <asm/page.h> OUTPUT_ARCH(score) ENTRY(_stext) @@ -49,21 +51,9 @@ SECTIONS . = ALIGN(16); RODATA - /* Exception table */ - . 
= ALIGN(16); - __ex_table : { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } + EXCEPTION_TABLE(16) - /* writeable */ - .data ALIGN (4096): { - *(.data.init_task) - - DATA_DATA - CONSTRUCTORS - } + RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) /* We want the small data sections together, so single-instruction offsets can access them all, and initialized data all before uninitialized, so @@ -72,45 +62,14 @@ SECTIONS .sdata : { *(.sdata) } - - . = ALIGN(32); - .data.cacheline_aligned : { - *(.data.cacheline_aligned) - } _edata = .; /* End of data section */ /* will be freed after init */ - . = ALIGN(4096); /* Init code and data */ + . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; - . = ALIGN(4096); - .init.text : { - _sinittext = .; - INIT_TEXT - _einittext = .; - } - .init.data : { - INIT_DATA - } - . = ALIGN(16); - .init.setup : { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - - .initcall.init : { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - - .con_initcall.init : { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - SECURITY_INIT + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) /* .exit.text is discarded at runtime, not link time, to deal with * references from .rodata @@ -121,28 +80,10 @@ SECTIONS .exit.data : { EXIT_DATA } -#if defined(CONFIG_BLK_DEV_INITRD) - .init.ramfs ALIGN(4096): { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - . = ALIGN(4); - LONG(0); - } -#endif - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ - __bss_start = .; /* BSS */ - .sbss : { - *(.sbss) - *(.scommon) - } - .bss : { - *(.bss) - *(COMMON) - } - __bss_stop = .; + BSS_SECTION(0, 0, 0) _end = .; } diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 4df3570fe51..b940424f8cc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,7 +16,7 @@ config SUPERH select HAVE_IOREMAP_PROT if MMU select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 327d47c25a5..2d080732a96 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -310,8 +310,10 @@ static int camera_set_capture(struct soc_camera_platform_info *info, return ret; } +static int ap325rxa_camera_add(struct soc_camera_link *icl, struct device *dev); +static void ap325rxa_camera_del(struct soc_camera_link *icl); + static struct soc_camera_platform_info camera_info = { - .iface = 0, .format_name = "UYVY", .format_depth = 16, .format = { @@ -323,24 +325,46 @@ static struct soc_camera_platform_info camera_info = { .bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, .set_capture = camera_set_capture, + .link = { + .bus_id = 0, + .add_device = ap325rxa_camera_add, + .del_device = ap325rxa_camera_del, + .module_name = "soc_camera_platform", + }, }; +static void dummy_release(struct device *dev) +{ +} + static struct platform_device camera_device = { .name = "soc_camera_platform", .dev = { .platform_data = &camera_info, + .release = dummy_release, }, }; -static int __init camera_setup(void) +static int ap325rxa_camera_add(struct soc_camera_link *icl, + struct device *dev) { - if (camera_probe() > 0) - platform_device_register(&camera_device); + if (icl != &camera_info.link || 
camera_probe() <= 0) + return -ENODEV; - return 0; + camera_info.dev = dev; + + return platform_device_register(&camera_device); } -late_initcall(camera_setup); +static void ap325rxa_camera_del(struct soc_camera_link *icl) +{ + if (icl != &camera_info.link) + return; + + platform_device_unregister(&camera_device); + memset(&camera_device.dev.kobj, 0, + sizeof(camera_device.dev.kobj)); +} #endif /* CONFIG_I2C */ static int ov7725_power(struct device *dev, int mode) @@ -416,6 +440,7 @@ static struct ov772x_camera_info ov7725_info = { .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP, .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0), .link = { + .bus_id = 0, .power = ov7725_power, .board_info = &ap325rxa_i2c_camera[0], .i2c_adapter_id = 0, @@ -423,11 +448,19 @@ static struct ov772x_camera_info ov7725_info = { }, }; -static struct platform_device ap325rxa_camera = { - .name = "soc-camera-pdrv", - .id = 0, - .dev = { - .platform_data = &ov7725_info.link, +static struct platform_device ap325rxa_camera[] = { + { + .name = "soc-camera-pdrv", + .id = 0, + .dev = { + .platform_data = &ov7725_info.link, + }, + }, { + .name = "soc-camera-pdrv", + .id = 1, + .dev = { + .platform_data = &camera_info.link, + }, }, }; @@ -438,7 +471,8 @@ static struct platform_device *ap325rxa_devices[] __initdata = { &ceu_device, &nand_flash_device, &sdcard_cn3_device, - &ap325rxa_camera, + &ap325rxa_camera[0], + &ap325rxa_camera[1], }; static struct spi_board_info ap325rxa_spi_devices[] = { diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h deleted file mode 100644 index d8e6bb9c0cc..00000000000 --- a/arch/sh/include/asm/perf_counter.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_SH_PERF_COUNTER_H -#define __ASM_SH_PERF_COUNTER_H - -/* SH only supports software counters through this interface. */ -static inline void set_perf_counter_pending(void) {} - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#endif /* __ASM_SH_PERF_COUNTER_H */ diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h new file mode 100644 index 00000000000..11a302297ab --- /dev/null +++ b/arch/sh/include/asm/perf_event.h @@ -0,0 +1,9 @@ +#ifndef __ASM_SH_PERF_EVENT_H +#define __ASM_SH_PERF_EVENT_H + +/* SH only supports software events through this interface. 
*/ +static inline void set_perf_event_pending(void) {} + +#define PERF_EVENT_INDEX_OFFSET 0 + +#endif /* __ASM_SH_PERF_EVENT_H */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index 925dd40d9d5..f3fd1b9eb6b 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -344,7 +344,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #define NR_syscalls 337 diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index 2b84bc916bc..343ce8f073e 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -384,7 +384,7 @@ #define __NR_preadv 361 #define __NR_pwritev 362 #define __NR_rt_tgsigqueueinfo 363 -#define __NR_perf_counter_open 364 +#define __NR_perf_event_open 364 #ifdef __KERNEL__ diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 16ba225ede8..19fd11dd987 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -352,4 +352,4 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index af6fb7410c2..5bfde6c7749 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -390,4 +390,4 @@ sys_call_table: .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 781b413ff82..47530104e0a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -15,7 +15,7 @@ #include <linux/mm.h> #include <linux/hardirq.h> #include <linux/kprobes.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/io_trapped.h> #include <asm/system.h> #include <asm/mmu_context.h> @@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if ((regs->sr & SR_IMASK) != SR_IMASK) local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -208,11 +208,11 @@ survive: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index edc842ff61e..8173e38afd3 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -186,8 +186,6 @@ void __init paging_init(void) set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); } -static struct kcore_list kcore_mem, kcore_vmalloc; - void __init mem_init(void) { int codesize, datasize, initsize; @@ -226,13 +224,9 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); - kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END - VMALLOC_START); - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk data, %dk init)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + 
nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, datasize >> 10, diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 2dcc48528f7..de0b0e88182 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -20,7 +20,7 @@ #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/interrupt.h> #include <asm/system.h> #include <asm/io.h> @@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, /* Not an IO address, so reenable interrupts */ local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt or have no user @@ -201,11 +201,11 @@ survive: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 86b82348b97..97fca4695e0 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -25,7 +25,7 @@ config SPARC select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG @@ -47,7 +47,7 @@ config SPARC64 select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config ARCH_DEFCONFIG string diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index 988192e8e95..c3029ad6619 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -20,6 +20,8 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h deleted file mode 100644 index 5d7a8ca0e49..00000000000 --- a/arch/sparc/include/asm/perf_counter.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_SPARC_PERF_COUNTER_H -#define __ASM_SPARC_PERF_COUNTER_H - -extern void set_perf_counter_pending(void); - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#ifdef CONFIG_PERF_COUNTERS -extern void init_hw_perf_counters(void); -#else -static inline void init_hw_perf_counters(void) { } -#endif - -#endif diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h new file mode 100644 index 00000000000..7e2669894ce --- /dev/null +++ b/arch/sparc/include/asm/perf_event.h @@ -0,0 +1,14 @@ +#ifndef __ASM_SPARC_PERF_EVENT_H +#define __ASM_SPARC_PERF_EVENT_H + +extern void set_perf_event_pending(void); + +#define PERF_EVENT_INDEX_OFFSET 0 + +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +#else +static inline void init_hw_perf_events(void) { } +#endif + +#endif diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 706df669f3b..42f2316c3ea 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -395,7 +395,7 @@ #define __NR_preadv 324 #define __NR_pwritev 325 
#define __NR_rt_tgsigqueueinfo 326 -#define __NR_perf_counter_open 327 +#define __NR_perf_event_open 327 #define NR_SYSCALLS 328 diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d4de32f0f8a..6cdbf7e7351 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -258,7 +258,7 @@ static inline void *vio_dring_entry(struct vio_dring_state *dr, static inline u32 vio_dring_avail(struct vio_dring_state *dr, unsigned int ring_size) { - BUILD_BUG_ON(!is_power_of_2(ring_size)); + MAYBE_BUILD_BUG_ON(!is_power_of_2(ring_size)); return (dr->pending - ((dr->prod - dr->cons) & (ring_size - 1))); diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 247cc620cee..3a048fad7ee 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o audit--$(CONFIG_AUDIT) := compat_audit.o obj-$(CONFIG_COMPAT) += $(audit--y) -pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o +pc--$(CONFIG_PERF_EVENTS) := perf_event.o obj-$(CONFIG_SPARC64) += $(pc--y) diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8daab33fc17..8ab1d4728a4 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -229,7 +229,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) tid = ((a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT)); tid &= (IMAP_AID_SAFARI | - IMAP_NID_SAFARI);; + IMAP_NID_SAFARI); } } else { tid = cpuid << IMAP_TID_SHIFT; diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 378eb53e077..b129611590a 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -19,7 +19,7 @@ #include <linux/delay.h> #include <linux/smp.h> -#include <asm/perf_counter.h> +#include <asm/perf_event.h> #include <asm/ptrace.h> #include <asm/local.h> #include <asm/pcr.h> @@ -265,7 +265,7 @@ int __init nmi_init(void) } } if (!err) - init_hw_perf_counters(); + init_hw_perf_events(); return err; } diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 68ff0010707..2d94e7a03af 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,7 +7,7 @@ #include <linux/init.h> #include <linux/irq.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <asm/pil.h> #include <asm/pcr.h> @@ -15,7 +15,7 @@ /* This code is shared between various users of the performance * counters. Users will be oprofile, pseudo-NMI watchdog, and the - * perf_counter support layer. + * perf_event support layer. */ #define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) @@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs) old_regs = set_irq_regs(regs); irq_enter(); -#ifdef CONFIG_PERF_COUNTERS - perf_counter_do_pending(); +#ifdef CONFIG_PERF_EVENTS + perf_event_do_pending(); #endif irq_exit(); set_irq_regs(old_regs); } -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_event.c index 09de4035eaa..2d6a1b10c81 100644 --- a/arch/sparc/kernel/perf_counter.c +++ b/arch/sparc/kernel/perf_event.c @@ -1,8 +1,8 @@ -/* Performance counter support for sparc64. +/* Performance event support for sparc64. * * Copyright (C) 2009 David S. 
Miller <davem@davemloft.net> * - * This code is based almost entirely upon the x86 perf counter + * This code is based almost entirely upon the x86 perf event * code, which is: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> @@ -12,7 +12,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> */ -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/kernel.h> #include <linux/kdebug.h> @@ -46,19 +46,19 @@ * normal code. */ -#define MAX_HWCOUNTERS 2 +#define MAX_HWEVENTS 2 #define MAX_PERIOD ((1UL << 32) - 1) #define PIC_UPPER_INDEX 0 #define PIC_LOWER_INDEX 1 -struct cpu_hw_counters { - struct perf_counter *counters[MAX_HWCOUNTERS]; - unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; - unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; +struct cpu_hw_events { + struct perf_event *events[MAX_HWEVENTS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; int enabled; }; -DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; struct perf_event_map { u16 encoding; @@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = { [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; -static const struct perf_event_map *ultra3i_event_map(int event) +static const struct perf_event_map *ultra3i_event_map(int event_id) { - return &ultra3i_perfmon_event_map[event]; + return &ultra3i_perfmon_event_map[event_id]; } static const struct sparc_pmu ultra3i_pmu = { @@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, }; -static const struct perf_event_map *niagara2_event_map(int event) +static const struct perf_event_map *niagara2_event_map(int event_id) { - return &niagara2_perfmon_event_map[event]; + return &niagara2_perfmon_event_map[event_id]; } static const struct sparc_pmu niagara2_pmu = { @@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = { static const struct sparc_pmu *sparc_pmu __read_mostly; -static u64 event_encoding(u64 event, int idx) +static u64 event_encoding(u64 event_id, int idx) { if (idx == PIC_UPPER_INDEX) - event <<= sparc_pmu->upper_shift; + event_id <<= sparc_pmu->upper_shift; else - event <<= sparc_pmu->lower_shift; - return event; + event_id <<= sparc_pmu->lower_shift; + return event_id; } static u64 mask_for_index(int idx) @@ -151,7 +151,7 @@ static u64 nop_for_index(int idx) sparc_pmu->lower_nop, idx); } -static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, +static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, int idx) { u64 val, mask = mask_for_index(idx); @@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, pcr_ops->write((val & ~mask) | hwc->config); } -static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, +static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); @@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, void hw_perf_enable(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; int i; @@ -184,9 +184,9 @@ void hw_perf_enable(void) val = pcr_ops->read(); - for (i = 0; i < MAX_HWCOUNTERS; i++) { - 
struct perf_counter *cp = cpuc->counters[i]; - struct hw_perf_counter *hwc; + for (i = 0; i < MAX_HWEVENTS; i++) { + struct perf_event *cp = cpuc->events[i]; + struct hw_perf_event *hwc; if (!cp) continue; @@ -199,7 +199,7 @@ void hw_perf_enable(void) void hw_perf_disable(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; if (!cpuc->enabled) @@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val) write_pic(pic); } -static int sparc_perf_counter_set_period(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +static int sparc_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter, write_pmc(idx, (u64)(-left) & 0xffffffff); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); return ret; } -static int sparc_pmu_enable(struct perf_counter *counter) +static int sparc_pmu_enable(struct perf_event *event) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; - sparc_pmu_disable_counter(hwc, idx); + sparc_pmu_disable_event(hwc, idx); - cpuc->counters[idx] = counter; + cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); - sparc_perf_counter_set_period(counter, hwc, idx); - sparc_pmu_enable_counter(hwc, idx); - perf_counter_update_userpage(counter); + sparc_perf_event_set_period(event, hwc, idx); + sparc_pmu_enable_event(hwc, idx); + perf_event_update_userpage(event); return 0; } -static u64 sparc_perf_counter_update(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +static u64 sparc_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; @@ -311,79 +311,79 @@ again: delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; - atomic64_add(delta, &counter->count); + atomic64_add(delta, &event->count); atomic64_sub(delta, &hwc->period_left); return new_raw_count; } -static void sparc_pmu_disable(struct perf_counter *counter) +static void sparc_pmu_disable(struct perf_event *event) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; clear_bit(idx, cpuc->active_mask); - sparc_pmu_disable_counter(hwc, idx); + sparc_pmu_disable_event(hwc, idx); barrier(); - sparc_perf_counter_update(counter, hwc, idx); - cpuc->counters[idx] = NULL; + sparc_perf_event_update(event, hwc, idx); + cpuc->events[idx] = NULL; clear_bit(idx, cpuc->used_mask); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); } -static void sparc_pmu_read(struct perf_counter *counter) +static void sparc_pmu_read(struct perf_event *event) { - struct hw_perf_counter *hwc = &counter->hw; - sparc_perf_counter_update(counter, hwc, hwc->idx); + struct hw_perf_event *hwc = &event->hw; + sparc_perf_event_update(event, hwc, hwc->idx); } -static void sparc_pmu_unthrottle(struct perf_counter *counter) +static void sparc_pmu_unthrottle(struct 
perf_event *event) { - struct hw_perf_counter *hwc = &counter->hw; - sparc_pmu_enable_counter(hwc, hwc->idx); + struct hw_perf_event *hwc = &event->hw; + sparc_pmu_enable_event(hwc, hwc->idx); } -static atomic_t active_counters = ATOMIC_INIT(0); +static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); -void perf_counter_grab_pmc(void) +void perf_event_grab_pmc(void) { - if (atomic_inc_not_zero(&active_counters)) + if (atomic_inc_not_zero(&active_events)) return; mutex_lock(&pmc_grab_mutex); - if (atomic_read(&active_counters) == 0) { + if (atomic_read(&active_events) == 0) { if (atomic_read(&nmi_active) > 0) { on_each_cpu(stop_nmi_watchdog, NULL, 1); BUG_ON(atomic_read(&nmi_active) != 0); } - atomic_inc(&active_counters); + atomic_inc(&active_events); } mutex_unlock(&pmc_grab_mutex); } -void perf_counter_release_pmc(void) +void perf_event_release_pmc(void) { - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { + if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { if (atomic_read(&nmi_active) == 0) on_each_cpu(start_nmi_watchdog, NULL, 1); mutex_unlock(&pmc_grab_mutex); } } -static void hw_perf_counter_destroy(struct perf_counter *counter) +static void hw_perf_event_destroy(struct perf_event *event) { - perf_counter_release_pmc(); + perf_event_release_pmc(); } -static int __hw_perf_counter_init(struct perf_counter *counter) +static int __hw_perf_event_init(struct perf_event *event) { - struct perf_counter_attr *attr = &counter->attr; - struct hw_perf_counter *hwc = &counter->hw; + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; const struct perf_event_map *pmap; u64 enc; @@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter) if (attr->config >= sparc_pmu->max_events) return -EINVAL; - perf_counter_grab_pmc(); - counter->destroy = hw_perf_counter_destroy; + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; /* We save the enable bits in the config_base. 
So to * turn off sampling just write 'config', and to enable @@ -439,16 +439,16 @@ static const struct pmu pmu = { .unthrottle = sparc_pmu_unthrottle, }; -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) +const struct pmu *hw_perf_event_init(struct perf_event *event) { - int err = __hw_perf_counter_init(counter); + int err = __hw_perf_event_init(event); if (err) return ERR_PTR(err); return &pmu; } -void perf_counter_print_debug(void) +void perf_event_print_debug(void) { unsigned long flags; u64 pcr, pic; @@ -471,16 +471,16 @@ void perf_counter_print_debug(void) local_irq_restore(flags); } -static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, +static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; - struct cpu_hw_counters *cpuc; + struct cpu_hw_events *cpuc; struct pt_regs *regs; int idx; - if (!atomic_read(&active_counters)) + if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { @@ -493,35 +493,34 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; - data.regs = regs; data.addr = 0; - cpuc = &__get_cpu_var(cpu_hw_counters); - for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { - struct perf_counter *counter = cpuc->counters[idx]; - struct hw_perf_counter *hwc; + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx < MAX_HWEVENTS; idx++) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; u64 val; if (!test_bit(idx, cpuc->active_mask)) continue; - hwc = &counter->hw; - val = sparc_perf_counter_update(counter, hwc, idx); + hwc = &event->hw; + val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) continue; - data.period = counter->hw.last_period; - if (!sparc_perf_counter_set_period(counter, hwc, idx)) + data.period = event->hw.last_period; + if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data)) - sparc_pmu_disable_counter(hwc, idx); + if (perf_event_overflow(event, 1, &data, regs)) + sparc_pmu_disable_event(hwc, idx); } return NOTIFY_STOP; } -static __read_mostly struct notifier_block perf_counter_nmi_notifier = { - .notifier_call = perf_counter_nmi_handler, +static __read_mostly struct notifier_block perf_event_nmi_notifier = { + .notifier_call = perf_event_nmi_handler, }; static bool __init supported_pmu(void) @@ -537,9 +536,9 @@ static bool __init supported_pmu(void) return false; } -void __init init_hw_perf_counters(void) +void __init init_hw_perf_events(void) { - pr_info("Performance counters: "); + pr_info("Performance events: "); if (!supported_pmu()) { pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); @@ -548,10 +547,10 @@ void __init init_hw_perf_counters(void) pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); - /* All sparc64 PMUs currently have 2 counters. But this simple - * driver only supports one active counter at a time. + /* All sparc64 PMUs currently have 2 events. But this simple + * driver only supports one active event at a time. 
*/ - perf_max_counters = 1; + perf_max_events = 1; - register_die_notifier(&perf_counter_nmi_notifier); + register_die_notifier(&perf_event_nmi_notifier); } diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 04181577cb6..0f1658d3749 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -82,5 +82,5 @@ sys_call_table: /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv -/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open +/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 91b06b7f7ac..009825f6e73 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -83,7 +83,7 @@ sys_call_table32: /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open #endif /* CONFIG_COMPAT */ @@ -158,4 +158,4 @@ sys_call_table: /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv - .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open + .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 54114ad0bde..dc7c3b17a15 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -472,7 +472,7 @@ void __init mem_init(void) reservedpages++; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT - 10), codepages << (PAGE_SHIFT-10), reservedpages << (PAGE_SHIFT - 10), diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index f114813ae25..a74245ae3a8 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -533,7 +533,7 @@ static int eth_parse(char *str, int *index_out, char **str_out, char **error_out) { char *end; - int n, err = -EINVAL;; + int n, err = -EINVAL; n = simple_strtoul(str, &end, 0); if (end == str) { diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 8f05d4d9da1..635d16d90a8 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -106,7 +106,7 @@ static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); #define MAX_DEV (16) -static struct block_device_operations ubd_blops = { +static const struct block_device_operations ubd_blops = { .owner = THIS_MODULE, .open = ubd_open, .release = ubd_release, diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h index 313ebb8a256..fb3c05a0cbb 100644 --- a/arch/um/include/asm/hardirq.h +++ b/arch/um/include/asm/hardirq.h @@ -1,25 +1 @@ -/* (c) 2004 
cw@f00f.org, GPLv2 blah blah */ - -#ifndef __ASM_UM_HARDIRQ_H -#define __ASM_UM_HARDIRQ_H - -#include <linux/threads.h> -#include <linux/irq.h> - -/* NOTE: When SMP works again we might want to make this - * ____cacheline_aligned or maybe use per_cpu state? --cw */ -typedef struct { - unsigned int __softirq_pending; -} irq_cpustat_t; - -#include <linux/irq_cpustat.h> - -/* As this would be very strange for UML to get we BUG() after the - * printk. */ -static inline void ack_bad_irq(unsigned int irq) -{ - printk(KERN_ERR "unexpected IRQ %02x\n", irq); - BUG(); -} - -#endif /* __ASM_UM_HARDIRQ_H */ +#include <asm-generic/hardirq.h> diff --git a/arch/um/include/shared/ptrace_user.h b/arch/um/include/shared/ptrace_user.h index 4bce6e01288..7fd8539bc19 100644 --- a/arch/um/include/shared/ptrace_user.h +++ b/arch/um/include/shared/ptrace_user.h @@ -29,7 +29,7 @@ extern int ptrace_setregs(long pid, unsigned long *regs_in); * recompilation. So, we use PTRACE_OLDSETOPTIONS in UML. * We also want to be able to build the kernel on 2.4, which doesn't * have PTRACE_OLDSETOPTIONS. So, if it is missing, we declare - * PTRACE_OLDSETOPTIONS to to be the same as PTRACE_SETOPTIONS. + * PTRACE_OLDSETOPTIONS to be the same as PTRACE_SETOPTIONS. * * On architectures, that start to support PTRACE_O_TRACESYSGOOD on * linux 2.6, PTRACE_OLDSETOPTIONS never is defined, and also isn't diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 61d7e6138ff..a5d5e70cf6f 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -77,7 +77,7 @@ void __init mem_init(void) num_physpages = totalram_pages; max_pfn = totalram_pages; printk(KERN_INFO "Memory: %luk available\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10)); + nr_free_pages() << (PAGE_SHIFT-10)); kmalloc_ok = 1; #ifdef CONFIG_HIGHMEM diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 0cd9a7a05e7..8bfd1e90581 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -38,10 +38,10 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, *pte = pte_mkread(*pte); return 0; - out_pmd: - pud_free(mm, pud); out_pte: pmd_free(mm, pmd); + out_pmd: + pud_free(mm, pud); out: return -ENOMEM; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 51c59015b28..7c7a54bed4a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -24,7 +24,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE - select HAVE_PERF_COUNTERS if (!M386 && !M486) + select HAVE_PERF_EVENTS if (!M386 && !M486) select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB @@ -1204,6 +1204,10 @@ config ARCH_DISCONTIGMEM_DEFAULT def_bool y depends on NUMA && X86_32 +config ARCH_PROC_KCORE_TEXT + def_bool y + depends on X86_64 && PROC_KCORE + config ARCH_SPARSEMEM_DEFAULT def_bool y depends on X86_64 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index ba331bfd111..74619c4f9fd 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -831,5 +831,5 @@ ia32_sys_call_table: .quad compat_sys_preadv .quad compat_sys_pwritev .quad compat_sys_rt_tgsigqueueinfo /* 335 */ - .quad sys_perf_counter_open + .quad sys_perf_event_open ia32_syscall_end: diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c6d21b18806..474d80d3e6c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -66,6 +66,19 @@ static inline void default_inquire_remote_apic(int apicid) } /* + * With 82489DX we can't rely on apic feature bit + * retrieved 
via cpuid but still have to deal with + * such an apic chip so we assume that SMP configuration + * is found from MP table (64bit case uses ACPI mostly + * which set smp presence flag as well so we are safe + * to use this helper too). + */ +static inline bool apic_from_smp_config(void) +{ + return smp_found_config && !disable_apic; +} + +/* * Basic functions accessing APICs. */ #ifdef CONFIG_PARAVIRT diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 5e3f2044f0d..f5693c81a1d 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) #endif diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h index e7b7c938ae2..ad7ce3fd506 100644 --- a/arch/x86/include/asm/perf_counter.h +++ b/arch/x86/include/asm/perf_event.h @@ -1,8 +1,8 @@ -#ifndef _ASM_X86_PERF_COUNTER_H -#define _ASM_X86_PERF_COUNTER_H +#ifndef _ASM_X86_PERF_EVENT_H +#define _ASM_X86_PERF_EVENT_H /* - * Performance counter hw details: + * Performance event hw details: */ #define X86_PMC_MAX_GENERIC 8 @@ -43,7 +43,7 @@ union cpuid10_eax { struct { unsigned int version_id:8; - unsigned int num_counters:8; + unsigned int num_events:8; unsigned int bit_width:8; unsigned int mask_length:8; } split; @@ -52,7 +52,7 @@ union cpuid10_eax { union cpuid10_edx { struct { - unsigned int num_counters_fixed:4; + unsigned int num_events_fixed:4; unsigned int reserved:28; } split; unsigned int full; @@ -60,7 +60,7 @@ union cpuid10_edx { /* - * Fixed-purpose performance counters: + * Fixed-purpose performance events: */ /* @@ -87,22 +87,22 @@ union cpuid10_edx { /* * We model BTS tracing as another fixed-mode PMC. * - * We choose a value in the middle of the fixed counter range, since lower - * values are used by actual fixed counters and higher values are used + * We choose a value in the middle of the fixed event range, since lower + * values are used by actual fixed events and higher values are used * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 
*/ #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) -#ifdef CONFIG_PERF_COUNTERS -extern void init_hw_perf_counters(void); -extern void perf_counters_lapic_init(void); +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +extern void perf_events_lapic_init(void); -#define PERF_COUNTER_INDEX_OFFSET 0 +#define PERF_EVENT_INDEX_OFFSET 0 #else -static inline void init_hw_perf_counters(void) { } -static inline void perf_counters_lapic_init(void) { } +static inline void init_hw_perf_events(void) { } +static inline void perf_events_lapic_init(void) { } #endif -#endif /* _ASM_X86_PERF_COUNTER_H */ +#endif /* _ASM_X86_PERF_EVENT_H */ diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index c86f452256d..ae907e61718 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -65,7 +65,6 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, case 4: *(int *)to = *(int *)from; return to; - case 3: *(short *)to = *(short *)from; *((char *)to + 2) = *((char *)from + 2); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index d82f39bb790..8d33bc5462d 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -1,7 +1,7 @@ /* * Access to user system call parameters and results * - * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -16,13 +16,13 @@ #include <linux/sched.h> #include <linux/err.h> -static inline long syscall_get_nr(struct task_struct *task, - struct pt_regs *regs) +/* + * Only the low 32 bits of orig_ax are meaningful, so we return int. + * This importantly ignores the high bits on 64-bit, so comparisons + * sign-extend the low 32 bits. + */ +static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - /* - * We always sign-extend a -1 value being set here, - * so this is always either -1L or a syscall number. - */ return regs->orig_ax; } diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 5e06259e90e..632fb44b4cb 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -33,7 +33,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. * The caller should also make sure he pins the user space address - * so that the we don't result in page fault and sleep. + * so that we don't result in page fault and sleep. * * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault * we return the initial request size (1, 2 or 4), as copy_*_user should do. 
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 8deaada61bc..6fb3c209a7e 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h @@ -341,7 +341,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index b9f3c60de5f..8d3ad0adbc6 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h @@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv) __SYSCALL(__NR_pwritev, sys_pwritev) #define __NR_rt_tgsigqueueinfo 297 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) -#define __NR_perf_counter_open 298 -__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) +#define __NR_perf_event_open 298 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 77a68505419..04eb6c958b9 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -15,6 +15,7 @@ #include <linux/numa.h> #include <linux/percpu.h> #include <linux/timer.h> +#include <linux/io.h> #include <asm/types.h> #include <asm/percpu.h> #include <asm/uv/uv_mmrs.h> @@ -258,13 +259,13 @@ static inline unsigned long *uv_global_mmr32_address(int pnode, static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val) { - *uv_global_mmr32_address(pnode, offset) = val; + writeq(val, uv_global_mmr32_address(pnode, offset)); } static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset) { - return *uv_global_mmr32_address(pnode, offset); + return readq(uv_global_mmr32_address(pnode, offset)); } /* @@ -281,13 +282,13 @@ static inline unsigned long *uv_global_mmr64_address(int pnode, static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val) { - *uv_global_mmr64_address(pnode, offset) = val; + writeq(val, uv_global_mmr64_address(pnode, offset)); } static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset) { - return *uv_global_mmr64_address(pnode, offset); + return readq(uv_global_mmr64_address(pnode, offset)); } /* @@ -301,22 +302,22 @@ static inline unsigned long *uv_local_mmr_address(unsigned long offset) static inline unsigned long uv_read_local_mmr(unsigned long offset) { - return *uv_local_mmr_address(offset); + return readq(uv_local_mmr_address(offset)); } static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) { - *uv_local_mmr_address(offset) = val; + writeq(val, uv_local_mmr_address(offset)); } static inline unsigned char uv_read_local_mmr8(unsigned long offset) { - return *((unsigned char *)uv_local_mmr_address(offset)); + return readb(uv_local_mmr_address(offset)); } static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val) { - *((unsigned char *)uv_local_mmr_address(offset)) = val; + writeb(val, uv_local_mmr_address(offset)); } /* @@ -422,7 +423,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) unsigned long val; val = (1UL << UVH_IPI_INT_SEND_SHFT) | - ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) | + ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | (vector << UVH_IPI_INT_VECTOR_SHFT); uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 
a34601f5298..894aa97f071 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -14,7 +14,7 @@ * Mikael Pettersson : PM converted to driver model. */ -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/kernel_stat.h> #include <linux/mc146818rtc.h> #include <linux/acpi_pmtmr.h> @@ -35,7 +35,7 @@ #include <linux/smp.h> #include <linux/mm.h> -#include <asm/perf_counter.h> +#include <asm/perf_event.h> #include <asm/x86_init.h> #include <asm/pgalloc.h> #include <asm/atomic.h> @@ -62,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U; /* * The highest APIC ID seen during enumeration. * - * This determines the messaging protocol we can use: if all APIC IDs + * On AMD, this determines the messaging protocol we can use: if all APIC IDs * are in the 0 ... 7 range, then we can use logical addressing which * has some performance advantages (better broadcasting). * @@ -979,7 +979,7 @@ void lapic_shutdown(void) { unsigned long flags; - if (!cpu_has_apic) + if (!cpu_has_apic && !apic_from_smp_config()) return; local_irq_save(flags); @@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void) apic_write(APIC_ESR, 0); } #endif - perf_counters_lapic_init(); + perf_events_lapic_init(); preempt_disable(); @@ -1197,8 +1197,7 @@ void __cpuinit setup_local_APIC(void) * Double-check whether this APIC is really registered. * This is meaningless in clustered apic mode, so we skip it. */ - if (!apic->apic_id_registered()) - BUG(); + BUG_ON(!apic->apic_id_registered()); /* * Intel recommends to set DFR, LDR and TPR before enabling @@ -1917,24 +1916,14 @@ void __cpuinit generic_processor_info(int apicid, int version) max_physical_apicid = apicid; #ifdef CONFIG_X86_32 - /* - * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y - * but we need to work other dependencies like SMP_SUSPEND etc - * before this can be done without some confusion. - * if (CPU_HOTPLUG_ENABLED || num_processors > 8) - * - Ashok Raj <ashok.raj@intel.com> - */ - if (max_physical_apicid >= 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(version)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (num_processors > 8) + def_to_bigsmp = 1; + break; + case X86_VENDOR_AMD: + if (max_physical_apicid >= 8) def_to_bigsmp = 1; - } } #endif diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 809e1cf86d6..64970b9885f 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1874,7 +1874,7 @@ __apicdebuginit(int) print_all_ICs(void) print_PIC(); /* don't print out if apic is not there */ - if (!cpu_has_apic || disable_apic) + if (!cpu_has_apic && !apic_from_smp_config()) return 0; print_all_local_APICs(); @@ -1999,7 +1999,7 @@ void disable_IO_APIC(void) /* * Use virtual wire A mode when interrupt remapping is enabled. 
*/ - if (cpu_has_apic) + if (cpu_has_apic || apic_from_smp_config()) disconnect_bsp_APIC(!intr_remapping_enabled && ioapic_i8259.pin != -1); } diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 65edc180fc8..c4cbd3080c1 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -64,16 +64,23 @@ void __init default_setup_apic_routing(void) apic = &apic_x2apic_phys; else apic = &apic_x2apic_cluster; - printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); } #endif if (apic == &apic_flat) { - if (max_physical_apicid >= 8) - apic = &apic_physflat; - printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (num_processors > 8) + apic = &apic_physflat; + break; + case X86_VENDOR_AMD: + if (max_physical_apicid >= 8) + apic = &apic_physflat; + } } + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); + if (is_vsmp_box()) { /* need to update phys_pkg_id */ apic->phys_pkg_id = apicid_phys_pkg_id; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 601159374e8..f5f5886a6b5 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -389,6 +389,16 @@ static __init void map_gru_high(int max_pnode) map_high("GRU", gru.s.base, shift, max_pnode, map_wb); } +static __init void map_mmr_high(int max_pnode) +{ + union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; + int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; + + mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); + if (mmr.s.enable) + map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); +} + static __init void map_mmioh_high(int max_pnode) { union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; @@ -643,6 +653,7 @@ void __init uv_system_init(void) } map_gru_high(max_pnode); + map_mmr_high(max_pnode); map_mmioh_high(max_pnode); uv_cpu_init(); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 8dd30638fe4..68537e957a9 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f32fa71ccf9..c910a716a71 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -184,7 +184,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) * approved Athlon */ WARN_ONCE(1, "WARNING: This combination of AMD" - "processors is not suitable for SMP.\n"); + " processors is not suitable for SMP.\n"); if (!test_taint(TAINT_UNSAFE_SMP)) add_taint(TAINT_UNSAFE_SMP); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 2fea97eccf7..cc25c2b4a56 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <asm/stackprotector.h> -#include <asm/perf_counter.h> +#include <asm/perf_event.h> #include <asm/mmu_context.h> #include <asm/hypervisor.h> #include <asm/processor.h> @@ -869,7 +869,7 @@ void __init identify_boot_cpu(void) #else vgetcpu_set_mode(); #endif - init_hw_perf_counters(); + init_hw_perf_events(); } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c 
b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 8cd5224943b..83a3d1f4efc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -489,8 +489,9 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) int i, err = 0; struct threshold_bank *b = NULL; char name[32]; +#ifdef CONFIG_SMP struct cpuinfo_x86 *c = &cpu_data(cpu); - +#endif sprintf(name, "threshold_bank%i", bank); diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 08b6ea4c62b..f04e7252760 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -126,8 +126,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) return -EINVAL; base = simple_strtoull(line + 5, &ptr, 0); - for (; isspace(*ptr); ++ptr) - ; + while (isspace(*ptr)) + ptr++; if (strncmp(ptr, "size=", 5)) return -EINVAL; @@ -135,14 +135,14 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) size = simple_strtoull(ptr + 5, &ptr, 0); if ((base & 0xfff) || (size & 0xfff)) return -EINVAL; - for (; isspace(*ptr); ++ptr) - ; + while (isspace(*ptr)) + ptr++; if (strncmp(ptr, "type=", 5)) return -EINVAL; ptr += 5; - for (; isspace(*ptr); ++ptr) - ; + while (isspace(*ptr)) + ptr++; for (i = 0; i < MTRR_NUM_TYPES; ++i) { if (strcmp(ptr, mtrr_strings[i])) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c index dbdf712fae9..a3c7adb06b7 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1,5 +1,5 @@ /* - * Performance counter x86 architecture code + * Performance events x86 architecture code * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar @@ -11,7 +11,7 @@ * For licencing details see kernel-base/COPYING */ -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/capability.h> #include <linux/notifier.h> #include <linux/hardirq.h> @@ -27,10 +27,10 @@ #include <asm/stacktrace.h> #include <asm/nmi.h> -static u64 perf_counter_mask __read_mostly; +static u64 perf_event_mask __read_mostly; -/* The maximal number of PEBS counters: */ -#define MAX_PEBS_COUNTERS 4 +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 4 /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 @@ -65,11 +65,11 @@ struct debug_store { u64 pebs_index; u64 pebs_absolute_maximum; u64 pebs_interrupt_threshold; - u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; }; -struct cpu_hw_counters { - struct perf_counter *counters[X86_PMC_IDX_MAX]; +struct cpu_hw_events { + struct perf_event *events[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; @@ -86,17 +86,17 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(void); - void (*enable)(struct hw_perf_counter *, int); - void (*disable)(struct hw_perf_counter *, int); + void (*enable)(struct hw_perf_event *, int); + void (*disable)(struct hw_perf_event *, int); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); u64 (*raw_event)(u64); int max_events; - int num_counters; - int num_counters_fixed; - int counter_bits; - u64 counter_mask; + int num_events; + int num_events_fixed; + int event_bits; + u64 event_mask; int apic; u64 max_period; u64 intel_ctrl; @@ -106,7 +106,7 @@ struct x86_pmu { static struct x86_pmu x86_pmu 
__read_mostly; -static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; @@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, }; -static u64 p6_pmu_event_map(int event) +static u64 p6_pmu_event_map(int hw_event) { - return p6_perfmon_event_map[event]; + return p6_perfmon_event_map[hw_event]; } /* - * Counter setting that is specified not to count anything. + * Event setting that is specified not to count anything. * We use this to effectively disable a counter. * * L2_RQSTS with 0 MESI unit mask. */ -#define P6_NOP_COUNTER 0x0000002EULL +#define P6_NOP_EVENT 0x0000002EULL -static u64 p6_pmu_raw_event(u64 event) +static u64 p6_pmu_raw_event(u64 hw_event) { #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL #define P6_EVNTSEL_INV_MASK 0x00800000ULL -#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL +#define P6_EVNTSEL_REG_MASK 0xFF000000ULL #define P6_EVNTSEL_MASK \ (P6_EVNTSEL_EVENT_MASK | \ P6_EVNTSEL_UNIT_MASK | \ P6_EVNTSEL_EDGE_MASK | \ P6_EVNTSEL_INV_MASK | \ - P6_EVNTSEL_COUNTER_MASK) + P6_EVNTSEL_REG_MASK) - return event & P6_EVNTSEL_MASK; + return hw_event & P6_EVNTSEL_MASK; } @@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static u64 intel_pmu_event_map(int event) +static u64 intel_pmu_event_map(int hw_event) { - return intel_perfmon_event_map[event]; + return intel_perfmon_event_map[hw_event]; } /* - * Generalized hw caching related event table, filled + * Generalized hw caching related hw_event table, filled * in on a per model basis. A value of 0 means - * 'not supported', -1 means 'event makes no sense on - * this CPU', any other value means the raw event + * 'not supported', -1 means 'hw_event makes no sense on + * this CPU', any other value means the raw hw_event * ID. 
*/ @@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids }, }; -static u64 intel_pmu_raw_event(u64 event) +static u64 intel_pmu_raw_event(u64 hw_event) { #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL #define CORE_EVNTSEL_INV_MASK 0x00800000ULL -#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL +#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ CORE_EVNTSEL_UNIT_MASK | \ CORE_EVNTSEL_EDGE_MASK | \ CORE_EVNTSEL_INV_MASK | \ - CORE_EVNTSEL_COUNTER_MASK) + CORE_EVNTSEL_REG_MASK) - return event & CORE_EVNTSEL_MASK; + return hw_event & CORE_EVNTSEL_MASK; } static const u64 amd_hw_cache_event_ids @@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] = [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, }; -static u64 amd_pmu_event_map(int event) +static u64 amd_pmu_event_map(int hw_event) { - return amd_perfmon_event_map[event]; + return amd_perfmon_event_map[hw_event]; } -static u64 amd_pmu_raw_event(u64 event) +static u64 amd_pmu_raw_event(u64 hw_event) { #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL #define K7_EVNTSEL_INV_MASK 0x000800000ULL -#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL +#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL #define K7_EVNTSEL_MASK \ (K7_EVNTSEL_EVENT_MASK | \ K7_EVNTSEL_UNIT_MASK | \ K7_EVNTSEL_EDGE_MASK | \ K7_EVNTSEL_INV_MASK | \ - K7_EVNTSEL_COUNTER_MASK) + K7_EVNTSEL_REG_MASK) - return event & K7_EVNTSEL_MASK; + return hw_event & K7_EVNTSEL_MASK; } /* - * Propagate counter elapsed time into the generic counter. - * Can only be executed on the CPU where the counter is active. + * Propagate event elapsed time into the generic event. + * Can only be executed on the CPU where the event is active. * Returns the delta events processed. */ static u64 -x86_perf_counter_update(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +x86_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) { - int shift = 64 - x86_pmu.counter_bits; + int shift = 64 - x86_pmu.event_bits; u64 prev_raw_count, new_raw_count; s64 delta; @@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter, return 0; /* - * Careful: an NMI might modify the previous counter value. + * Careful: an NMI might modify the previous event value. * * Our tactic to handle this is to first atomically read and * exchange a new raw count - then add that new-prev delta - * count to the generic counter atomically: + * count to the generic event atomically: */ again: prev_raw_count = atomic64_read(&hwc->prev_count); - rdmsrl(hwc->counter_base + idx, new_raw_count); + rdmsrl(hwc->event_base + idx, new_raw_count); if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) @@ -642,7 +642,7 @@ again: /* * Now we have the new raw value and have updated the prev * timestamp already. We can now calculate the elapsed delta - * (counter-)time and add that to the generic counter. + * (event-)time and add that to the generic event. * * Careful, not all hw sign-extends above the physical width * of the count. 
@@ -650,13 +650,13 @@ again: delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; - atomic64_add(delta, &counter->count); + atomic64_add(delta, &event->count); atomic64_sub(delta, &hwc->period_left); return new_raw_count; } -static atomic_t active_counters; +static atomic_t active_events; static DEFINE_MUTEX(pmc_reserve_mutex); static bool reserve_pmc_hardware(void) @@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void) if (nmi_watchdog == NMI_LOCAL_APIC) disable_lapic_nmi_watchdog(); - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < x86_pmu.num_events; i++) { if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) goto perfctr_fail; } - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < x86_pmu.num_events; i++) { if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) goto eventsel_fail; } @@ -685,7 +685,7 @@ eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu.eventsel + i); - i = x86_pmu.num_counters; + i = x86_pmu.num_events; perfctr_fail: for (i--; i >= 0; i--) @@ -703,7 +703,7 @@ static void release_pmc_hardware(void) #ifdef CONFIG_X86_LOCAL_APIC int i; - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < x86_pmu.num_events; i++) { release_perfctr_nmi(x86_pmu.perfctr + i); release_evntsel_nmi(x86_pmu.eventsel + i); } @@ -720,7 +720,7 @@ static inline bool bts_available(void) static inline void init_debug_store_on_cpu(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds) return; @@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu) static inline void fini_debug_store_on_cpu(int cpu) { - if (!per_cpu(cpu_hw_counters, cpu).ds) + if (!per_cpu(cpu_hw_events, cpu).ds) return; wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); @@ -751,12 +751,12 @@ static void release_bts_hardware(void) fini_debug_store_on_cpu(cpu); for_each_possible_cpu(cpu) { - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds) continue; - per_cpu(cpu_hw_counters, cpu).ds = NULL; + per_cpu(cpu_hw_events, cpu).ds = NULL; kfree((void *)(unsigned long)ds->bts_buffer_base); kfree(ds); @@ -796,7 +796,7 @@ static int reserve_bts_hardware(void) ds->bts_interrupt_threshold = ds->bts_absolute_maximum - BTS_OVFL_TH; - per_cpu(cpu_hw_counters, cpu).ds = ds; + per_cpu(cpu_hw_events, cpu).ds = ds; err = 0; } @@ -812,9 +812,9 @@ static int reserve_bts_hardware(void) return err; } -static void hw_perf_counter_destroy(struct perf_counter *counter) +static void hw_perf_event_destroy(struct perf_event *event) { - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { + if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { release_pmc_hardware(); release_bts_hardware(); mutex_unlock(&pmc_reserve_mutex); @@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void) } static inline int -set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) +set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) { unsigned int cache_type, cache_op, cache_result; u64 config, val; @@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config) static void intel_pmu_disable_bts(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long debugctlmsr; if (!cpuc->ds) @@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void) /* * Setup the hardware configuration for 
a given attr_type */ -static int __hw_perf_counter_init(struct perf_counter *counter) +static int __hw_perf_event_init(struct perf_event *event) { - struct perf_counter_attr *attr = &counter->attr; - struct hw_perf_counter *hwc = &counter->hw; + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; u64 config; int err; @@ -909,21 +909,23 @@ static int __hw_perf_counter_init(struct perf_counter *counter) return -ENODEV; err = 0; - if (!atomic_inc_not_zero(&active_counters)) { + if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&active_counters) == 0) { + if (atomic_read(&active_events) == 0) { if (!reserve_pmc_hardware()) err = -EBUSY; else err = reserve_bts_hardware(); } if (!err) - atomic_inc(&active_counters); + atomic_inc(&active_events); mutex_unlock(&pmc_reserve_mutex); } if (err) return err; + event->destroy = hw_perf_event_destroy; + /* * Generate PMC IRQs: * (keep 'enabled' bit clear for now) @@ -946,17 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) /* * If we have a PMU initialized but no APIC * interrupts, we cannot sample hardware - * counters (user-space has to fall back and - * sample via a hrtimer based software counter): + * events (user-space has to fall back and + * sample via a hrtimer based software event): */ if (!x86_pmu.apic) return -EOPNOTSUPP; } - counter->destroy = hw_perf_counter_destroy; - /* - * Raw event type provide the config in the event structure + * Raw hw_event type provide the config in the hw_event structure */ if (attr->type == PERF_TYPE_RAW) { hwc->config |= x86_pmu.raw_event(attr->config); @@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) static void p6_pmu_disable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; if (!cpuc->enabled) @@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void) static void intel_pmu_disable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!cpuc->enabled) return; @@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void) static void amd_pmu_disable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; if (!cpuc->enabled) @@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void) cpuc->enabled = 0; /* * ensure we write the disable before we start disabling the - * counters proper, so that amd_pmu_enable_counter() does the + * events proper, so that amd_pmu_enable_event() does the * right thing. 
*/ barrier(); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < x86_pmu.num_events; idx++) { u64 val; if (!test_bit(idx, cpuc->active_mask)) @@ -1070,7 +1070,7 @@ void hw_perf_disable(void) static void p6_pmu_enable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long val; if (cpuc->enabled) @@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void) static void intel_pmu_enable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) return; @@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { - struct perf_counter *counter = - cpuc->counters[X86_PMC_IDX_FIXED_BTS]; + struct perf_event *event = + cpuc->events[X86_PMC_IDX_FIXED_BTS]; - if (WARN_ON_ONCE(!counter)) + if (WARN_ON_ONCE(!event)) return; - intel_pmu_enable_bts(counter->hw.config); + intel_pmu_enable_bts(event->hw.config); } } static void amd_pmu_enable_all(void) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; if (cpuc->enabled) @@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void) cpuc->enabled = 1; barrier(); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - struct perf_counter *counter = cpuc->counters[idx]; + for (idx = 0; idx < x86_pmu.num_events; idx++) { + struct perf_event *event = cpuc->events[idx]; u64 val; if (!test_bit(idx, cpuc->active_mask)) continue; - val = counter->hw.config; + val = event->hw.config; val |= ARCH_PERFMON_EVENTSEL0_ENABLE; wrmsrl(MSR_K7_EVNTSEL0 + idx, val); } @@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) +static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) { (void)checking_wrmsrl(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); } -static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) +static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) { (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); } static inline void -intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) +intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; @@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) } static inline void -p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) +p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val = P6_NOP_COUNTER; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val = P6_NOP_EVENT; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) } static inline void -intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) +intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) { if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { intel_pmu_disable_bts(); @@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) return; } - x86_pmu_disable_counter(hwc, idx); + 
x86_pmu_disable_event(hwc, idx); } static inline void -amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) +amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) { - x86_pmu_disable_counter(hwc, idx); + x86_pmu_disable_event(hwc, idx); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. - * To be called with the counter disabled in hw: + * To be called with the event disabled in hw: */ static int -x86_perf_counter_set_period(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) +x86_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, ret = 1; } /* - * Quirk: certain CPUs dont like it if just 1 event is left: + * Quirk: certain CPUs dont like it if just 1 hw_event is left: */ if (unlikely(left < 2)) left = 2; @@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter, per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* - * The hw counter starts counting from this counter offset, + * The hw event starts counting from this event offset, * mark it to be able to extra future deltas: */ atomic64_set(&hwc->prev_count, (u64)-left); - err = checking_wrmsrl(hwc->counter_base + idx, - (u64)(-left) & x86_pmu.counter_mask); + err = checking_wrmsrl(hwc->event_base + idx, + (u64)(-left) & x86_pmu.event_mask); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); return ret; } static inline void -intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) +intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) { int idx = __idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; @@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) err = checking_wrmsrl(hwc->config_base, ctrl_val); } -static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) +static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; val = hwc->config; @@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) } -static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) +static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) { if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { - if (!__get_cpu_var(cpu_hw_counters).enabled) + if (!__get_cpu_var(cpu_hw_events).enabled) return; intel_pmu_enable_bts(hwc->config); @@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) return; } - x86_pmu_enable_counter(hwc, idx); + x86_pmu_enable_event(hwc, idx); } -static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) +static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) - x86_pmu_enable_counter(hwc, idx); + x86_pmu_enable_event(hwc, idx); } static int -fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) +fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) { - unsigned int event; + unsigned int hw_event; - event = hwc->config & ARCH_PERFMON_EVENT_MASK; + hw_event = hwc->config & 
ARCH_PERFMON_EVENT_MASK; - if (unlikely((event == + if (unlikely((hw_event == x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && (hwc->sample_period == 1))) return X86_PMC_IDX_FIXED_BTS; - if (!x86_pmu.num_counters_fixed) + if (!x86_pmu.num_events_fixed) return -1; - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) return X86_PMC_IDX_FIXED_BUS_CYCLES; return -1; } /* - * Find a PMC slot for the freshly enabled / scheduled in counter: + * Find a PMC slot for the freshly enabled / scheduled in event: */ -static int x86_pmu_enable(struct perf_counter *counter) +static int x86_pmu_enable(struct perf_event *event) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; int idx; - idx = fixed_mode_idx(counter, hwc); + idx = fixed_mode_idx(event, hwc); if (idx == X86_PMC_IDX_FIXED_BTS) { /* BTS is already occupied. */ if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; hwc->config_base = 0; - hwc->counter_base = 0; + hwc->event_base = 0; hwc->idx = idx; } else if (idx >= 0) { /* - * Try to get the fixed counter, if that is already taken - * then try to get a generic counter: + * Try to get the fixed event, if that is already taken + * then try to get a generic event: */ if (test_and_set_bit(idx, cpuc->used_mask)) goto try_generic; hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; /* - * We set it so that counter_base + idx in wrmsr/rdmsr maps to + * We set it so that event_base + idx in wrmsr/rdmsr maps to * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: */ - hwc->counter_base = + hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; hwc->idx = idx; } else { idx = hwc->idx; - /* Try to get the previous generic counter again */ + /* Try to get the previous generic event again */ if (test_and_set_bit(idx, cpuc->used_mask)) { try_generic: idx = find_first_zero_bit(cpuc->used_mask, - x86_pmu.num_counters); - if (idx == x86_pmu.num_counters) + x86_pmu.num_events); + if (idx == x86_pmu.num_events) return -EAGAIN; set_bit(idx, cpuc->used_mask); hwc->idx = idx; } hwc->config_base = x86_pmu.eventsel; - hwc->counter_base = x86_pmu.perfctr; + hwc->event_base = x86_pmu.perfctr; } - perf_counters_lapic_init(); + perf_events_lapic_init(); x86_pmu.disable(hwc, idx); - cpuc->counters[idx] = counter; + cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); - x86_perf_counter_set_period(counter, hwc, idx); + x86_perf_event_set_period(event, hwc, idx); x86_pmu.enable(hwc, idx); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); return 0; } -static void x86_pmu_unthrottle(struct perf_counter *counter) +static void x86_pmu_unthrottle(struct perf_event *event) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || - cpuc->counters[hwc->idx] != counter)) + cpuc->events[hwc->idx] != event)) return; x86_pmu.enable(hwc, hwc->idx); } -void perf_counter_print_debug(void) +void perf_event_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; - struct cpu_hw_counters *cpuc; + struct cpu_hw_events *cpuc; unsigned long flags; int cpu, idx; - if (!x86_pmu.num_counters) + if (!x86_pmu.num_events) return; local_irq_save(flags); cpu = smp_processor_id(); - cpuc = &per_cpu(cpu_hw_counters, cpu); + cpuc = &per_cpu(cpu_hw_events, cpu); if (x86_pmu.version >= 2) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); @@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void) } pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < x86_pmu.num_events; idx++) { rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); rdmsrl(x86_pmu.perfctr + idx, pmc_count); @@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void) pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", @@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void) local_irq_restore(flags); } -static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) +static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) { struct debug_store *ds = cpuc->ds; struct bts_record { @@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) u64 to; u64 flags; }; - struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; + struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; struct bts_record *at, *top; struct perf_output_handle handle; struct perf_event_header header; struct perf_sample_data data; struct pt_regs regs; - if (!counter) + if (!event) return; if (!ds) @@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) ds->bts_index = ds->bts_buffer_base; 
- data.period = counter->hw.last_period; + data.period = event->hw.last_period; data.addr = 0; regs.ip = 0; @@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) * We will overwrite the from and to address before we output * the sample. */ - perf_prepare_sample(&header, &data, counter, &regs); + perf_prepare_sample(&header, &data, event, &regs); - if (perf_output_begin(&handle, counter, + if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) return; @@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) data.ip = at->from; data.addr = at->to; - perf_output_sample(&handle, &header, &data, counter); + perf_output_sample(&handle, &header, &data, event); } perf_output_end(&handle); /* There's new data available. */ - counter->hw.interrupts++; - counter->pending_kill = POLL_IN; + event->hw.interrupts++; + event->pending_kill = POLL_IN; } -static void x86_pmu_disable(struct perf_counter *counter) +static void x86_pmu_disable(struct perf_event *event) { - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; /* @@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter) /* * Make sure the cleared pointer becomes visible before we - * (potentially) free the counter: + * (potentially) free the event: */ barrier(); /* - * Drain the remaining delta count out of a counter + * Drain the remaining delta count out of a event * that we are disabling: */ - x86_perf_counter_update(counter, hwc, idx); + x86_perf_event_update(event, hwc, idx); /* Drain the remaining BTS records. */ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) intel_pmu_drain_bts_buffer(cpuc); - cpuc->counters[idx] = NULL; + cpuc->events[idx] = NULL; clear_bit(idx, cpuc->used_mask); - perf_counter_update_userpage(counter); + perf_event_update_userpage(event); } /* - * Save and restart an expired counter. Called by NMI contexts, - * so it has to be careful about preempting normal counter ops: + * Save and restart an expired event. 
Called by NMI contexts, + * so it has to be careful about preempting normal event ops: */ -static int intel_pmu_save_and_restart(struct perf_counter *counter) +static int intel_pmu_save_and_restart(struct perf_event *event) { - struct hw_perf_counter *hwc = &counter->hw; + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; int ret; - x86_perf_counter_update(counter, hwc, idx); - ret = x86_perf_counter_set_period(counter, hwc, idx); + x86_perf_event_update(event, hwc, idx); + ret = x86_perf_event_set_period(event, hwc, idx); - if (counter->state == PERF_COUNTER_STATE_ACTIVE) - intel_pmu_enable_counter(hwc, idx); + if (event->state == PERF_EVENT_STATE_ACTIVE) + intel_pmu_enable_event(hwc, idx); return ret; } static void intel_pmu_reset(void) { - struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; + struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; unsigned long flags; int idx; - if (!x86_pmu.num_counters) + if (!x86_pmu.num_events) return; local_irq_save(flags); printk("clearing PMU state on CPU#%d\n", smp_processor_id()); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < x86_pmu.num_events; idx++) { checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); } if (ds) @@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void) static int p6_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - struct perf_counter *counter; - struct hw_perf_counter *hwc; + struct cpu_hw_events *cpuc; + struct perf_event *event; + struct hw_perf_event *hwc; int idx, handled = 0; u64 val; data.addr = 0; - cpuc = &__get_cpu_var(cpu_hw_counters); + cpuc = &__get_cpu_var(cpu_hw_events); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < x86_pmu.num_events; idx++) { if (!test_bit(idx, cpuc->active_mask)) continue; - counter = cpuc->counters[idx]; - hwc = &counter->hw; + event = cpuc->events[idx]; + hwc = &event->hw; - val = x86_perf_counter_update(counter, hwc, idx); - if (val & (1ULL << (x86_pmu.counter_bits - 1))) + val = x86_perf_event_update(event, hwc, idx); + if (val & (1ULL << (x86_pmu.event_bits - 1))) continue; /* - * counter overflow + * event overflow */ handled = 1; - data.period = counter->hw.last_period; + data.period = event->hw.last_period; - if (!x86_perf_counter_set_period(counter, hwc, idx)) + if (!x86_perf_event_set_period(event, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data, regs)) - p6_pmu_disable_counter(hwc, idx); + if (perf_event_overflow(event, 1, &data, regs)) + p6_pmu_disable_event(hwc, idx); } if (handled) @@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) static int intel_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; - struct cpu_hw_counters *cpuc; + struct cpu_hw_events *cpuc; int bit, loops; u64 ack, status; data.addr = 0; - cpuc = &__get_cpu_var(cpu_hw_counters); + cpuc = &__get_cpu_var(cpu_hw_events); perf_disable(); intel_pmu_drain_bts_buffer(cpuc); @@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) loops = 0; again: if (++loops > 100) { - WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); - perf_counter_print_debug(); + WARN_ONCE(1, "perfevents: irq loop stuck!\n"); + perf_event_print_debug(); intel_pmu_reset(); perf_enable(); return 1; @@ -1706,19 +1706,19 @@ 
again: inc_irq_stat(apic_perf_irqs); ack = status; for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { - struct perf_counter *counter = cpuc->counters[bit]; + struct perf_event *event = cpuc->events[bit]; clear_bit(bit, (unsigned long *) &status); if (!test_bit(bit, cpuc->active_mask)) continue; - if (!intel_pmu_save_and_restart(counter)) + if (!intel_pmu_save_and_restart(event)) continue; - data.period = counter->hw.last_period; + data.period = event->hw.last_period; - if (perf_counter_overflow(counter, 1, &data, regs)) - intel_pmu_disable_counter(&counter->hw, bit); + if (perf_event_overflow(event, 1, &data, regs)) + intel_pmu_disable_event(&event->hw, bit); } intel_pmu_ack_status(ack); @@ -1738,38 +1738,38 @@ again: static int amd_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - struct perf_counter *counter; - struct hw_perf_counter *hwc; + struct cpu_hw_events *cpuc; + struct perf_event *event; + struct hw_perf_event *hwc; int idx, handled = 0; u64 val; data.addr = 0; - cpuc = &__get_cpu_var(cpu_hw_counters); + cpuc = &__get_cpu_var(cpu_hw_events); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < x86_pmu.num_events; idx++) { if (!test_bit(idx, cpuc->active_mask)) continue; - counter = cpuc->counters[idx]; - hwc = &counter->hw; + event = cpuc->events[idx]; + hwc = &event->hw; - val = x86_perf_counter_update(counter, hwc, idx); - if (val & (1ULL << (x86_pmu.counter_bits - 1))) + val = x86_perf_event_update(event, hwc, idx); + if (val & (1ULL << (x86_pmu.event_bits - 1))) continue; /* - * counter overflow + * event overflow */ handled = 1; - data.period = counter->hw.last_period; + data.period = event->hw.last_period; - if (!x86_perf_counter_set_period(counter, hwc, idx)) + if (!x86_perf_event_set_period(event, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data, regs)) - amd_pmu_disable_counter(hwc, idx); + if (perf_event_overflow(event, 1, &data, regs)) + amd_pmu_disable_event(hwc, idx); } if (handled) @@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs) irq_enter(); ack_APIC_irq(); inc_irq_stat(apic_pending_irqs); - perf_counter_do_pending(); + perf_event_do_pending(); irq_exit(); } -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { #ifdef CONFIG_X86_LOCAL_APIC apic->send_IPI_self(LOCAL_PENDING_VECTOR); #endif } -void perf_counters_lapic_init(void) +void perf_events_lapic_init(void) { #ifdef CONFIG_X86_LOCAL_APIC if (!x86_pmu.apic || !x86_pmu_initialized()) @@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void) } static int __kprobes -perf_counter_nmi_handler(struct notifier_block *self, +perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; struct pt_regs *regs; - if (!atomic_read(&active_counters)) + if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { @@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self, #endif /* * Can't rely on the handled return value to say it was our NMI, two - * counters could trigger 'simultaneously' raising two back-to-back NMIs. + * events could trigger 'simultaneously' raising two back-to-back NMIs. * * If the first NMI handles both, the latter will be empty and daze * the CPU. 
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self, return NOTIFY_STOP; } -static __read_mostly struct notifier_block perf_counter_nmi_notifier = { - .notifier_call = perf_counter_nmi_handler, +static __read_mostly struct notifier_block perf_event_nmi_notifier = { + .notifier_call = perf_event_nmi_handler, .next = NULL, .priority = 1 }; @@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = { .handle_irq = p6_pmu_handle_irq, .disable_all = p6_pmu_disable_all, .enable_all = p6_pmu_enable_all, - .enable = p6_pmu_enable_counter, - .disable = p6_pmu_disable_counter, + .enable = p6_pmu_enable_event, + .disable = p6_pmu_disable_event, .eventsel = MSR_P6_EVNTSEL0, .perfctr = MSR_P6_PERFCTR0, .event_map = p6_pmu_event_map, @@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = { .apic = 1, .max_period = (1ULL << 31) - 1, .version = 0, - .num_counters = 2, + .num_events = 2, /* - * Counters have 40 bits implemented. However they are designed such + * Events have 40 bits implemented. However they are designed such * that bits [32-39] are sign extensions of bit 31. As such the - * effective width of a counter for P6-like PMU is 32 bits only. + * effective width of a event for P6-like PMU is 32 bits only. * * See IA-32 Intel Architecture Software developer manual Vol 3B */ - .counter_bits = 32, - .counter_mask = (1ULL << 32) - 1, + .event_bits = 32, + .event_mask = (1ULL << 32) - 1, }; static struct x86_pmu intel_pmu = { @@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = { .handle_irq = intel_pmu_handle_irq, .disable_all = intel_pmu_disable_all, .enable_all = intel_pmu_enable_all, - .enable = intel_pmu_enable_counter, - .disable = intel_pmu_disable_counter, + .enable = intel_pmu_enable_event, + .disable = intel_pmu_disable_event, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, .event_map = intel_pmu_event_map, @@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = { /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of - * the generic counter period: + * the generic event period: */ .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, @@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = { .handle_irq = amd_pmu_handle_irq, .disable_all = amd_pmu_disable_all, .enable_all = amd_pmu_enable_all, - .enable = amd_pmu_enable_counter, - .disable = amd_pmu_disable_counter, + .enable = amd_pmu_enable_event, + .disable = amd_pmu_disable_event, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .event_map = amd_pmu_event_map, .raw_event = amd_pmu_raw_event, .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_counters = 4, - .counter_bits = 48, - .counter_mask = (1ULL << 48) - 1, + .num_events = 4, + .event_bits = 48, + .event_mask = (1ULL << 48) - 1, .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, @@ -1970,7 +1970,7 @@ static int intel_pmu_init(void) /* * Check whether the Architectural PerfMon supports - * Branch Misses Retired Event or not. + * Branch Misses Retired hw_event or not. 
*/ cpuid(10, &eax.full, &ebx, &unused, &edx.full); if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) @@ -1982,15 +1982,15 @@ static int intel_pmu_init(void) x86_pmu = intel_pmu; x86_pmu.version = version; - x86_pmu.num_counters = eax.split.num_counters; - x86_pmu.counter_bits = eax.split.bit_width; - x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; + x86_pmu.num_events = eax.split.num_events; + x86_pmu.event_bits = eax.split.bit_width; + x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; /* - * Quirk: v2 perfmon does not report fixed-purpose counters, so - * assume at least 3 counters: + * Quirk: v2 perfmon does not report fixed-purpose events, so + * assume at least 3 events: */ - x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); + x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); /* * Install the hw-cache-events table: @@ -2037,11 +2037,11 @@ static int amd_pmu_init(void) return 0; } -void __init init_hw_perf_counters(void) +void __init init_hw_perf_events(void) { int err; - pr_info("Performance Counters: "); + pr_info("Performance Events: "); switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: @@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void) return; } if (err != 0) { - pr_cont("no PMU driver, software counters only.\n"); + pr_cont("no PMU driver, software events only.\n"); return; } pr_cont("%s PMU driver.\n", x86_pmu.name); - if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { - WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", - x86_pmu.num_counters, X86_PMC_MAX_GENERIC); - x86_pmu.num_counters = X86_PMC_MAX_GENERIC; + if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", + x86_pmu.num_events, X86_PMC_MAX_GENERIC); + x86_pmu.num_events = X86_PMC_MAX_GENERIC; } - perf_counter_mask = (1 << x86_pmu.num_counters) - 1; - perf_max_counters = x86_pmu.num_counters; + perf_event_mask = (1 << x86_pmu.num_events) - 1; + perf_max_events = x86_pmu.num_events; - if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { - WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", - x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); - x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; + if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) { + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", + x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED); + x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED; } - perf_counter_mask |= - ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; - x86_pmu.intel_ctrl = perf_counter_mask; + perf_event_mask |= + ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED; + x86_pmu.intel_ctrl = perf_event_mask; - perf_counters_lapic_init(); - register_die_notifier(&perf_counter_nmi_notifier); + perf_events_lapic_init(); + register_die_notifier(&perf_event_nmi_notifier); - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.counter_bits); - pr_info("... generic counters: %d\n", x86_pmu.num_counters); - pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); - pr_info("... counter mask: %016Lx\n", perf_counter_mask); + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.event_bits); + pr_info("... generic registers: %d\n", x86_pmu.num_events); + pr_info("... 
value mask: %016Lx\n", x86_pmu.event_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); + pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); + pr_info("... event mask: %016Lx\n", perf_event_mask); } -static inline void x86_pmu_read(struct perf_counter *counter) +static inline void x86_pmu_read(struct perf_event *event) { - x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); + x86_perf_event_update(event, &event->hw, event->hw.idx); } static const struct pmu pmu = { @@ -2102,13 +2102,16 @@ static const struct pmu pmu = { .unthrottle = x86_pmu_unthrottle, }; -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) +const struct pmu *hw_perf_event_init(struct perf_event *event) { int err; - err = __hw_perf_counter_init(counter); - if (err) + err = __hw_perf_event_init(event); + if (err) { + if (event->destroy) + event->destroy(event); return ERR_PTR(err); + } return &pmu; } @@ -2289,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } -void hw_perf_counter_setup_online(int cpu) +void hw_perf_event_setup_online(int cpu) { init_debug_store_on_cpu(cpu); } diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 392bea43b89..fab786f60ed 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -20,7 +20,7 @@ #include <linux/kprobes.h> #include <asm/apic.h> -#include <asm/perf_counter.h> +#include <asm/perf_event.h> struct nmi_watchdog_ctlblk { unsigned int cccr_msr; diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 0a46b4df5d8..1cbed97b59c 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -58,6 +58,9 @@ static unsigned long vmware_get_tsc_khz(void) tsc_hz = eax | (((uint64_t)ebx) << 32); do_div(tsc_hz, 1000); BUG_ON(tsc_hz >> 32); + printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n", + (unsigned long) tsc_hz / 1000, + (unsigned long) tsc_hz % 1000); return tsc_hz; } @@ -69,6 +72,9 @@ void __init vmware_platform_setup(void) if (ebx != UINT_MAX) x86_platform.calibrate_tsc = vmware_get_tsc_khz; + else + printk(KERN_WARNING + "Failed to get TSC freq from the hypervisor\n"); } /* diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index a3210ce1ecc..85419bb7d4a 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1331,7 +1331,7 @@ void __init e820_reserve_resources(void) struct resource *res; u64 end; - res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); + res = alloc_bootmem(sizeof(struct resource) * e820.nr_map); e820_res = res; for (i = 0; i < e820.nr_map; i++) { end = e820.map[i].addr + e820.map[i].size - 1; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 335f049d110..b11cab3c323 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -624,7 +624,7 @@ try_next_port: return -1; } - loop = 10; + loop = 100000; /* Reset the EHCI controller */ cmd = readl(&ehci_regs->command); cmd |= CMD_RESET; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index d59fe323807..681c3fda739 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS apicinterrupt LOCAL_PENDING_VECTOR \ perf_pending_interrupt smp_perf_pending_interrupt #endif diff 
--git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 300883112e3..40f30773fb2 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -208,7 +208,7 @@ static void __init apic_intr_init(void) alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); /* Performance monitoring interrupts: */ -# ifdef CONFIG_PERF_COUNTERS +# ifdef CONFIG_PERF_EVENTS alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); # endif diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 0db7969b0dd..378e9a8f1bf 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, { ssize_t ret = -EINVAL; - if ((len >> PAGE_SHIFT) > num_physpages) { - pr_err("microcode: too much data (max %ld pages)\n", num_physpages); + if ((len >> PAGE_SHIFT) > totalram_pages) { + pr_err("microcode: too much data (max %ld pages)\n", totalram_pages); return ret; } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8d7d5c9c1be..7b058a2dc66 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -325,16 +325,6 @@ static int putreg(struct task_struct *child, return set_flags(child, value); #ifdef CONFIG_X86_64 - /* - * Orig_ax is really just a flag with small positive and - * negative values, so make sure to always sign-extend it - * from 32 bits so that it works correctly regardless of - * whether we come from a 32-bit environment or not. - */ - case offsetof(struct user_regs_struct, orig_ax): - value = (long) (s32) value; - break; - case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_OF(child)) return -EIO; @@ -1126,10 +1116,15 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value) case offsetof(struct user32, regs.orig_eax): /* - * Sign-extend the value so that orig_eax = -1 - * causes (long)orig_ax < 0 tests to fire correctly. + * A 32-bit debugger setting orig_eax means to restore + * the state of the task restarting a 32-bit syscall. + * Make sure we interpret the -ERESTART* codes correctly + * in case the task is not actually still sitting at the + * exit from a 32-bit syscall with TS_COMPAT still set. */ - regs->orig_ax = (long) (s32) value; + regs->orig_ax = value; + if (syscall_get_nr(child, regs) >= 0) + task_thread_info(child)->status |= TS_COMPAT; break; case offsetof(struct user32, regs.eflags): diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a55f6609fe1..f327bccf508 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -697,21 +697,6 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif - strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; - -#ifdef CONFIG_X86_64 - /* - * Must call this twice: Once just to detect whether hardware doesn't - * support NX (so that the early EHCI debug console setup can safely - * call set_fixmap(), and then again after parsing early parameters to - * honor the respective command line option. 
- */ - check_efer(); -#endif - - parse_early_param(); - /* VMI may relocate the fixmap; do this before touching ioremap area */ vmi_init(); @@ -794,6 +779,21 @@ void __init setup_arch(char **cmdline_p) #endif #endif + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + +#ifdef CONFIG_X86_64 + /* + * Must call this twice: Once just to detect whether hardware doesn't + * support NX (so that the early EHCI debug console setup can safely + * call set_fixmap(), and then again after parsing early parameters to + * honor the respective command line option. + */ + check_efer(); +#endif + + parse_early_param(); + #ifdef CONFIG_X86_64 check_efer(); #endif diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d51321ddafd..0157cd26d7c 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -335,4 +335,4 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index 808031a5ba1..699f7eeb896 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -4,7 +4,7 @@ #include <asm/e820.h> /* ready for x86_64 and x86 */ -unsigned char *trampoline_base = __va(TRAMPOLINE_BASE); +unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE); void __init reserve_trampoline_memory(void) { @@ -26,7 +26,7 @@ void __init reserve_trampoline_memory(void) * bootstrap into the page concerned. The caller * has made sure it's suitably aligned. */ -unsigned long setup_trampoline(void) +unsigned long __cpuinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 66d874e5404..8508237e8e4 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S @@ -28,16 +28,12 @@ */ #include <linux/linkage.h> +#include <linux/init.h> #include <asm/segment.h> #include <asm/page_types.h> /* We can free up trampoline after bootup if cpu hotplug is not supported. */ -#ifndef CONFIG_HOTPLUG_CPU -.section ".cpuinit.data","aw",@progbits -#else -.section .rodata,"a",@progbits -#endif - +__CPUINITRODATA .code16 ENTRY(trampoline_data) diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index cddfb8d386b..596d54c660a 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -25,14 +25,15 @@ */ #include <linux/linkage.h> +#include <linux/init.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/segment.h> #include <asm/processor-flags.h> -.section .rodata, "a", @progbits - +/* We can free up the trampoline after bootup if cpu hotplug is not supported. 
*/ +__CPUINITRODATA .code16 ENTRY(trampoline_data) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 17409e8d109..cd982f48e23 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -666,7 +666,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || (val == CPUFREQ_RESUMECHANGE)) { - *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); + *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 0ccb57d5ee3..a46acccec38 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -45,9 +45,9 @@ PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(7); /* RWE */ #ifdef CONFIG_X86_64 - user PT_LOAD FLAGS(7); /* RWE */ + user PT_LOAD FLAGS(5); /* R_E */ #ifdef CONFIG_SMP - percpu PT_LOAD FLAGS(7); /* RWE */ + percpu PT_LOAD FLAGS(6); /* RW_ */ #endif init PT_LOAD FLAGS(7); /* RWE */ #endif diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 775a020990a..82728f2c6d5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -10,7 +10,7 @@ #include <linux/bootmem.h> /* max_low_pfn */ #include <linux/kprobes.h> /* __kprobes, ... */ #include <linux/mmiotrace.h> /* kmmio_handler, ... */ -#include <linux/perf_counter.h> /* perf_swcounter_event */ +#include <linux/perf_event.h> /* perf_sw_event */ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ @@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1114,11 +1114,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 3cd7711bb94..30938c1d8d5 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -84,7 +84,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd) #ifdef CONFIG_X86_PAE if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_bootmem) - pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); + pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); else pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); @@ -116,7 +116,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) #endif if (!page_table) page_table = - (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); + (pte_t *)alloc_bootmem_pages(PAGE_SIZE); } else page_table = (pte_t *)alloc_low_page(); @@ -857,8 +857,6 @@ static void __init test_wp_bit(void) } } -static struct kcore_list kcore_mem, kcore_vmalloc; - void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; @@ -886,13 +884,9 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - 
(unsigned long) &__init_begin; - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); - kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ea56b8cbb6a..5a4398a6006 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -647,8 +647,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif /* CONFIG_MEMORY_HOTPLUG */ -static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, - kcore_modules, kcore_vsyscall; +static struct kcore_list kcore_vsyscall; void __init mem_init(void) { @@ -677,17 +676,12 @@ void __init mem_init(void) initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); - kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); - kclist_add(&kcore_kernel, &_stext, _end - _stext); - kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN); kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, - VSYSCALL_END - VSYSCALL_START); + VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), max_pfn << (PAGE_SHIFT-10), codesize >> 10, absent_pages << (PAGE_SHIFT-10), diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 528bf954eb7..8cc18334414 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -225,9 +225,6 @@ void kmemcheck_hide(struct pt_regs *regs) BUG_ON(!irqs_disabled()); - if (data->balance == 0) - return; - if (unlikely(data->balance != 1)) { kmemcheck_show_all(); kmemcheck_error_save_bug(regs); diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 4899215999d..8eb05878554 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void) if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && current_cpu_data.x86_model == 15) { eax.split.version_id = 2; - eax.split.num_counters = 2; + eax.split.num_events = 2; eax.split.bit_width = 40; } - num_counters = eax.split.num_counters; + num_counters = eax.split.num_events; op_arch_perfmon_spec.num_counters = num_counters; op_arch_perfmon_spec.num_controls = num_counters; diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index b83776180c7..7b8e75d1608 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -13,7 +13,7 @@ #define OP_X86_MODEL_H #include <asm/types.h> -#include <asm/perf_counter.h> +#include <asm/perf_event.h> struct op_msr { unsigned long addr; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 417c9f5b4af..8aa85f17667 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -243,10 +243,6 @@ static void __restore_processor_state(struct saved_context *ctxt) do_fpu_end(); mtrr_bp_restore(); - -#ifdef CONFIG_X86_OLD_MCE - mcheck_init(&boot_cpu_data); -#endif } /* Needed by apm.c */ diff --git 
a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h index 9b92620c8a1..fca4db425f6 100644 --- a/arch/xtensa/include/asm/mman.h +++ b/arch/xtensa/include/asm/mman.h @@ -53,6 +53,8 @@ #define MAP_LOCKED 0x8000 /* pages are locked */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ /* * Flags for msync @@ -78,6 +80,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 427e14fa43c..cdbc27ca966 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -203,7 +203,7 @@ void __init mem_init(void) printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " "%ldk data, %ldk init %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), ram << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10),