Diffstat (limited to 'include/asm-x86')

 include/asm-x86/cmpxchg_32.h                | 32
 include/asm-x86/e820_32.h                   |  2
 include/asm-x86/e820_64.h                   |  2
 include/asm-x86/io_32.h                     |  6
 include/asm-x86/io_64.h                     |  6
 include/asm-x86/irqflags.h                  | 29
 include/asm-x86/lguest_hcall.h              |  2
 include/asm-x86/linkage.h                   | 35
 include/asm-x86/mach-rdc321x/gpio.h         |  9
 include/asm-x86/mach-rdc321x/rdc321x_defs.h |  8
 include/asm-x86/nops.h                      | 20
 include/asm-x86/page.h                      |  4
 include/asm-x86/pgtable.h                   |  2
 include/asm-x86/sync_bitops.h               |  9

 14 files changed, 121 insertions(+), 45 deletions(-)
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index cea1dae288a..959fad00dff 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -269,22 +269,26 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 	if (likely(boot_cpu_data.x86 > 3))			\
-		__ret = __cmpxchg((ptr), (unsigned long)(o),	\
-				(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),	\
+				(unsigned long)(o), (unsigned long)(n), \
+				sizeof(*(ptr)));		\
 	else							\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),	\
-				(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),	\
+				(unsigned long)(o), (unsigned long)(n), \
+				sizeof(*(ptr)));		\
 	__ret;							\
 })
 #define cmpxchg_local(ptr, o, n)				\
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 	if (likely(boot_cpu_data.x86 > 3))			\
-		__ret = __cmpxchg_local((ptr), (unsigned long)(o), \
-				(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+				(unsigned long)(o), (unsigned long)(n), \
+				sizeof(*(ptr)));		\
 	else							\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),	\
-				(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),	\
+				(unsigned long)(o), (unsigned long)(n), \
+				sizeof(*(ptr)));		\
 	__ret;							\
 })
 #endif
@@ -301,10 +305,12 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 	if (likely(boot_cpu_data.x86 > 4))			\
-		__ret = __cmpxchg64((ptr), (unsigned long long)(o), \
+		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),	\
+				(unsigned long long)(o),	\
 				(unsigned long long)(n));	\
 	else							\
-		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
+				(unsigned long long)(o),	\
 				(unsigned long long)(n));	\
 	__ret;							\
 })
@@ -312,10 +318,12 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 	if (likely(boot_cpu_data.x86 > 4))			\
-		__ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
+				(unsigned long long)(o),	\
 				(unsigned long long)(n));	\
 	else							\
-		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
+				(unsigned long long)(o),	\
 				(unsigned long long)(n));	\
 	__ret;							\
 })
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index f1da7ebd190..e7207a6de3e 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -28,6 +28,8 @@ extern void find_max_pfn(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type);
 extern void e820_register_memory(void);
 extern void limit_regions(unsigned long long size);
 extern void print_memory_map(char *who);
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index a560c4f5d50..22ede73ae72 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -18,6 +18,8 @@ extern unsigned long find_e820_area(unsigned long start, unsigned long end,
 				    unsigned size, unsigned long align);
 extern void add_memory_region(unsigned long start, unsigned long size,
 			      int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type);
 extern void setup_memory_region(void);
 extern void contig_e820_setup(void);
 extern unsigned long e820_end_of_ram(void);
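The point of the cmpxchg_32.h change is that the macros now cast their result back to the type of *ptr, so cmpxchg() behaves like a typed function instead of handing back a bare unsigned long. A minimal usage sketch follows; the variable and function names are illustrative, not part of the patch:

/* With the new casts, assigning the result of cmpxchg() to a
 * narrower type no longer truncates silently or triggers
 * conversion warnings: the macro's value already has the type
 * of *ptr (here, u16). */
static u16 state;

static int claim_if_idle(void)
{
	u16 old = cmpxchg(&state, 0, 1);	/* old value, as u16 */

	return old == 0;	/* nonzero if we won the race */
}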
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 58d2c45cd0b..d4d8fbd9378 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -114,13 +114,13 @@ static inline void * phys_to_virt(unsigned long address)
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index f64a59cc396..db0be2011a3 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -158,13 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size);
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 92021c1ffa3..0e2292483b3 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -70,6 +70,26 @@ static inline void raw_local_irq_restore(unsigned long flags)
 	native_restore_fl(flags);
 }
 
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+	raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
+#else
+
 static inline void raw_local_irq_disable(void)
 {
 	native_irq_disable();
@@ -80,6 +100,8 @@ static inline void raw_local_irq_enable(void)
 	native_irq_enable();
 }
 
+#endif
+
 /*
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
@@ -137,10 +159,17 @@ static inline unsigned long __raw_local_irq_save(void)
 #define raw_local_irq_save(flags) \
 	do { (flags) = __raw_local_irq_save(); } while (0)
 
+#ifdef CONFIG_X86_VSMP
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+}
+#else
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & X86_EFLAGS_IF);
 }
+#endif
 
 static inline int raw_irqs_disabled(void)
 {
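On VSMP, interrupt state is encoded in two EFLAGS bits rather than one: interrupts count as disabled when IF is clear or AC is set, which is exactly what the new raw_irqs_disabled_flags() variant tests. A standalone sketch of that predicate (the flag values are the architectural EFLAGS bit positions; the harness itself is illustrative):

#define X86_EFLAGS_IF	0x00000200	/* bit 9: interrupt enable */
#define X86_EFLAGS_AC	0x00040000	/* bit 18: alignment check */

/* Mirrors the CONFIG_X86_VSMP predicate added above: IRQs count
 * as "disabled" unless IF is set *and* AC is clear. */
static int vsmp_irqs_disabled(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
}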
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index 758b9a5d453..f239e7069ca 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -27,7 +27,7 @@
 #ifndef __ASSEMBLY__
 #include <asm/hw_irq.h>
 
-/*G:031 First, how does our Guest contact the Host to ask for privileged
+/*G:031 But first, how does our Guest contact the Host to ask for privileged
  * operations? There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 31739c7d66a..c048353f4b8 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -8,12 +8,45 @@
 
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
 /*
  * For 32-bit UML - mark functions implemented in assembly that use
  * regparm input parameters:
  */
 #define asmregparm __attribute__((regparm(3)))
+
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ *
+ * NOTE! On x86-64, all the arguments are in registers, so this
+ * only matters on a 32-bit kernel.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+	__asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+	__asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+	__asmlinkage_protect_n(ret, "g" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+			       "g" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+			       "g" (arg4), "g" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+			       "g" (arg4), "g" (arg5), "g" (arg6))
+
 #endif
 
 #ifdef CONFIG_X86_ALIGNMENT_16
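asmlinkage_protect() replaces the old prevent_tail_call(): instead of only tying the return value to a register, it also names each stack argument as an input to an empty asm, so gcc treats the caller-owned argument slots as live until the end of the function. A usage sketch follows; sys_example() and do_example() are hypothetical names, and the macro takes the argument count, then the return value, then the arguments:

/* Hypothetical 32-bit syscall body; on x86-32 the two arguments
 * live in the caller's stack slots, which this keeps intact. */
asmlinkage long sys_example(unsigned int fd, unsigned long arg)
{
	long ret = do_example(fd, arg);	/* do_example() is invented */

	asmlinkage_protect(2, ret, fd, arg);
	return ret;
}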
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index db31b929b99..acce0b7d397 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -5,19 +5,20 @@
 extern int rdc_gpio_get_value(unsigned gpio);
 extern void rdc_gpio_set_value(unsigned gpio, int value);
 extern int rdc_gpio_direction_input(unsigned gpio);
 extern int rdc_gpio_direction_output(unsigned gpio, int value);
-
+extern int rdc_gpio_request(unsigned gpio, const char *label);
+extern void rdc_gpio_free(unsigned gpio);
+extern void __init rdc321x_gpio_setup(void);
 
 /* Wrappers for the arch-neutral GPIO API */
 static inline int gpio_request(unsigned gpio, const char *label)
 {
-	/* Not yet implemented */
-	return 0;
+	return rdc_gpio_request(gpio, label);
 }
 
 static inline void gpio_free(unsigned gpio)
 {
-	/* Not yet implemented */
+	rdc_gpio_free(gpio);
 }
 
 static inline int gpio_direction_input(unsigned gpio)
diff --git a/include/asm-x86/mach-rdc321x/rdc321x_defs.h b/include/asm-x86/mach-rdc321x/rdc321x_defs.h
index 838ba8f64fd..c8e9c8bed3d 100644
--- a/include/asm-x86/mach-rdc321x/rdc321x_defs.h
+++ b/include/asm-x86/mach-rdc321x/rdc321x_defs.h
@@ -3,4 +3,10 @@
 /* General purpose configuration and data registers */
 #define RDC3210_CFGREG_ADDR	0x0CF8
 #define RDC3210_CFGREG_DATA	0x0CFC
-#define RDC_MAX_GPIO	0x3A
+
+#define RDC321X_GPIO_CTRL_REG1	0x48
+#define RDC321X_GPIO_CTRL_REG2	0x84
+#define RDC321X_GPIO_DATA_REG1	0x4c
+#define RDC321X_GPIO_DATA_REG2	0x88
+
+#define RDC321X_MAX_GPIO	58
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index e3b2bce0aff..b3930ae539b 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -73,16 +73,7 @@
 #define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
 #define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
 
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
 #define ASM_NOP1 K7_NOP1
 #define ASM_NOP2 K7_NOP2
 #define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
 #define ASM_NOP6 P6_NOP6
 #define ASM_NOP7 P6_NOP7
 #define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
 #else
 #define ASM_NOP1 GENERIC_NOP1
 #define ASM_NOP2 GENERIC_NOP2
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 1cb7c51bc29..a05b2896492 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -52,13 +52,13 @@ extern int page_is_ram(unsigned long pagenr);
 
 struct page;
 
-static void inline clear_user_page(void *page, unsigned long vaddr,
+static inline void clear_user_page(void *page, unsigned long vaddr,
 				struct page *pg)
 {
 	clear_page(page);
 }
 
-static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 				struct page *topage)
 {
 	copy_page(to, from);
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 174b8773871..9cf472aeb9c 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -85,6 +85,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
@@ -101,6 +102,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
 #define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
 #define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
 #define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
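The new __PAGE_KERNEL_UC_MINUS differs from __PAGE_KERNEL_NOCACHE in a single bit: it sets PCD but not PWT, which yields the UC- memory type instead of full UC, and UC- can still be overridden to write-combining by an MTRR. Spelled out with the standard x86 PTE bit values (this block is explanatory, not part of the patch):

#define _PAGE_PWT	0x008	/* bit 3: page write-through */
#define _PAGE_PCD	0x010	/* bit 4: page cache disable */

/* NOCACHE = PCD|PWT -> strong UC; UC_MINUS = PCD alone -> UC-,
 * which an MTRR may still promote to write-combining. */
#define NOCACHE_BITS	(_PAGE_PCD | _PAGE_PWT)	/* 0x18 */
#define UC_MINUS_BITS	(_PAGE_PCD)		/* 0x10 */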
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index cbce08a2d13..6b775c90566 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -23,10 +23,6 @@
  * This function is atomic and may not be reordered. See __set_bit()
  * if you do not require the atomic guarantees.
  *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
@@ -61,8 +57,7 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * @nr: Bit to change
  * @addr: Address to start counting from
  *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
+ * sync_change_bit() is atomic and may not be reordered.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
@@ -80,7 +75,6 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
  * It also implies a memory barrier.
  */
 static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
@@ -99,7 +93,6 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
  * It also implies a memory barrier.
  */
 static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
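The comment cleanup reflects what these helpers are actually for: the sync_ variants always emit a lock prefix, so they stay atomic with respect to another bus agent (for example a hypervisor sharing the page) even on !CONFIG_SMP kernels, where the plain bitops may drop the prefix. A minimal usage sketch, with the flag word and function names invented for illustration:

static unsigned long shared_pending;	/* word visible to both sides */

static void post_event(int nr)
{
	sync_set_bit(nr, &shared_pending);	/* always lock-prefixed */
}

static int take_event(int nr)
{
	/* Atomic test-and-clear; implies a memory barrier. */
	return sync_test_and_clear_bit(nr, &shared_pending);
}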