From 85eb69a16aab5a394ce043c2131319eae35e6493 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 12:50:51 +0100 Subject: x86: increase the kernel text limit to 512 MB People sometimes do crazy stuff like building really large static arrays into their kernels or building allyesconfig kernels. Give more space to the kernel and push modules up a bit: the kernel gets 512 MB and modules get 1.5 GB. Should be enough for a few years ;-) Signed-off-by: Ingo Molnar --- include/asm-x86/page_64.h | 4 ++-- include/asm-x86/pgtable_64.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 143546073b9..aee05c616e0 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -48,10 +48,10 @@ #define __VIRTUAL_MASK_SHIFT 48 /* - * Kernel image size is limited to 128 MB (see level2_kernel_pgt in + * Kernel image size is limited to 512 MB (see level2_kernel_pgt in * arch/x86/kernel/head_64.S), and it is mapped here: */ -#define KERNEL_IMAGE_SIZE (128*1024*1024) +#define KERNEL_IMAGE_SIZE (512*1024*1024) #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) #ifndef __ASSEMBLY__ diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 0a0b77bc736..01d2359e7a3 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -140,7 +140,7 @@ static inline void native_pgd_clear(pgd_t * pgd) #define VMALLOC_START _AC(0xffffc20000000000, UL) #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) -#define MODULES_VADDR _AC(0xffffffff88000000, UL) +#define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xfffffffffff00000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) -- cgit v1.2.3 From 00d1c5e05736f947687be27706bda01cec104e57 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: add gbpages switches These new controls toggle experimental support for a new CPU feature, the straightforward extension of largepages from the pmd level to the pud level, which allows 1GB (kernel) TLBs instead of 2MB TLBs. Turn it off by default, as this code has not been tested well enough yet. It can be enabled with the CONFIG_DIRECT_GBPAGES=y .config option or with gbpages on the boot line. If it is enabled in the .config, the nogbpages boot option disables it. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/pgtable_64.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 01d2359e7a3..6ef09914acb 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -239,6 +239,8 @@ static inline int pud_large(pud_t pte) #define update_mmu_cache(vma,address,pte) do { } while (0) +extern int direct_gbpages; + /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) -- cgit v1.2.3 From 48c508b364324c35018284328b5b92c51d2b30e0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: clean up find_e820_area(), 64-bit Change size to unsigned long, because callers and users all use unsigned long. Also make bad_addr take an alignment parameter.
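For illustration, the rounding an alignment-aware search has to do looks roughly like this (a minimal sketch; the helper name align_up is illustrative, not the actual e820 code, and align is assumed to be a power of two):

	/* Round addr up to the next align boundary; align must be a power of two. */
	static unsigned long align_up(unsigned long addr, unsigned long align)
	{
		return (addr + align - 1) & ~(align - 1);
	}

E.g. align_up(0x9f001, 0x1000) yields 0xa0000; a candidate area is usable only once its aligned start plus size still fits below end.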
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/e820_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 22ede73ae72..9e06c6eb4e2 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -15,7 +15,7 @@ #ifndef __ASSEMBLY__ extern unsigned long find_e820_area(unsigned long start, unsigned long end, - unsigned size, unsigned long align); + unsigned long size, unsigned long align); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, -- cgit v1.2.3 From 92bc2056855b3250bf6fd5849f05f88d85839efa Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:56 -0800 Subject: x86: change most X86_32 pt_regs members to unsigned long Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/ptrace.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index d9e04b46a44..708337a3672 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -36,23 +36,23 @@ struct pt_regs { #else /* __KERNEL__ */ struct pt_regs { - long bx; - long cx; - long dx; - long si; - long di; - long bp; + unsigned long bx; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long bp; long ax; - int ds; - int es; - int fs; + unsigned long ds; + unsigned long es; + unsigned long fs; /* int gs; */ long orig_ax; - long ip; - int cs; - long flags; - long sp; - int ss; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; }; #include -- cgit v1.2.3 From 9902a702c76f904be0057f8647dda9d6f89d4847 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:57 -0800 Subject: x86: make X86_32 pt_regs members unsigned long Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/ptrace.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 708337a3672..bc442461ac6 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -42,12 +42,12 @@ struct pt_regs { unsigned long si; unsigned long di; unsigned long bp; - long ax; + unsigned long ax; unsigned long ds; unsigned long es; unsigned long fs; /* int gs; */ - long orig_ax; + unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; @@ -145,7 +145,10 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro void signal_fault(struct pt_regs *regs, void __user *frame, char *where); #endif -#define regs_return_value(regs) ((regs)->ax) +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->ax; +} /* * user_mode_vm(regs) determines whether a register set came from user mode. 
-- cgit v1.2.3 From 04adf11435a5187383c35017a94b55701984243b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 16 Feb 2008 23:02:03 -0800 Subject: x86: remove never used nodenumber in pda Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/pda.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index c0305bff0f1..d9dc209c24a 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h @@ -22,7 +22,6 @@ struct x8664_pda { offset 40!!! */ #endif char *irqstackptr; - unsigned int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ short mmu_state; -- cgit v1.2.3 From 2785c8d052278228cc3806233c09295088f83d42 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:03 -0200 Subject: x86: call vsmp_init explicitly It now runs too early for ioremap, so we use early_ioremap. Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-x86/setup.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 071e054abd8..f745de21119 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h @@ -4,6 +4,10 @@ #define COMMAND_LINE_SIZE 2048 #ifndef __ASSEMBLY__ + +/* Interrupt control for vSMPowered x86_64 systems */ +void vsmp_init(void); + char *machine_specific_memory_setup(void); #ifndef CONFIG_PARAVIRT #define paravirt_post_allocator_init() do {} while (0) -- cgit v1.2.3 From bc0a733facbbde6c464e3ba5e165607fe4824cca Mon Sep 17 00:00:00 2001 From: "David P. Reed" Date: Mon, 18 Feb 2008 13:58:34 -0500 Subject: x86: define outb_pic and inb_pic to stop using outb_p and inb_p The delay between io port accesses to the PIC is now defined using outb_pic and inb_pic. This fix provides the next step, using udelay(2) to define the *PIC specific* timing requirements, rather than relying on bus-oriented timing, which is not well calibrated. Again, the primary reason for fixing this is to use a proper delay strategy, and in particular to fix crashes that can result from using port 80 writes on machines that have resources on port 80, such as the ENE chips used by Quanta in laptops it designs and sells to, e.g. HP. Signed-off-by: David P.
Reed Signed-off-by: Ingo Molnar --- include/asm-x86/i8259.h | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 67c319e0efc..e2650f21ca8 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h @@ -1,6 +1,8 @@ #ifndef __ASM_I8259_H__ #define __ASM_I8259_H__ +#include + extern unsigned int cached_irq_mask; #define __byte(x,y) (((unsigned char *) &(y))[x]) @@ -29,7 +31,28 @@ extern void enable_8259A_irq(unsigned int irq); extern void disable_8259A_irq(unsigned int irq); extern unsigned int startup_8259A_irq(unsigned int irq); -#define inb_pic inb_p -#define outb_pic outb_p +/* the PIC may need a careful delay on some platforms, hence specific calls */ +static inline unsigned char inb_pic(unsigned int port) +{ + unsigned char value = inb(port); + + /* + * delay for some accesses to PIC on motherboard or in chipset + * must be at least one microsecond, so be safe here: + */ + udelay(2); + + return value; +} + +static inline void outb_pic(unsigned char value, unsigned int port) +{ + outb(value, port); + /* + * delay for some accesses to PIC on motherboard or in chipset + * must be at least one microsecond, so be safe here: + */ + udelay(2); +} #endif /* __ASM_I8259_H__ */ -- cgit v1.2.3 From 4d46a89e7c867718020b2d5fd8f9e775293304be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 04:24:40 +0100 Subject: x86: clean up include/asm-x86/processor.h basic style cleanup to flush out years of neglect: - consistent indentation - whitespace fixes - consistent comments Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 638 +++++++++++++++++++++++++------------------- 1 file changed, 358 insertions(+), 280 deletions(-) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 45a2f0ab33d..43d2cc829a9 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -24,6 +24,7 @@ struct mm_struct; #include #include #include + #include #include #include @@ -37,16 +38,18 @@ struct mm_struct; static inline void *current_text_addr(void) { void *pc; - asm volatile("mov $1f,%0\n1:":"=r" (pc)); + + asm volatile("mov $1f, %0; 1:":"=r" (pc)); + return pc; } #ifdef CONFIG_X86_VSMP -#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) -#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) +# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) +# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) #else -#define ARCH_MIN_TASKALIGN 16 -#define ARCH_MIN_MMSTRUCT_ALIGN 0 +# define ARCH_MIN_TASKALIGN 16 +# define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif /* @@ -56,69 +59,81 @@ static inline void *current_text_addr(void) */ struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; + __u8 x86; /* CPU family */ + __u8 x86_vendor; /* CPU vendor */ + __u8 x86_model; + __u8 x86_mask; #ifdef CONFIG_X86_32 - char wp_works_ok; /* It doesn't on 386's */ - char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ - char hard_math; - char rfu; - char fdiv_bug; - char f00f_bug; - char coma_bug; - char pad0; + char wp_works_ok; /* It doesn't on 386's */ + + /* Problems on some 486Dx4's and old 386's: */ + char hlt_works_ok; + char hard_math; + char rfu; + char fdiv_bug; + char f00f_bug; + char coma_bug; + char pad0; #else - /* number of 4K pages in DTLB/ITLB combined(in pages)*/ - int x86_tlbsize; - __u8 x86_virt_bits, x86_phys_bits; - /* cpuid 
returned core id bits */ - __u8 x86_coreid_bits; - /* Max extended CPUID function supported */ - __u32 extended_cpuid_level; + /* Number of 4K pages in DTLB/ITLB combined(in pages): */ + int x86_tlbsize; + __u8 x86_virt_bits; + __u8 x86_phys_bits; + /* CPUID returned core id bits: */ + __u8 x86_coreid_bits; + /* Max extended CPUID function supported: */ + __u32 extended_cpuid_level; #endif - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - __u32 x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB - valid for CPUS which support this - call */ - int x86_cache_alignment; /* In bytes */ - int x86_power; - unsigned long loops_per_jiffy; + /* Maximum supported CPUID level, -1=no CPUID: */ + int cpuid_level; + __u32 x86_capability[NCAPINTS]; + char x86_vendor_id[16]; + char x86_model_id[64]; + /* in KB - valid for CPUS which support this call: */ + int x86_cache_size; + int x86_cache_alignment; /* In bytes */ + int x86_power; + unsigned long loops_per_jiffy; #ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ + /* cpus sharing the last level cache: */ + cpumask_t llc_shared_map; #endif - u16 x86_max_cores; /* cpuid returned max cores value */ - u16 apicid; - u16 x86_clflush_size; + /* cpuid returned max cores value: */ + u16 x86_max_cores; + u16 apicid; + u16 x86_clflush_size; #ifdef CONFIG_SMP - u16 booted_cores; /* number of cores as seen by OS */ - u16 phys_proc_id; /* Physical processor id. */ - u16 cpu_core_id; /* Core id */ - u16 cpu_index; /* index into per_cpu list */ + /* number of cores as seen by the OS: */ + u16 booted_cores; + /* Physical processor id: */ + u16 phys_proc_id; + /* Core id: */ + u16 cpu_core_id; + /* Index into per_cpu list: */ + u16 cpu_index; #endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); -#define X86_VENDOR_INTEL 0 -#define X86_VENDOR_CYRIX 1 -#define X86_VENDOR_AMD 2 -#define X86_VENDOR_UMC 3 -#define X86_VENDOR_NEXGEN 4 -#define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_TRANSMETA 7 -#define X86_VENDOR_NSC 8 -#define X86_VENDOR_NUM 9 -#define X86_VENDOR_UNKNOWN 0xff +#define X86_VENDOR_INTEL 0 +#define X86_VENDOR_CYRIX 1 +#define X86_VENDOR_AMD 2 +#define X86_VENDOR_UMC 3 +#define X86_VENDOR_NEXGEN 4 +#define X86_VENDOR_CENTAUR 5 +#define X86_VENDOR_TRANSMETA 7 +#define X86_VENDOR_NSC 8 +#define X86_VENDOR_NUM 9 + +#define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ -extern struct cpuinfo_x86 boot_cpu_data; -extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -extern __u32 cleared_cpu_caps[NCAPINTS]; +extern struct cpuinfo_x86 boot_cpu_data; +extern struct cpuinfo_x86 new_cpu_data; + +extern struct tss_struct doublefault_tss; +extern __u32 cleared_cpu_caps[NCAPINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); @@ -129,7 +144,9 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); #define current_cpu_data boot_cpu_data #endif -void cpu_detect(struct cpuinfo_x86 *c); +#define cache_line_size() (boot_cpu_data.x86_cache_alignment) + +extern void cpu_detect(struct cpuinfo_x86 *c); extern void identify_cpu(struct cpuinfo_x86 *); extern void identify_boot_cpu(void); @@ -146,7 +163,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) + unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. 
*/ __asm__("cpuid" @@ -165,54 +182,67 @@ static inline void load_cr3(pgd_t *pgdir) #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. */ struct x86_hw_tss { - unsigned short back_link, __blh; - unsigned long sp0; - unsigned short ss0, __ss0h; - unsigned long sp1; - unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ - unsigned long sp2; - unsigned short ss2, __ss2h; - unsigned long __cr3; - unsigned long ip; - unsigned long flags; - unsigned long ax, cx, dx, bx; - unsigned long sp, bp, si, di; - unsigned short es, __esh; - unsigned short cs, __csh; - unsigned short ss, __ssh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; - unsigned short ldt, __ldth; - unsigned short trace, io_bitmap_base; + unsigned short back_link, __blh; + unsigned long sp0; + unsigned short ss0, __ss0h; + unsigned long sp1; + /* ss1 caches MSR_IA32_SYSENTER_CS: */ + unsigned short ss1, __ss1h; + unsigned long sp2; + unsigned short ss2, __ss2h; + unsigned long __cr3; + unsigned long ip; + unsigned long flags; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long bx; + unsigned long sp; + unsigned long bp; + unsigned long si; + unsigned long di; + unsigned short es, __esh; + unsigned short cs, __csh; + unsigned short ss, __ssh; + unsigned short ds, __dsh; + unsigned short fs, __fsh; + unsigned short gs, __gsh; + unsigned short ldt, __ldth; + unsigned short trace; + unsigned short io_bitmap_base; + } __attribute__((packed)); #else struct x86_hw_tss { - u32 reserved1; - u64 sp0; - u64 sp1; - u64 sp2; - u64 reserved2; - u64 ist[7]; - u32 reserved3; - u32 reserved4; - u16 reserved5; - u16 io_bitmap_base; + u32 reserved1; + u64 sp0; + u64 sp1; + u64 sp2; + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; + } __attribute__((packed)) ____cacheline_aligned; #endif /* - * Size of io_bitmap. + * IO-bitmap sizes: */ -#define IO_BITMAP_BITS 65536 -#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) -#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) -#define INVALID_IO_BITMAP_OFFSET 0x8000 -#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 +#define IO_BITMAP_BITS 65536 +#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) +#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) +#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define INVALID_IO_BITMAP_OFFSET 0x8000 +#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct tss_struct { - struct x86_hw_tss x86_tss; + /* + * The hardware state: + */ + struct x86_hw_tss x86_tss; /* * The extra 1 is there because the CPU will access an @@ -220,48 +250,54 @@ struct tss_struct { * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ - unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; + unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ - unsigned long io_bitmap_max; - struct thread_struct *io_bitmap_owner; + unsigned long io_bitmap_max; + struct thread_struct *io_bitmap_owner; + /* - * pads the TSS to be cacheline-aligned (size is 0x100) + * Pad the TSS to be cacheline-aligned (size is 0x100): */ - unsigned long __cacheline_filler[35]; + unsigned long __cacheline_filler[35]; /* - * .. and then another 0x100 bytes for emergency kernel stack + * .. 
and then another 0x100 bytes for the emergency kernel stack: */ - unsigned long stack[64]; + unsigned long stack[64]; + } __attribute__((packed)); DECLARE_PER_CPU(struct tss_struct, init_tss); -/* Save the original ist values for checking stack pointers during debugging */ +/* + * Save the original ist values for checking stack pointers during debugging + */ struct orig_ist { - unsigned long ist[7]; + unsigned long ist[7]; }; #define MXCSR_DEFAULT 0x1f80 struct i387_fsave_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - u32 status; /* software status information */ + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + /* Software status information: */ + u32 status; }; struct i387_fxsave_struct { - u16 cwd; - u16 swd; - u16 twd; - u16 fop; + u16 cwd; + u16 swd; + u16 twd; + u16 fop; union { struct { u64 rip; @@ -274,31 +310,40 @@ struct i387_fxsave_struct { u32 fos; }; }; - u32 mxcsr; - u32 mxcsr_mask; - u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ - u32 padding[24]; + u32 mxcsr; + u32 mxcsr_mask; + /* 8*16 bytes for each FP-reg = 128 bytes: */ + u32 st_space[32]; + /* 16*16 bytes for each XMM-reg = 256 bytes: */ + u32 xmm_space[64]; + u32 padding[24]; + } __attribute__((aligned(16))); struct i387_soft_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - u8 ftop, changed, lookahead, no_update, rm, alimit; - struct info *info; - u32 entry_eip; + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct info *info; + u32 entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; - struct i387_soft_struct soft; + struct i387_soft_struct soft; }; #ifdef CONFIG_X86_32 @@ -313,42 +358,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; struct thread_struct { -/* cached TLS descriptors. 
*/ - struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - unsigned long sp0; - unsigned long sp; + /* Cached TLS descriptors: */ + struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + unsigned long sp0; + unsigned long sp; #ifdef CONFIG_X86_32 - unsigned long sysenter_cs; + unsigned long sysenter_cs; #else - unsigned long usersp; /* Copy from PDA */ - unsigned short es, ds, fsindex, gsindex; + unsigned long usersp; /* Copy from PDA */ + unsigned short es; + unsigned short ds; + unsigned short fsindex; + unsigned short gsindex; #endif - unsigned long ip; - unsigned long fs; - unsigned long gs; -/* Hardware debugging registers */ - unsigned long debugreg0; - unsigned long debugreg1; - unsigned long debugreg2; - unsigned long debugreg3; - unsigned long debugreg6; - unsigned long debugreg7; -/* fault info */ - unsigned long cr2, trap_no, error_code; -/* floating point info */ + unsigned long ip; + unsigned long fs; + unsigned long gs; + /* Hardware debugging registers: */ + unsigned long debugreg0; + unsigned long debugreg1; + unsigned long debugreg2; + unsigned long debugreg3; + unsigned long debugreg6; + unsigned long debugreg7; + /* Fault info: */ + unsigned long cr2; + unsigned long trap_no; + unsigned long error_code; + /* Floating point info: */ union i387_union i387 __attribute__((aligned(16)));; #ifdef CONFIG_X86_32 -/* virtual 86 mode info */ + /* Virtual 86 mode info */ struct vm86_struct __user *vm86_info; unsigned long screen_bitmap; - unsigned long v86flags, v86mask, saved_sp0; - unsigned int saved_fs, saved_gs; + unsigned long v86flags; + unsigned long v86mask; + unsigned long saved_sp0; + unsigned int saved_fs; + unsigned int saved_gs; #endif -/* IO permissions */ - unsigned long *io_bitmap_ptr; - unsigned long iopl; -/* max allowed port in the bitmap, in bytes: */ - unsigned io_bitmap_max; + /* IO permissions: */ + unsigned long *io_bitmap_ptr; + unsigned long iopl; + /* Max allowed port in the bitmap, in bytes: */ + unsigned io_bitmap_max; /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ unsigned long debugctlmsr; /* Debug Store - if not 0 points to a DS Save Area configuration; @@ -358,7 +411,7 @@ struct thread_struct { static inline unsigned long native_get_debugreg(int regno) { - unsigned long val = 0; /* Damn you, gcc! */ + unsigned long val = 0; /* Damn you, gcc! 
*/ switch (regno) { case 0: @@ -383,22 +436,22 @@ static inline void native_set_debugreg(int regno, unsigned long value) { switch (regno) { case 0: - asm("mov %0,%%db0" : /* no output */ :"r" (value)); + asm("mov %0, %%db0" ::"r" (value)); break; case 1: - asm("mov %0,%%db1" : /* no output */ :"r" (value)); + asm("mov %0, %%db1" ::"r" (value)); break; case 2: - asm("mov %0,%%db2" : /* no output */ :"r" (value)); + asm("mov %0, %%db2" ::"r" (value)); break; case 3: - asm("mov %0,%%db3" : /* no output */ :"r" (value)); + asm("mov %0, %%db3" ::"r" (value)); break; case 6: - asm("mov %0,%%db6" : /* no output */ :"r" (value)); + asm("mov %0, %%db6" ::"r" (value)); break; case 7: - asm("mov %0,%%db7" : /* no output */ :"r" (value)); + asm("mov %0, %%db7" ::"r" (value)); break; default: BUG(); @@ -412,6 +465,7 @@ static inline void native_set_iopl_mask(unsigned mask) { #ifdef CONFIG_X86_32 unsigned int reg; + __asm__ __volatile__ ("pushfl;" "popl %0;" "andl %1, %0;" @@ -423,12 +477,12 @@ static inline void native_set_iopl_mask(unsigned mask) #endif } -static inline void native_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void +native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { tss->x86_tss.sp0 = thread->sp0; #ifdef CONFIG_X86_32 - /* Only happens when SEP is enabled, no need to test "SEP"arately */ + /* Only happens when SEP is enabled, no need to test "SEP"arately: */ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { tss->x86_tss.ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); @@ -446,8 +500,8 @@ static inline void native_swapgs(void) #ifdef CONFIG_PARAVIRT #include #else -#define __cpuid native_cpuid -#define paravirt_enabled() 0 +#define __cpuid native_cpuid +#define paravirt_enabled() 0 /* * These special macros can be used to get or set a debugging register @@ -457,8 +511,8 @@ static inline void native_swapgs(void) #define set_debugreg(value, register) \ native_set_debugreg(register, value) -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void +load_sp0(struct tss_struct *tss, struct thread_struct *thread) { native_load_sp0(tss, thread); } @@ -473,11 +527,12 @@ static inline void load_sp0(struct tss_struct *tss, * enable), so that any CPU's that boot up * after us can get the correct flags. 
*/ -extern unsigned long mmu_cr4_features; +extern unsigned long mmu_cr4_features; static inline void set_in_cr4(unsigned long mask) { unsigned cr4; + mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; @@ -487,6 +542,7 @@ static inline void set_in_cr4(unsigned long mask) static inline void clear_in_cr4(unsigned long mask) { unsigned cr4; + mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; @@ -494,42 +550,42 @@ static inline void clear_in_cr4(unsigned long mask) } struct microcode_header { - unsigned int hdrver; - unsigned int rev; - unsigned int date; - unsigned int sig; - unsigned int cksum; - unsigned int ldrver; - unsigned int pf; - unsigned int datasize; - unsigned int totalsize; - unsigned int reserved[3]; + unsigned int hdrver; + unsigned int rev; + unsigned int date; + unsigned int sig; + unsigned int cksum; + unsigned int ldrver; + unsigned int pf; + unsigned int datasize; + unsigned int totalsize; + unsigned int reserved[3]; }; struct microcode { - struct microcode_header hdr; - unsigned int bits[0]; + struct microcode_header hdr; + unsigned int bits[0]; }; -typedef struct microcode microcode_t; -typedef struct microcode_header microcode_header_t; +typedef struct microcode microcode_t; +typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { - unsigned int sig; - unsigned int pf; - unsigned int cksum; + unsigned int sig; + unsigned int pf; + unsigned int cksum; }; struct extended_sigtable { - unsigned int count; - unsigned int cksum; - unsigned int reserved[3]; + unsigned int count; + unsigned int cksum; + unsigned int reserved[3]; struct extended_signature sigs[0]; }; typedef struct { - unsigned long seg; + unsigned long seg; } mm_segment_t; @@ -541,7 +597,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ +/* Prepare to copy thread state - unlazy all lazy state */ extern void prepare_to_copy(struct task_struct *tsk); unsigned long get_wchan(struct task_struct *p); @@ -578,118 +634,131 @@ static inline unsigned int cpuid_eax(unsigned int op) unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return eax; } + static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return ebx; } + static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return ecx; } + static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; } /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ static inline void rep_nop(void) { - __asm__ __volatile__("rep;nop": : :"memory"); + __asm__ __volatile__("rep; nop" ::: "memory"); } -/* Stop speculative execution */ +static inline void cpu_relax(void) +{ + rep_nop(); +} + +/* Stop speculative execution: */ static inline void sync_core(void) { int tmp; + asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory"); } -#define cpu_relax() rep_nop() - -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) +static inline void +__monitor(const void *eax, unsigned long ecx, unsigned long edx) { - /* "monitor %eax,%ecx,%edx;" */ + /* "monitor %eax, %ecx, %edx;" */ asm volatile( - ".byte 0x0f,0x01,0xc8;" - : :"a" (eax), "c" (ecx), "d"(edx)); + ".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { - /* "mwait %eax,%ecx;" */ + /* "mwait %eax, %ecx;" */ asm volatile( - ".byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); + ".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { - /* "mwait %eax,%ecx;" */ + /* "mwait %eax, %ecx;" */ asm volatile( - "sti; .byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); + "sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); -extern int force_mwait; +extern int force_mwait; extern void select_idle_routine(const struct cpuinfo_x86 *c); -extern unsigned long boot_option_idle_override; +extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); /* Defined in head.S */ -extern struct desc_ptr early_gdt_descr; +extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); -/* from system description table in BIOS. Mostly for MCA use, but - * others may find it useful. */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; +/* + * from system description table in BIOS. Mostly for MCA use, but + * others may find it useful: + */ +extern unsigned int machine_id; +extern unsigned int machine_submodel_id; +extern unsigned int BIOS_revision; -/* Boot loader type from the setup header */ -extern int bootloader_type; +/* Boot loader type from the setup header: */ +extern int bootloader_type; -extern char ignore_fpu_irq; -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) +extern char ignore_fpu_irq; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH #ifdef CONFIG_X86_32 -#define BASE_PREFETCH ASM_NOP4 -#define ARCH_HAS_PREFETCH +# define BASE_PREFETCH ASM_NOP4 +# define ARCH_HAS_PREFETCH #else -#define BASE_PREFETCH "prefetcht0 (%1)" +# define BASE_PREFETCH "prefetcht0 (%1)" #endif -/* Prefetch instructions for Pentium III and AMD Athlon */ -/* It's not worth to care about 3dnow! prefetches for the K6 - because they are microcoded there and very slow. - However we don't do prefetches for pre XP Athlons currently - That should be fixed. */ +/* + * Prefetch instructions for Pentium III (+) and AMD Athlon (+) + * + * It's not worth to care about 3dnow prefetches for the K6 + * because they are microcoded there and very slow. 
+ */ static inline void prefetch(const void *x) { alternative_input(BASE_PREFETCH, @@ -698,8 +767,11 @@ static inline void prefetch(const void *x) "r" (x)); } -/* 3dnow! prefetch to get an exclusive cache line. Useful for - spinlocks to avoid one state transition in the cache coherency protocol. */ +/* + * 3dnow prefetch to get an exclusive cache line. + * Useful for spinlocks to avoid one state transition in the + * cache coherency protocol: + */ static inline void prefetchw(const void *x) { alternative_input(BASE_PREFETCH, @@ -708,21 +780,25 @@ static inline void prefetchw(const void *x) "r" (x)); } -#define spin_lock_prefetch(x) prefetchw(x) +static inline void spin_lock_prefetch(const void *x) +{ + prefetchw(x); +} + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). */ -#define TASK_SIZE (PAGE_OFFSET) -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - -#define INIT_THREAD { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .vm86_info = NULL, \ - .sysenter_cs = __KERNEL_CS, \ - .io_bitmap_ptr = NULL, \ - .fs = __KERNEL_PERCPU, \ +#define TASK_SIZE PAGE_OFFSET +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + +#define INIT_THREAD { \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ + .fs = __KERNEL_PERCPU, \ } /* @@ -731,26 +807,27 @@ static inline void prefetchw(const void *x) * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ -#define INIT_TSS { \ - .x86_tss = { \ +#define INIT_TSS { \ + .x86_tss = { \ .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ - }, \ - .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ + }, \ + .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ } -#define start_thread(regs, new_eip, new_esp) do { \ +#define start_thread(regs, new_eip, new_esp) \ +do { \ __asm__("movl %0,%%gs": :"r" (0)); \ - regs->fs = 0; \ + regs->fs = 0; \ set_fs(USER_DS); \ - regs->ds = __USER_DS; \ - regs->es = __USER_DS; \ - regs->ss = __USER_DS; \ - regs->cs = __USER_CS; \ - regs->ip = new_eip; \ - regs->sp = new_esp; \ + regs->ds = __USER_DS; \ + regs->es = __USER_DS; \ + regs->ss = __USER_DS; \ + regs->cs = __USER_CS; \ + regs->ip = new_eip; \ + regs->sp = new_esp; \ } while (0) @@ -780,24 +857,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); __regs__ - 1; \ }) -#define KSTK_ESP(task) (task_pt_regs(task)->sp) +#define KSTK_ESP(task) (task_pt_regs(task)->sp) #else /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE64 (0x800000000000UL - 4096) +#define TASK_SIZE64 (0x800000000000UL - 4096) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) +#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ + 0xc0000000 : 0xFFFFe000) -#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) -#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? 
\ + IA32_PAGE_OFFSET : TASK_SIZE64) #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX TASK_SIZE64 @@ -813,12 +890,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define start_thread(regs, new_rip, new_rsp) do { \ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ load_gs_index(0); \ - (regs)->ip = (new_rip); \ - (regs)->sp = (new_rsp); \ + (regs)->ip = (new_rip); \ + (regs)->sp = (new_rsp); \ write_pda(oldrsp, (new_rsp)); \ - (regs)->cs = __USER_CS; \ - (regs)->ss = __USER_DS; \ - (regs)->flags = 0x200; \ + (regs)->cs = __USER_CS; \ + (regs)->ss = __USER_DS; \ + (regs)->flags = 0x200; \ set_fs(USER_DS); \ } while (0) @@ -826,17 +903,18 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ -#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) +#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) -#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) +#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ #endif /* CONFIG_X86_64 */ -/* This decides where the kernel will search for a free chunk of vm +/* + * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) -#define KSTK_EIP(task) (task_pt_regs(task)->ip) +#define KSTK_EIP(task) (task_pt_regs(task)->ip) #endif -- cgit v1.2.3 From 513ad84bf60d96a6998bca10ed07c3d340449be8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 05:18:40 +0100 Subject: x86: de-macro start_thread() Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 43d2cc829a9..9054734589f 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -817,20 +817,6 @@ static inline void spin_lock_prefetch(const void *x) .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ } -#define start_thread(regs, new_eip, new_esp) \ -do { \ - __asm__("movl %0,%%gs": :"r" (0)); \ - regs->fs = 0; \ - set_fs(USER_DS); \ - regs->ds = __USER_DS; \ - regs->es = __USER_DS; \ - regs->ss = __USER_DS; \ - regs->cs = __USER_CS; \ - regs->ip = new_eip; \ - regs->sp = new_esp; \ -} while (0) - - extern unsigned long thread_saved_pc(struct task_struct *tsk); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) @@ -887,18 +873,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } -#define start_thread(regs, new_rip, new_rsp) do { \ - asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ - load_gs_index(0); \ - (regs)->ip = (new_rip); \ - (regs)->sp = (new_rsp); \ - write_pda(oldrsp, (new_rsp)); \ - (regs)->cs = __USER_CS; \ - (regs)->ss = __USER_DS; \ - (regs)->flags = 0x200; \ - set_fs(USER_DS); \ -} while (0) - /* * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. @@ -909,6 +883,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. 
*/ #endif /* CONFIG_X86_64 */ +extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + unsigned long new_sp); + /* * This decides where the kernel will search for a free chunk of vm * space during mmap's. -- cgit v1.2.3 From 3def3d6ddf43dbe20c00c3cbc38dfacc8586998f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 22 Feb 2008 17:07:16 -0800 Subject: x86: clean up e820_reserve_resources on 64-bit e820_reserve_resources() could use insert_resource() instead of request_resource(). Also move code_resource, data_resource, bss_resource, and crashk_res out of e820_reserve_resources(). Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/e820_64.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 9e06c6eb4e2..ef653a403e0 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -23,8 +23,7 @@ extern void update_memory_range(u64 start, u64 size, unsigned old_type, extern void setup_memory_region(void); extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); -extern void e820_reserve_resources(struct resource *code_resource, - struct resource *data_resource, struct resource *bss_resource); +extern void e820_reserve_resources(void); extern void e820_mark_nosave_regions(void); extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); -- cgit v1.2.3 From f8fffa458368ed3d57385698f775880db629bd1a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 24 Feb 2008 21:36:28 -0800 Subject: x86: apic_is_clustered_box for vsmp A quad-core, 8-socket system will have APIC ID lifting: the APIC ID range could be [4, 0x23], so apic_is_clustered_box() counts three clusters, which is larger than 2, and the box is treated as clustered. We then get: Marking TSC unstable due to TSCs unsynchronized even if the CPUs have X86_FEATURE_CONSTANT_TSC set. The quick fix is to check whether the CPU is from AMD, but vsmp still needs the clustered-box treatment; this patch makes sure that the vsmp case is not passed over. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index bcfc07fd366..f0321a427e1 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -51,12 +51,17 @@ extern unsigned boot_cpu_id; */ #ifdef CONFIG_PARAVIRT #include +extern int is_vsmp_box(void); #else #define apic_write native_apic_write #define apic_write_atomic native_apic_write_atomic #define apic_read native_apic_read #define setup_boot_clock setup_boot_APIC_clock #define setup_secondary_clock setup_secondary_APIC_clock +static int inline is_vsmp_box(void) +{ + return 0; +} #endif static inline void native_apic_write(unsigned long reg, u32 v) -- cgit v1.2.3 From 0f8d2b926d15a68eac9c19edfdcb58a5d80b2960 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:34:21 +0100 Subject: x86: clean up cpu capabilities accesses introduce test_cpu_cap() for raw access to the real CPU capabilities as they are present in x86_capability.
(cpu_has() will shortcut certain tests during build-time) Signed-off-by: Ingo Molnar --- include/asm-x86/cpufeature.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 065e92966c7..1e3102eeb82 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -120,6 +120,9 @@ extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_power_flags[32]; +#define test_cpu_cap(c, bit) \ + test_bit(bit, (unsigned long *)((c)->x86_capability)) + #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && \ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ @@ -131,7 +134,8 @@ extern const char * const x86_power_flags[32]; (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ ? 1 : \ - test_bit(bit, (unsigned long *)((c)->x86_capability))) + test_cpu_cap(c, bit)) + #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) -- cgit v1.2.3 From 78a9909aab54123c7c471022389b36972e13b48e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 17 Apr 2008 17:40:51 +0200 Subject: x86, tracing: add notrace to asm-x86/linkage.h notrace signals that a function should not be traced. Most of the time this is used by tracers to annotate code that cannot be traced - it's in a volatile state (such as in user vdso context or NMI context) or it's in the tracer internals. Signed-off-by: Ingo Molnar --- include/asm-x86/linkage.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index c048353f4b8..64e444f8e85 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h @@ -1,6 +1,9 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H +#undef notrace +#define notrace __attribute__((no_instrument_function)) + #ifdef CONFIG_X86_64 #define __ALIGN .p2align 4,,15 #define __ALIGN_STR ".p2align 4,,15" -- cgit v1.2.3 From 40869cd038a0ecb867a7227aba46806224e4d11d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Mar 2008 13:55:32 +0100 Subject: x86: redo cded932b75ab0a5f9181e redo commit cded932b75ab0a5f9181e. 
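The intent, sketched with the flag names from the diff below (an illustration, not kernel code): kernel large-page mappings legitimately carry _PAGE_PSE, and NX-protected mappings carry _PAGE_NX, so a pmd_bad()/pud_bad() mask that tolerates only _PAGE_USER flags them as bad:

	/* Hedged illustration, 64-bit variant: a large-page kernel mapping with NX,
	 * plus its pfn bits (which fall inside PTE_MASK). */
	unsigned long val = _KERNPG_TABLE | _PAGE_PSE | _PAGE_NX;

	/* old: val & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER) != 0  -> "bad"  */
	/* new: _PAGE_PSE and _PAGE_NX are tolerated as well         -> "good" */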
Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable_32.h | 4 +++- include/asm-x86/pgtable_64.h | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 4e6a0fca0b4..997c36c6b4d 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -90,7 +90,9 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad(x) ((pmd_val(x) \ + & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ + != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 6ef09914acb..0a5081c98ae 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -153,12 +153,14 @@ static inline unsigned long pgd_bad(pgd_t pgd) static inline unsigned long pud_bad(pud_t pud) { - return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pud_val(pud) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } static inline unsigned long pmd_bad(pmd_t pmd) { - return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pmd_val(pmd) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } #define pte_none(x) (!pte_val(x)) -- cgit v1.2.3 From 9fc34113f6880b215cbea4e7017fc818700384c2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Mar 2008 09:53:17 +0100 Subject: x86: debug pmd_bad() Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable_32.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 997c36c6b4d..1e2c0d83952 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -90,7 +90,11 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) \ + +extern int pmd_bad(pmd_t pmd); + +#define pmd_bad_v1(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad_v2(x) ((pmd_val(x) \ & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ != _KERNPG_TABLE) -- cgit v1.2.3 From 0d7a1819e97ef89be5bcbb4b724acb9f6c873c97 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 3 Mar 2008 12:49:09 +0100 Subject: x86: wmb() confusion in system.h Comment says wmb is a nop, but it is implemented as lock addl below... Should it be compiled to nop if we know we are running on "good" Intel cpu? At least remove confusing comment for now. Signed-off-by: Pavel Machek Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 9cff02ffe6c..428d9471497 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -296,16 +296,7 @@ void default_idle(void); */ #ifdef CONFIG_X86_32 /* - * For now, "wmb()" doesn't actually do anything, as all - * Intel CPU's follow what Intel calls a *Processor Order*, - * in which all writes are seen in the program order even - * outside the CPU. 
- * - * I expect future Intel CPU's to have a weaker ordering, - * but I'd also expect them to finally get their act together - * and add some real memory barriers if so. - * - * Some non intel clones support out of order store. wmb() ceases to be a + * Some non-Intel clones support out of order store. wmb() ceases to be a * nop for these. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -- cgit v1.2.3 From 23b55bd9f33a1812a664e548803db34c9bec56e8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 10:24:37 +0100 Subject: x86: clean up switch_to() Make the code more readable and more hackable: - use symbolic asm parameters - use readable indentation - add comments that explain the details No code changed: kernel/sched.o: text data bss dec hex filename 28626 684 2640 31950 7cce sched.o.before 28626 684 2640 31950 7cce sched.o.after md5: 2823d406c18b781975cdb2e7cfea0059 sched.o.before.asm 2823d406c18b781975cdb2e7cfea0059 sched.o.after.asm Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 428d9471497..299ae9605cb 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -27,22 +27,38 @@ struct task_struct *__switch_to(struct task_struct *prev, * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. */ -#define switch_to(prev, next, last) do { \ +#define switch_to(prev, next, last) \ +do { \ unsigned long esi, edi; \ - asm volatile("pushfl\n\t" /* Save flags */ \ - "pushl %%ebp\n\t" \ - "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %5,%%esp\n\t" /* restore ESP */ \ - "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %6\n\t" /* restore EIP */ \ - "jmp __switch_to\n" \ - "1:\t" \ - "popl %%ebp\n\t" \ - "popfl" \ - :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ - "=a" (last), "=S" (esi), "=D" (edi) \ - :"m" (next->thread.sp), "m" (next->thread.ip), \ - "2" (prev), "d" (next)); \ + \ + asm volatile( \ + "pushfl \n\t" /* save flags */ \ + "pushl %%ebp \n\t" /* save EBP */ \ + "movl %%esp,%[prev_sp] \n\t" /* save ESP */ \ + "movl %[next_sp],%%esp \n\t" /* restore ESP */ \ + "movl $1f,%[prev_ip] \n\t" /* save EIP */ \ + "pushl %[next_ip] \n\t" /* restore EIP */ \ + "jmp __switch_to \n" /* regparm call */ \ + "1: \t" \ + "popl %%ebp \n\t" /* restore EBP */ \ + "popfl \n" /* restore flags */ \ + \ + /* output parameters */ \ + : [prev_sp] "=m" (prev->thread.sp), \ + [prev_ip] "=m" (prev->thread.ip), \ + "=a" (last), \ + \ + /* clobbered output registers: */ \ + "=S" (esi), "=D" (edi) \ + \ + /* input parameters: */ \ + : [next_sp] "m" (next->thread.sp), \ + [next_ip] "m" (next->thread.ip), \ + \ + /* regparm parameters for __switch_to(): */ \ + [prev] "a" (prev), \ + [next] "d" (next) \ + ); \ } while (0) /* -- cgit v1.2.3 From 8b6451fe5cf78909f28d3762f77df060c8603cd0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 10:46:38 +0100 Subject: x86: fix switch_to() clobbers Liu Pingfan noticed that switch_to() clobbers more registers than its asm constraints specify. We get away with this due to luck mostly - schedule() by its nature only has 'local' state which gets reloaded automatically. Fix it nevertheless, we could hit this anytime.
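The fix uses a standard inline-asm idiom; a minimal, hedged sketch (a hypothetical function, not the kernel code): a register the asm destroys, but that cannot go into the clobber list because related operands are pinned to registers, is declared as an otherwise-unused output instead:

	static inline unsigned long double_it(unsigned long x)
	{
		unsigned long trashed_ecx;	/* dummy output: tells gcc ECX dies here */

		asm("movl %[v], %%ecx \n\t"	/* copy x into ECX */
		    "addl %%ecx, %[v]"		/* x += x */
		    : [v] "+r" (x), "=c" (trashed_ecx));

		return x;
	}

Naming ECX as an "=c" output keeps gcc from holding anything live in it across the asm, without luck being involved.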
it turns out that with the extra constraints gcc manages to make schedule() even more compact: text data bss dec hex filename 28626 684 2640 31950 7cce sched.o.before 28613 684 2640 31937 7cc1 sched.o.after Reported-by: Liu Pingfan Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 299ae9605cb..33b0017156a 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -29,7 +29,14 @@ struct task_struct *__switch_to(struct task_struct *prev, */ #define switch_to(prev, next, last) \ do { \ - unsigned long esi, edi; \ + /* \ + * Context-switching clobbers all registers, so we clobber \ + * them explicitly, via unused output variables. \ + * (EAX and EBP is not listed because EBP is saved/restored \ + * explicitly for wchan access and EAX is the return value of \ + * __switch_to()) \ + */ \ + unsigned long ebx, ecx, edx, esi, edi; \ \ asm volatile( \ "pushfl \n\t" /* save flags */ \ @@ -49,6 +56,7 @@ do { \ "=a" (last), \ \ /* clobbered output registers: */ \ + "=b" (ebx), "=c" (ecx), "=d" (edx), \ "=S" (esi), "=D" (edi) \ \ /* input parameters: */ \ -- cgit v1.2.3 From c27cfeffad436816ecd500b8dc94acf348182b13 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:29 -0300 Subject: x86: commonize smp.h this is the first step of integrating smp.h between x86_64 and i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f2e8319a6b0..f250d1c3f8a 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -1,5 +1,12 @@ +#ifndef _ASM_X86_SMP_H_ +#define _ASM_X86_SMP_H_ +#ifndef __ASSEMBLY__ + #ifdef CONFIG_X86_32 # include "smp_32.h" #else # include "smp_64.h" #endif + +#endif /* __ASSEMBLY__ */ +#endif -- cgit v1.2.3 From 639acb16e6b93342a786c01425cf8eb8ebbb1351 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:30 -0300 Subject: x86: merge extern function definitions move extern function definitions that are the same between smp_{32,64}.h to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 4 ---- 3 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f250d1c3f8a..ad7b99dda0d 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -8,5 +8,8 @@ # include "smp_64.h" #endif +extern void smp_alloc_memory(void); +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 56152e31228..27812258ac6 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -22,10 +22,6 @@ extern cpumask_t cpu_callin_map; extern int smp_num_siblings; extern unsigned int num_processors; -extern void smp_alloc_memory(void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); - extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index e0a75519ad2..2c21df289da 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,10 +19,6 @@ extern cpumask_t cpu_initialized; extern int smp_num_siblings; 
extern unsigned int num_processors; -extern void smp_alloc_memory(void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); - extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.3 From 53ebef4961c7d5347b4fa2b878258ccd11fc9663 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:31 -0300 Subject: x86: merge extern variables definitions move extern definitions that are the same between smp_{32,64}.h to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 4 ---- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ad7b99dda0d..c130a87c956 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -1,6 +1,12 @@ #ifndef _ASM_X86_SMP_H_ #define _ASM_X86_SMP_H_ #ifndef __ASSEMBLY__ +#include + +extern cpumask_t cpu_callout_map; + +extern int smp_num_siblings; +extern unsigned int num_processors; #ifdef CONFIG_X86_32 # include "smp_32.h" diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 27812258ac6..9a4057d9436 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -16,12 +16,8 @@ # endif #endif -extern cpumask_t cpu_callout_map; extern cpumask_t cpu_callin_map; -extern int smp_num_siblings; -extern unsigned int num_processors; - extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 2c21df289da..284f701f2a8 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -13,12 +13,8 @@ #include #include -extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; -extern int smp_num_siblings; -extern unsigned int num_processors; - extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.3 From 16694024d6d6fa84dfcf5400b53afe1e75cebf0d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:32 -0300 Subject: x86: define smp_ops in common header x86_64 will benefit from it Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 14 ++++++++++++++ include/asm-x86/smp_32.h | 14 -------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c130a87c956..d11b92b5635 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -8,6 +8,20 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +struct smp_ops { + void (*smp_prepare_boot_cpu)(void); + void (*smp_prepare_cpus)(unsigned max_cpus); + int (*cpu_up)(unsigned cpu); + void (*smp_cpus_done)(unsigned max_cpus); + + void (*smp_send_stop)(void); + void (*smp_send_reschedule)(int cpu); + int (*smp_call_function_mask)(cpumask_t mask, + void (*func)(void *info), void *info, + int wait); +}; + + #ifdef CONFIG_X86_32 # include "smp_32.h" #else diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 9a4057d9436..72faad6509c 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -38,20 +38,6 @@ extern void remove_siblinginfo(int cpu); /* Globals due to paravirt */ extern void set_cpu_sibling_map(int cpu); -struct smp_ops -{ - void (*smp_prepare_boot_cpu)(void); - void (*smp_prepare_cpus)(unsigned max_cpus); - int (*cpu_up)(unsigned cpu); - void 
(*smp_cpus_done)(unsigned max_cpus); - - void (*smp_send_stop)(void); - void (*smp_send_reschedule)(int cpu); - int (*smp_call_function_mask)(cpumask_t mask, - void (*func)(void *info), void *info, - int wait); -}; - #ifdef CONFIG_SMP extern struct smp_ops smp_ops; -- cgit v1.2.3 From c76cb36846da6d5d6fb2951968869faa4fd1001d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:33 -0300 Subject: x86: move smp_ops extern declaration to common header the smp_ops symbol is temporarily defined in smp_64.c, but it will soon be unified Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d11b92b5635..ee98beeb751 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -21,6 +21,9 @@ struct smp_ops { int wait); }; +#ifdef CONFIG_SMP +extern struct smp_ops smp_ops; +#endif #ifdef CONFIG_X86_32 # include "smp_32.h" diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 72faad6509c..74755e8ffc7 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,8 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -extern struct smp_ops smp_ops; - static inline void smp_prepare_boot_cpu(void) { smp_ops.smp_prepare_boot_cpu(); -- cgit v1.2.3 From 8678969e60d80527d96d2af0011e72c87c9c1fe5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:34 -0300 Subject: x86: merge smp_send_reschedule function definition is moved to common header, x86_64 version is now called native_smp_send_reschedule Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 5 +++++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 2 -- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ee98beeb751..28f33c03d79 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -23,6 +23,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; + +static inline void smp_send_reschedule(int cpu) +{ + smp_ops.smp_send_reschedule(cpu); +} #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 74755e8ffc7..c60a3dd3e80 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -60,10 +60,6 @@ static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -static inline void smp_send_reschedule(int cpu) -{ - smp_ops.smp_send_reschedule(cpu); -} static inline int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), void *info, int wait) diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 284f701f2a8..b9204584aa0 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -65,8 +65,6 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } -extern void smp_send_reschedule(int cpu); - #else /* CONFIG_SMP */ extern unsigned int boot_cpu_id; -- cgit v1.2.3 From 64b1a21e0924dca7ea3b7cf4287fa719c8ba7fc5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:35 -0300 Subject: x86: unify smp_call_function_mask definition is moved to common header, x86_64 function name now is native_smp_call_function_mask Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 7 +++++++ include/asm-x86/smp_32.h | 6 ------ 2 
files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 28f33c03d79..d9782f4f469 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -28,6 +28,13 @@ static inline void smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); } + +static inline int smp_call_function_mask(cpumask_t mask, + void (*func) (void *info), void *info, + int wait) +{ + return smp_ops.smp_call_function_mask(mask, func, info, wait); +} #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index c60a3dd3e80..d9337ee8c2f 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -60,12 +60,6 @@ static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -static inline int smp_call_function_mask(cpumask_t mask, - void (*func) (void *info), void *info, - int wait) -{ - return smp_ops.smp_call_function_mask(mask, func, info, wait); -} void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); -- cgit v1.2.3 From 71d195492a6e0b22135a7156af1b41c0f99a116b Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:36 -0300 Subject: x86: unify __cpu_up. function definition is moved to common header. x86_64 version is now called native_cpu_up Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 7 +++++++ include/asm-x86/smp_32.h | 5 ----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d9782f4f469..7dd71410fe7 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -24,6 +24,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline int __cpu_up(unsigned int cpu) +{ + return smp_ops.cpu_up(cpu); +} + static inline void smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); @@ -35,6 +40,8 @@ static inline int smp_call_function_mask(cpumask_t mask, { return smp_ops.smp_call_function_mask(mask, func, info, wait); } + +int native_cpu_up(unsigned int cpunum); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index d9337ee8c2f..a7fab8e1517 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -47,10 +47,6 @@ static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); } -static inline int __cpu_up(unsigned int cpu) -{ - return smp_ops.cpu_up(cpu); -} static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); @@ -63,7 +59,6 @@ static inline void smp_send_stop(void) void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); -int native_cpu_up(unsigned int cpunum); void native_smp_cpus_done(unsigned int max_cpus); #ifndef CONFIG_PARAVIRT -- cgit v1.2.3 From 1e3fac83da056f26bcb96e13967c157de55bf2ef Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:37 -0300 Subject: x86: unify prepare_boot_cpu definition is moved to common header. 
x86_64 version is now called native_prepare_boot_cpu Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 7dd71410fe7..a2d69a1bec2 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -24,6 +24,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline void smp_prepare_boot_cpu(void) +{ + smp_ops.smp_prepare_boot_cpu(); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -41,6 +46,7 @@ static inline int smp_call_function_mask(cpumask_t mask, return smp_ops.smp_call_function_mask(mask, func, info, wait); } +void native_smp_prepare_boot_cpu(void); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index a7fab8e1517..2c7ecfaa823 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,10 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_prepare_boot_cpu(void) -{ - smp_ops.smp_prepare_boot_cpu(); -} static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); @@ -57,7 +53,6 @@ static inline void smp_send_stop(void) smp_ops.smp_send_stop(); } -void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); -- cgit v1.2.3 From 7557da67208f6ed3a1073594b7597bf20c9eb63a Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:38 -0300 Subject: x86: unify smp_prepare_cpus definition is moved to common header. 
x86_64 version is now called native_smp_prepare_cpus Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index a2d69a1bec2..31bd99ddd8c 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -29,6 +29,11 @@ static inline void smp_prepare_boot_cpu(void) smp_ops.smp_prepare_boot_cpu(); } +static inline void smp_prepare_cpus(unsigned int max_cpus) +{ + smp_ops.smp_prepare_cpus(max_cpus); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -47,6 +52,7 @@ static inline int smp_call_function_mask(cpumask_t mask, } void native_smp_prepare_boot_cpu(void); +void native_smp_prepare_cpus(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 2c7ecfaa823..50785389680 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,10 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_prepare_cpus(unsigned int max_cpus) -{ - smp_ops.smp_prepare_cpus(max_cpus); -} static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); @@ -53,7 +49,6 @@ static inline void smp_send_stop(void) smp_ops.smp_send_stop(); } -void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); #ifndef CONFIG_PARAVIRT -- cgit v1.2.3 From c559764923dacef301116a248695856e6eb96e48 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:39 -0300 Subject: x86: unify smp_cpus_done definition is moved to common header. 
x86_64 version is now called native_smp_cpus_done Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 7 ------- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 31bd99ddd8c..9620165d3b7 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -34,6 +34,11 @@ static inline void smp_prepare_cpus(unsigned int max_cpus) smp_ops.smp_prepare_cpus(max_cpus); } +static inline void smp_cpus_done(unsigned int max_cpus) +{ + smp_ops.smp_cpus_done(max_cpus); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -53,6 +58,7 @@ static inline int smp_call_function_mask(cpumask_t mask, void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); +void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 50785389680..bc90a4ed323 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,18 +39,11 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_cpus_done(unsigned int max_cpus) -{ - smp_ops.smp_cpus_done(max_cpus); -} - static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -void native_smp_cpus_done(unsigned int max_cpus); - #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -- cgit v1.2.3 From 93b016f8f393c1f8c27e8c4df06ad1420fac65f5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:40 -0300 Subject: x86: move disabled_cpus to common header disabled_cpus is (up to now) an x86_64-only construction. 
But its extern declaration can be moved to the common header anyway Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_64.h | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 9620165d3b7..be8a511fdb1 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -2,6 +2,7 @@ #define _ASM_X86_SMP_H_ #ifndef __ASSEMBLY__ #include +#include extern cpumask_t cpu_callout_map; @@ -60,6 +61,8 @@ void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); + +extern unsigned disabled_cpus; #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index b9204584aa0..c7a00caa6ec 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -44,7 +44,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); -extern unsigned __cpuinitdata disabled_cpus; #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.3 From 68a1c3f8cd893f5c3c1396fec5be7d8acac4fc93 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:42 -0300 Subject: x86: move prefill_possible_map to common file this patch moves prefill_possible_map() to smpboot.c Right now it is x86_64-specific, but nothing intrinsically prevents it from being used by i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 1 + include/asm-x86/smp_64.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index be8a511fdb1..28cb1f8bb47 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -63,6 +63,7 @@ void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); extern unsigned disabled_cpus; +extern void prefill_possible_map(void); #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index c7a00caa6ec..e5bc1be7082 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -43,7 +43,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern void prefill_possible_map(void); #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.3 From 3d3f487c58ef1ece714af280b29411960908149c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:48 -0300 Subject: x86: provide hlt_works function. In x86_64, hlt always works. 
in i386, we'll query the cpuinfo associated with this cpu Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 9054734589f..8bec23c1552 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -144,6 +144,15 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); #define current_cpu_data boot_cpu_data #endif +static inline int hlt_works(int cpu) +{ +#ifdef CONFIG_X86_32 + return cpu_data(cpu).hlt_works_ok; +#else + return 1; +#endif +} + #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern void cpu_detect(struct cpuinfo_x86 *c); -- cgit v1.2.3 From 377d698426b8c685fb6d48fe89694fe4ce3aa1f8 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:51 -0300 Subject: x86: unify smp_send_stop function definition is moved to common header. x86_64 version is now called native_smp_send_stop Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 5 +++++ include/asm-x86/smp_32.h | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 28cb1f8bb47..2ab8ed4e99e 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -25,6 +25,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline void smp_send_stop(void) +{ + smp_ops.smp_send_stop(); +} + static inline void smp_prepare_boot_cpu(void) { smp_ops.smp_prepare_boot_cpu(); diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index bc90a4ed323..41b58e0bc75 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,11 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_send_stop(void) -{ - smp_ops.smp_send_stop(); -} - #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -- cgit v1.2.3 From fe6762030ca3728d3e24b556676114a6a64a97be Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:56 -0300 Subject: x86: remove cpu_llc_id from processor.h it is already defined in smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 8bec23c1552..e49e5e69ebb 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -355,9 +355,7 @@ union i387_union { struct i387_soft_struct soft; }; -#ifdef CONFIG_X86_32 -DECLARE_PER_CPU(u8, cpu_llc_id); -#else +#ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); #endif -- cgit v1.2.3 From 5382e89670399f9db8a58b3c6f850fa4a94f6cca Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:57 -0300 Subject: x86: adjust types in smpcommon_32.c so they can have the same type as x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_32.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 41b58e0bc75..29f61952ea8 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -21,13 +21,13 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); -extern u8 
__initdata x86_cpu_to_apicid_init[]; +extern u16 __initdata x86_cpu_to_apicid_init[]; extern void *x86_cpu_to_apicid_early_ptr; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u8, cpu_llc_id); -DECLARE_PER_CPU(u8, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); -- cgit v1.2.3 From a355352b97901d987f54ea7c7d7161eb51a3799c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:58 -0300 Subject: x86: move equal types to common file move definitions that are now equal in type from smpboot_{32,64}.c to smpboot.c cpu_callin_map is put temporarily in smp_64.h (already exists in smp_32.h), and will soon be merged. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index e5bc1be7082..1ecf8134bdc 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -14,6 +14,7 @@ #include extern cpumask_t cpu_initialized; +extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.3 From 1452207689b3c0dd2ffed40735289a3a4a8c0c7c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:59 -0300 Subject: x86: make set_cpu_sibling_map nonstatic And move its extern definition to smp.h, the common header Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 2ab8ed4e99e..1b4481aeb5c 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -22,6 +22,9 @@ struct smp_ops { int wait); }; +/* Globals due to paravirt */ +extern void set_cpu_sibling_map(int cpu); + #ifdef CONFIG_SMP extern struct smp_ops smp_ops; diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 29f61952ea8..76247a947a5 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -35,9 +35,6 @@ extern void cpu_uninit(void); extern void remove_siblinginfo(int cpu); #endif -/* Globals due to paravirt */ -extern void set_cpu_sibling_map(int cpu); - #ifdef CONFIG_SMP #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -- cgit v1.2.3 From 1dbb4726faebe9e64a1e9cf40e3b39fffa065a65 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:01 -0300 Subject: x86: move hotplug related extern definitions to smp.h definitions that are inside CONFIG_HOTPLUG_CPU in the arch-specific smp*.h files are moved to common header Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 1b4481aeb5c..c800b815d37 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -80,6 +80,12 @@ extern void prefill_possible_map(void); # include "smp_64.h" #endif +#ifdef CONFIG_HOTPLUG_CPU +extern void cpu_exit_clear(void); +extern void cpu_uninit(void); +extern void remove_siblinginfo(int cpu); +#endif + extern void smp_alloc_memory(void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); diff --git 
a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 76247a947a5..0b251346887 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -29,12 +29,6 @@ DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -#ifdef CONFIG_HOTPLUG_CPU -extern void cpu_exit_clear(void); -extern void cpu_uninit(void); -extern void remove_siblinginfo(int cpu); -#endif - #ifdef CONFIG_SMP #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -- cgit v1.2.3 From 69c18c15d39c4622c6e2f97e5db4d8c9c43adaaa Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:07 -0300 Subject: x86: merge __cpu_disable and cpu_die They are now equal, and are moved to a common file Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 3 --- include/asm-x86/smp_64.h | 3 --- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c800b815d37..27d9f659523 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -70,6 +70,9 @@ void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); + extern unsigned disabled_cpus; extern void prefill_possible_map(void); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 0b251346887..4fec2feb6ac 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -34,9 +34,6 @@ DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -extern int __cpu_disable(void); -extern void __cpu_die(unsigned int cpu); - /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 1ecf8134bdc..d554d7d5732 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -42,9 +42,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) #define SMP_TRAMPOLINE_BASE 0x6000 -extern int __cpu_disable(void); -extern void __cpu_die(unsigned int cpu); - #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.3 From 420688293927a590d092ec76ef97c2565ae21aff Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:09 -0300 Subject: x86: move trampoline arrays extern definition to smp.h In here, they can serve both architectures Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 27d9f659523..b2a1697e470 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -9,6 +9,12 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +/* + * Trampoline 80x86 program as an array. 
+ */ +extern const unsigned char trampoline_data []; +extern const unsigned char trampoline_end []; + struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); -- cgit v1.2.3 From 91718e8d13c23bfe0aa6fa6b730c5c33ee9771bf Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:12 -0300 Subject: x86: unify setup_trampoline setup_trampoline() looks very similar between architectures, and this patch unifies them. The i386 version allocates bootmem memory, while the x86_64 version uses a fixed address. In this patch, we initialize the global trampoline_base to the x86_64 version, and i386 allocation can later override it. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 4 ++++ include/asm-x86/smp_64.h | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b2a1697e470..513c8571a4a 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -14,6 +14,7 @@ extern unsigned int num_processors; */ extern const unsigned char trampoline_data []; extern const unsigned char trampoline_end []; +extern unsigned char *trampoline_base; struct smp_ops { void (*smp_prepare_boot_cpu)(void); @@ -81,6 +82,9 @@ extern void __cpu_die(unsigned int cpu); extern unsigned disabled_cpus; extern void prefill_possible_map(void); + +#define SMP_TRAMPOLINE_BASE 0x6000 +extern unsigned long setup_trampoline(void); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index d554d7d5732..394c7852433 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -40,8 +40,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) #ifdef CONFIG_SMP -#define SMP_TRAMPOLINE_BASE 0x6000 - #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.3 From ca9cda2f7b53da619fabde4c0c1bd5f61039bd5b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 15:15:42 +0100 Subject: x86: add comments to processor.h add comments to the FPU structures of processor.h. 
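(As background, a minimal sketch, not part of this patch: the two layouts coexist because FPU state is saved with fxsave on CPUs that have it, with fnsave as the fallback; fpu_save below is a hypothetical name, for illustration only:)

	/* sketch only -- hypothetical helper; the real logic lives in the i387 code */
	static inline void fpu_save(union i387_union *fpu)
	{
		if (cpu_has_fxsr)
			asm volatile("fxsave %0" : "=m" (fpu->fxsave));
		else
			asm volatile("fnsave %0 ; fwait" : "=m" (fpu->fsave));
	}
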
Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 51 +++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index e49e5e69ebb..1f9501a3849 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -289,42 +289,47 @@ struct orig_ist { #define MXCSR_DEFAULT 0x1f80 struct i387_fsave_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 cwd; /* FPU Control Word */ + u32 swd; /* FPU Status Word */ + u32 twd; /* FPU Tag Word */ + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Pointer Offset */ + u32 fos; /* FPU Operand Pointer Selector */ + + /* 8*10 bytes for each FP-reg = 80 bytes: */ u32 st_space[20]; - /* Software status information: */ + + /* Software status information [not touched by FSAVE ]: */ u32 status; }; struct i387_fxsave_struct { - u16 cwd; - u16 swd; - u16 twd; - u16 fop; + u16 cwd; /* Control Word */ + u16 swd; /* Status Word */ + u16 twd; /* Tag Word */ + u16 fop; /* Last Instruction Opcode */ union { struct { - u64 rip; - u64 rdp; + u64 rip; /* Instruction Pointer */ + u64 rdp; /* Data Pointer */ }; struct { - u32 fip; - u32 fcs; - u32 foo; - u32 fos; + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Offset */ + u32 fos; /* FPU Operand Selector */ }; }; - u32 mxcsr; - u32 mxcsr_mask; - /* 8*16 bytes for each FP-reg = 128 bytes: */ + u32 mxcsr; /* MXCSR Register State */ + u32 mxcsr_mask; /* MXCSR Mask */ + + /* 8*16 bytes for each FP-reg = 128 bytes: */ u32 st_space[32]; - /* 16*16 bytes for each XMM-reg = 256 bytes: */ + + /* 16*16 bytes for each XMM-reg = 256 bytes: */ u32 xmm_space[64]; + u32 padding[24]; } __attribute__((aligned(16))); -- cgit v1.2.3 From 16281a998d7340a5bee4078f6f9a26c47208eb86 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 4 Mar 2008 16:46:27 -0800 Subject: x86: include/asm-x86/mutex_32.h - use angle brackets for include Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index bbeefb96ddf..9a6b3da2591 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h @@ -9,7 +9,7 @@ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H -#include "asm/alternative.h" +#include /** * __mutex_fastpath_lock - try to take the lock by moving the count -- cgit v1.2.3 From eee6dd15723639f9270e4c561a0c82e8e18bd587 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 10:39:07 +0100 Subject: x86: move extern declaration to vdso.h Before: total: 0 errors, 3 warnings, 685 lines checked After: total: 0 errors, 1 warnings, 678 lines checked No code changed: arch/x86/kernel/signal_32.o: text data bss dec hex filename 5333 0 4 5337 14d9 signal_32.o.before 5333 0 4 5337 14d9 signal_32.o.after md5: c279e98012a2808e90cfa2a7787e42a4 signal_32.o.before.asm c279e98012a2808e90cfa2a7787e42a4 signal_32.o.after.asm Signed-off-by: Ingo Molnar --- include/asm-x86/vdso.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 629bcb6e8e4..9bb86899abf 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h @@ -25,4 +25,11 @@ extern const char VDSO32_PRELINK[]; (void *) (VDSO32_##name - 
VDSO32_PRELINK + (unsigned long) (base)); }) #endif +/* + * These symbols are defined with the addresses in the vsyscall page. + * See vsyscall-sigreturn.S. + */ +extern void __user __kernel_sigreturn; +extern void __user __kernel_rt_sigreturn; + #endif /* asm-x86/vdso.h */ -- cgit v1.2.3 From e587cadd8f47e202a30712e2906a65a0606d5865 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Thu, 6 Mar 2008 08:48:49 -0500 Subject: x86: enhance DEBUG_RODATA support - alternatives Fix a memcpy that should be a text_poke (in apply_alternatives). Use kernel_wp_save/kernel_wp_restore in text_poke to support DEBUG_RODATA correctly and so the CPU HOTPLUG special case can be removed. Add text_poke_early, for alternatives and paravirt boot-time and module load time patching. Changelog: - Fix text_set and text_poke alignment check (mixed up bitwise and and or) - Remove text_set - Export add_nops, so it can be used by others. - Document text_poke_early. - Remove clflush, since it breaks some VIA architectures and is not strictly necessary. - Add kerneldoc to text_poke and text_poke_early. - Create a second vmap instead of using the WP bit to support Xen and VMI. - Move local_irq disable within text_poke and text_poke_early to be able to be sleepable in these functions. Signed-off-by: Mathieu Desnoyers CC: Andi Kleen CC: pageexec@freemail.hu CC: H. Peter Anvin CC: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- include/asm-x86/alternative.h | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d8bacf3c4b0..d26416b5722 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h @@ -156,6 +156,27 @@ apply_paravirt(struct paravirt_patch_site *start, #define __parainstructions_end NULL #endif -extern void text_poke(void *addr, unsigned char *opcode, int len); +extern void add_nops(void *insns, unsigned int len); + +/* + * Clear and restore the kernel write-protection flag on the local CPU. + * Allows the kernel to edit read-only pages. + * Side-effect: any interrupt handler running between save and restore will have + * the ability to write to read-only pages. + * + * Warning: + * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and + * no thread can be preempted in the instructions being modified (no iret to an + * invalid instruction possible) or if the instructions are changed from a + * consistent state to another consistent state atomically. + * More care must be taken when modifying code in the SMP case because of + * Intel's errata. + * On the local CPU you need to be protected again NMI or MCE handlers seeing an + * inconsistent instruction while you patch. + * The _early version expects the memory to already be RW. + */ + +extern void *text_poke(void *addr, const void *opcode, size_t len); +extern void *text_poke_early(void *addr, const void *opcode, size_t len); #endif /* _ASM_X86_ALTERNATIVE_H */ -- cgit v1.2.3 From 459cce726730ca0ac93701e53aa1d0d055ce9e90 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 18:38:52 +0100 Subject: x86: remove mach_reboot.h all reboot details are handled in reboot.c and quirks are handled via reboot_fixups_32.c. 
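(For reference, a sketch of the quirk table pattern in reboot_fixups_32.c that reboot handling now funnels through; field names quoted from memory, so treat this as illustrative:)

	/* sketch: per-device reboot quirks keyed by PCI vendor/device ID */
	struct device_fixup {
		unsigned int vendor;
		unsigned int device;
		void (*reboot_fixup)(struct pci_dev *);
	};
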
Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_reboot.h | 61 ------------------------------ 1 file changed, 61 deletions(-) delete mode 100644 include/asm-x86/mach-default/mach_reboot.h (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h deleted file mode 100644 index 6adee6a97de..00000000000 --- a/include/asm-x86/mach-default/mach_reboot.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * arch/i386/mach-generic/mach_reboot.h - * - * Machine specific reboot functions for generic. - * Split out from reboot.c by Osamu Tomita - */ -#ifndef _MACH_REBOOT_H -#define _MACH_REBOOT_H - -static inline void kb_wait(void) -{ - int i; - - for (i = 0; i < 0x10000; i++) - if ((inb_p(0x64) & 0x02) == 0) - break; -} - -static inline void mach_reboot(void) -{ - int i; - - /* old method, works on most machines */ - for (i = 0; i < 10; i++) { - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } - - /* New method: sets the "System flag" which, when set, indicates - * successful completion of the keyboard controller self-test (Basic - * Assurance Test, BAT). This is needed for some machines with no - * keyboard plugged in. This read-modify-write sequence sets only the - * system flag - */ - for (i = 0; i < 10; i++) { - int cmd; - - outb(0x20, 0x64); /* read Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - cmd = inb(0x60); - udelay(50); - kb_wait(); - udelay(50); - outb(0x60, 0x64); /* write Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ - udelay(50); - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } -} - -#endif /* !_MACH_REBOOT_H */ -- cgit v1.2.3 From 01aaea1afbcdb7c49fe4a567ebe3e295db9f720d Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 6 Mar 2008 13:46:39 -0800 Subject: x86: introduce initial apicid store initial_apicid from early identify. it could be different from phys_proc_id later. also print it out in /proc/cpuinfo. 
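(A sketch of where the new field comes from: architecturally, the initial APIC ID is reported in CPUID leaf 1, EBX bits 31-24, so early identify can do roughly the following, assuming the usual cpuid_ebx() helper:)

	/* sketch: read the initial APIC ID straight from CPUID leaf 1 */
	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
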
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 1f9501a3849..d590da88c0d 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -101,6 +101,7 @@ struct cpuinfo_x86 { /* cpuid returned max cores value: */ u16 x86_max_cores; u16 apicid; + u16 initial_apicid; u16 x86_clflush_size; #ifdef CONFIG_SMP /* number of cores as seen by the OS: */ -- cgit v1.2.3 From e40c0fe6b0b5dd16aec3c0dad311d36b19d78fd9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 9 Mar 2008 12:35:00 -0700 Subject: x86: cleanup duplicate includes Signed-off-by: Joe Perches arch/x86/kernel/reboot.c | 1 - include/asm-x86/elf.h | 5 ++--- include/asm-x86/posix_types.h | 8 +------- include/asm-x86/processor.h | 3 +-- include/asm-x86/unistd.h | 8 +------- 5 files changed, 5 insertions(+), 20 deletions(-) Signed-off-by: Ingo Molnar --- include/asm-x86/elf.h | 5 ++--- include/asm-x86/posix_types.h | 8 +------- include/asm-x86/processor.h | 3 +-- include/asm-x86/unistd.h | 8 +------- 4 files changed, 5 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index fb62f9941e3..77325646e2f 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h @@ -82,8 +82,9 @@ extern unsigned int vdso_enabled; #define elf_check_arch_ia32(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) -#ifdef CONFIG_X86_32 #include + +#ifdef CONFIG_X86_32 #include /* for savesegment */ #include @@ -135,8 +136,6 @@ extern unsigned int vdso_enabled; #else /* CONFIG_X86_32 */ -#include - /* * This is used to ensure we don't load something for the wrong architecture. 
*/ diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h index bb7133dc155..fe312a5ba20 100644 --- a/include/asm-x86/posix_types.h +++ b/include/asm-x86/posix_types.h @@ -1,11 +1,5 @@ #ifdef __KERNEL__ -# ifdef CONFIG_X86_32 -# include "posix_types_32.h" -# else -# include "posix_types_64.h" -# endif -#else -# ifdef __i386__ +# if defined(CONFIG_X86_32) || defined(__i386__) # include "posix_types_32.h" # else # include "posix_types_64.h" diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index d590da88c0d..cc0268395ea 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -3,8 +3,7 @@ #include -/* migration helpers, for KVM - will be removed in 2.6.25: */ -#include +/* migration helper, for KVM - will be removed in 2.6.25: */ #define Xgt_desc_struct desc_ptr /* Forward declaration, a strange C thing */ diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h index 2a58ed3e51d..effc7ad8e12 100644 --- a/include/asm-x86/unistd.h +++ b/include/asm-x86/unistd.h @@ -1,11 +1,5 @@ #ifdef __KERNEL__ -# ifdef CONFIG_X86_32 -# include "unistd_32.h" -# else -# include "unistd_64.h" -# endif -#else -# ifdef __i386__ +# if defined(CONFIG_X86_32) || defined(__i386__) # include "unistd_32.h" # else # include "unistd_64.h" -- cgit v1.2.3 From 86975101e46ec93be972d8f46715aa6273102545 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Fri, 7 Mar 2008 13:05:27 -0800 Subject: x86: add cpu_has_arch_perfmon adds cpu_has_arch_perfmon to test presence of architectural perfmon on Intel x86 processor Signed-off-by: Stephane Eranian Signed-off-by: Ingo Molnar --- include/asm-x86/cpufeature.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 1e3102eeb82..90feb6f2562 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -185,6 +185,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) +#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 -- cgit v1.2.3 From 12db648c1518b2627cc983199a97ec6f5d6a1de2 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Fri, 7 Mar 2008 13:05:39 -0800 Subject: x86: add AMD Northbridge MSR definition adds AMD Northbridge config MSR definition Signed-off-by: Stephane Eranian Signed-off-by: Robert Richter Signed-off-by: Ingo Molnar --- include/asm-x86/msr-index.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index fae118a2527..3ed97144c07 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -83,6 +83,7 @@ /* AMD64 MSRs. Not complete. See the architecture manual for a more complete list. */ +#define MSR_AMD64_NB_CFG 0xc001001f #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 -- cgit v1.2.3 From 5b0e508415a83989fe704b4718a1a214bc333ca7 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 10 Mar 2008 13:11:17 +0000 Subject: x86: prevent unconditional writes to DebugCtl MSR Otherwise, enabling (or better, subsequent disabling) of single stepping would cause a kernel oops on CPUs not having this MSR. 
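The failing pattern is, roughly, an unguarded write (sketch):

	/* sketch: on a CPU without DEBUGCTLMSR this write raises #GP and oopses */
	wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
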
The patch could have added a conditional to the MSR write in user_disable_single_step(), but centralizing the updates seems safer and (looking forward) more manageable. Signed-off-by: Jan Beulich Cc: Markus Metzger Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index cc0268395ea..40227c9bf51 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -741,6 +741,15 @@ extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); +static inline void update_debugctlmsr(unsigned long debugctlmsr) +{ +#ifndef CONFIG_X86_DEBUGCTLMSR + if (boot_cpu_data.x86 < 6) + return; +#endif + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); +} + /* * from system description table in BIOS. Mostly for MCA use, but * others may find it useful: -- cgit v1.2.3 From 5d570cbbf25a62e9c077f5b351fb142dbfc67288 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Tue, 11 Mar 2008 16:43:31 +0100 Subject: x86: correct/clarify comment in nops.h The comment describes certain multibyte instructions as "generic" nops when in fact they aren't nops at all in 64-bit mode (a missing REX.W causes truncation of a register). Update the comment to state that K8 or P6 style nops should be used in 64-bit mode. This matches what the alternatives code does. Signed-off-by: Mikael Pettersson Signed-off-by: Ingo Molnar --- include/asm-x86/nops.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index b3930ae539b..ad0bedd10b8 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h @@ -5,6 +5,8 @@ /* generic versions from gas 1: nop + the following instructions are NOT nops in 64-bit mode, + for 64-bit mode use K8 or P6 nops instead 2: movl %esi,%esi 3: leal 0x00(%esi),%esi 4: leal 0x00(,%esi,1),%esi -- cgit v1.2.3 From 6079d2d5d11122eb52721f0f3c828952a490e6c1 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 19:45:48 +0300 Subject: x86: move quad_local_to_mp_bus_id to numa.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mach-numaq/mach_mpparse.h | 2 ++ include/asm-x86/mpspec.h | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 51bbac8fc0c..cd21f289e6b 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -12,6 +12,8 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); } +extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; + static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 781ad74ab9e..dbd63f8d475 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -9,7 +9,6 @@ extern int mp_bus_id_to_type[MAX_MP_BUSSES]; extern int mp_bus_id_to_node[MAX_MP_BUSSES]; extern int mp_bus_id_to_local[MAX_MP_BUSSES]; -extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; -- cgit v1.2.3 From 8643f9d02a7bb9db74634b4c062d8e70ce7c59b9 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 19 Feb 2008 03:21:06 -0800 Subject: x86: get boot_cpu_id as early for 
k8_scan_nodes When acpi=off or there is no SRAT defined, apicid_to_node is read from K8 Northbridge PCI configuration space in k8_scan_nodes() in arch/x86_64/mm/k8topology.c. The problem is that it assumes the bsp apic id is 0 at that point. On a four-socket system with quad-core cpus installed, all cpu apic ids are offset by 4, and the bsp apic id is 4. On an eight-socket system with dual-core cpus installed, all cpu apic ids are offset by 2, and the bsp apic id is 2. We need to get boot_cpu_id --- the bsp apic id --- before k8_scan_nodes() is called. So create early_acpi_boot_init() and early_get_smp_config() to get boot_cpu_id. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 1 + include/asm-x86/mpspec.h | 3 +++ 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index f0321a427e1..db5f7501aed 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -129,6 +129,7 @@ extern void enable_NMI_through_LVT0(void); */ #ifdef CONFIG_X86_64 extern void setup_apic_routing(void); +extern void early_init_lapic_mapping(void); #endif extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index dbd63f8d475..982550bef2c 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -25,6 +25,9 @@ extern int pic_mode; extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); +extern void early_find_smp_config(void); +extern void early_get_smp_config(void); + #endif extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; -- cgit v1.2.3 From a6333c3ccbdc0ae001cff6ee1d3633942ef763f4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:09 +0300 Subject: x86: add mp_bus_not_pci bitmap to mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 982550bef2c..75df88e0a27 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -23,13 +23,12 @@ extern int pic_mode; /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) -extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); - extern void early_find_smp_config(void); extern void early_get_smp_config(void); #endif +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; -- cgit v1.2.3 From c0a282c251181aa423d4831719613b8286b5b839 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:55:02 +0300 Subject: x86: make mp_bus_id_to_type optional [ mingo@elte.hu: fix boot regression. 
] Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 75df88e0a27..5fa8b8bb7d0 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -6,7 +6,6 @@ #ifdef CONFIG_X86_32 #include -extern int mp_bus_id_to_type[MAX_MP_BUSSES]; extern int mp_bus_id_to_node[MAX_MP_BUSSES]; extern int mp_bus_id_to_local[MAX_MP_BUSSES]; @@ -28,7 +27,12 @@ extern void early_get_smp_config(void); #endif +#if defined(CONFIG_MCA) || defined(CONFIG_EISA) +extern int mp_bus_id_to_type[MAX_MP_BUSSES]; +#endif + extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; -- cgit v1.2.3 From e129cb490e842753b43af7aae136935fc0928dc8 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 22:55:42 +0300 Subject: x86: move mp_bus_id_to_local to numa.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mach-numaq/mach_mpparse.h | 2 ++ include/asm-x86/mpspec.h | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index cd21f289e6b..0917b95c42a 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -1,6 +1,8 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H +extern int mp_bus_id_to_local[MAX_MP_BUSSES]; + static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 5fa8b8bb7d0..1c52bd8711f 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -7,7 +7,6 @@ #include extern int mp_bus_id_to_node[MAX_MP_BUSSES]; -extern int mp_bus_id_to_local[MAX_MP_BUSSES]; extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; -- cgit v1.2.3 From 037cab07e9515149fecc2274775807f06ea6b036 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 22:55:48 +0300 Subject: x86: move mp_bus_id_to_node to numa.c Signed-off-by: Ingo Molnar --- include/asm-x86/mach-numaq/mach_mpparse.h | 1 + include/asm-x86/mpspec.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 0917b95c42a..254993ae04c 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -2,6 +2,7 @@ #define __ASM_MACH_MPPARSE_H extern int mp_bus_id_to_local[MAX_MP_BUSSES]; +extern int mp_bus_id_to_node[MAX_MP_BUSSES]; static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 1c52bd8711f..99da0a59b43 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -6,8 +6,6 @@ #ifdef CONFIG_X86_32 #include -extern int mp_bus_id_to_node[MAX_MP_BUSSES]; - extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; extern u8 apicid_2_node[]; -- cgit v1.2.3 From d0173aeac4f7fa90a63319b817bd207bdc0ac87e Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:24:59 -0300 Subject: x86: use start_ipi_hook in x86_64 It is used to match i386. 
The definition for the non-paravirt case is moved to smp.h instead of smp_32.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 4 ---- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 513c8571a4a..4dc271b4376 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -33,6 +33,9 @@ struct smp_ops { extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP +#ifndef CONFIG_PARAVIRT +#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) +#endif extern struct smp_ops smp_ops; static inline void smp_send_stop(void) diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 4fec2feb6ac..76740def609 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -30,10 +30,6 @@ DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #ifdef CONFIG_SMP -#ifndef CONFIG_PARAVIRT -#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -#endif - /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), -- cgit v1.2.3 From 1d89a7f072d4f76f0538edfb474d527066ee7838 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:05 -0300 Subject: x86: merge smp_store_cpu_info now that it is the same between arches, put it into smpboot.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 2 ++ include/asm-x86/smp_32.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 4dc271b4376..b4c5143d7f8 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -88,6 +88,8 @@ extern void prefill_possible_map(void); #define SMP_TRAMPOLINE_BASE 0x6000 extern unsigned long setup_trampoline(void); + +void smp_store_cpu_info(int id); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 76740def609..51624abda43 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -42,8 +42,6 @@ DECLARE_PER_CPU(int, cpu_number); extern int safe_smp_processor_id(void); -void __cpuinit smp_store_cpu_info(int id); - /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { -- cgit v1.2.3 From ac56ef61a1f65aaf1cb31dca2a407322c5f0a4dd Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:10 -0300 Subject: x86: provide APIC_INTEGRATED definition for x86_64 it is always integrated, so define as 1. 
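(Illustrative use, a sketch rather than part of the patch: callers can now test for an integrated APIC the same way on both architectures:)

	/* sketch: read the local APIC version register, then check integration */
	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
	if (!APIC_INTEGRATED(ver))
		printk("external 82489DX APIC\n"); /* reachable on 32-bit only */
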
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/apicdef.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 550af7a6f88..b0c6b285c91 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -22,7 +22,11 @@ #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFFu) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) -#define APIC_INTEGRATED(x) ((x)&0xF0u) +#ifdef CONFIG_X86_32 +# define APIC_INTEGRATED(x) ((x)&0xF0u) +#else +# define APIC_INTEGRATED(x) (1) +#endif #define APIC_XAPIC(x) ((x) >= 0x14) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFFu -- cgit v1.2.3 From 7e1efc0cde2a266fc31932ea7aed4bb20f524544 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:18 -0300 Subject: x86: unify extern masks declaration take them off smp_{32,64}.h and move to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 12 ++++++++++++ include/asm-x86/smp_32.h | 8 -------- include/asm-x86/smp_64.h | 11 ----------- 3 files changed, 12 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b4c5143d7f8..d02e6eacee3 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -3,12 +3,24 @@ #ifndef __ASSEMBLY__ #include #include +#include extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +extern u16 x86_cpu_to_apicid_init[]; +extern u16 x86_bios_cpu_apicid_init[]; +extern void *x86_cpu_to_apicid_early_ptr; +extern void *x86_bios_cpu_apicid_early_ptr; + +DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); +DECLARE_PER_CPU(cpumask_t, cpu_core_map); +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); + /* * Trampoline 80x86 program as an array. */ diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 51624abda43..478f5564630 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -21,14 +21,6 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); -extern u16 __initdata x86_cpu_to_apicid_init[]; -extern void *x86_cpu_to_apicid_early_ptr; - -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(u16, x86_cpu_to_apicid); - #ifdef CONFIG_SMP /* * This function is needed by all SMP systems. 
It must _always_ be valid diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 394c7852433..1b3c0f1de9a 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,17 +19,6 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -extern u16 __initdata x86_cpu_to_apicid_init[]; -extern u16 __initdata x86_bios_cpu_apicid_init[]; -extern void *x86_cpu_to_apicid_early_ptr; -extern void *x86_bios_cpu_apicid_early_ptr; - -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); - static inline int cpu_present_to_apicid(int mps_cpu) { if (cpu_present(mps_cpu)) -- cgit v1.2.3 From cbe879fc6c77b5751a91167654b75a39421d0f3f Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:19 -0300 Subject: x86: define bios to apicid mapping This mapping already exists in x86_64, just provide it for i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mach-bigsmp/mach_apic.h | 7 ++----- include/asm-x86/mach-es7000/mach_apic.h | 8 +++----- include/asm-x86/mach-summit/mach_apic.h | 3 +-- 3 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 6df235e8ea9..0d55b1f6d56 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h @@ -1,10 +1,7 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) +#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) #define esr_disable (1) static inline int apic_id_registered(void) @@ -90,7 +87,7 @@ static inline int apicid_to_node(int logical_apicid) static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); return BAD_APICID; } diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index d23011fdf45..04cba9f1e37 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -1,9 +1,7 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) +#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) #define esr_disable (1) static inline int apic_id_registered(void) @@ -80,7 +78,7 @@ extern void enable_apic_mode(void); extern int apic_version [MAX_APICS]; static inline void setup_apic_routing(void) { - int apic = bios_cpu_apicid[smp_processor_id()]; + int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? 
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); @@ -102,7 +100,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) if (!mps_cpu) return boot_cpu_physical_apicid; else if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 062c97f6100..91d7641cddc 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@ -40,7 +40,6 @@ static inline unsigned long check_apicid_present(int bit) #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) -extern u8 bios_cpu_apicid[]; extern u8 cpu_2_logical_apicid[]; static inline void init_apic_ldr(void) @@ -110,7 +109,7 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } -- cgit v1.2.3 From 04d1dd20f64f2b41baf5c01f57c574ca942ab4eb Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:21 -0300 Subject: x86: make node to apic mapping declarations unconditional Instead of declaring them inside of X86_64 ifdef, do it unconditionally Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/topology.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8af05a93f09..dada89e5b15 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -32,15 +32,15 @@ /* Mappings between logical cpu number and node number */ #ifdef CONFIG_X86_32 extern int cpu_to_node_map[]; - #else -DECLARE_PER_CPU(int, x86_cpu_to_node_map); -extern int x86_cpu_to_node_map_init[]; -extern void *x86_cpu_to_node_map_early_ptr; /* Returns the number of the current Node. */ #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) #endif +DECLARE_PER_CPU(int, x86_cpu_to_node_map); +extern int x86_cpu_to_node_map_init[]; +extern void *x86_cpu_to_node_map_early_ptr; + extern cpumask_t node_to_cpumask_map[]; #define NUMA_NO_NODE (-1) -- cgit v1.2.3 From fbac7fcbadc54cc5d374873a2e60e924a056d198 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:22 -0300 Subject: x86: fix alloc_bootmem_pages_node macro missing a semicolon Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 274a59566c4..b9f5be2f603 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -129,7 +129,7 @@ static inline int pfn_valid(int pfn) struct pglist_data __maybe_unused \ *__alloc_bootmem_node__pgdat = (pgdat); \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)) \ + __pa(MAX_DMA_ADDRESS)); \ }) #define alloc_bootmem_low_pages_node(pgdat, x) \ ({ \ -- cgit v1.2.3 From e32ede19ac64b5cd896e6d28aa51d34887791ab2 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:35 -0300 Subject: x86: wipe get_nmi_reason out of nmi_64.h use mach_traps when it is supposed to be used. 
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/nmi_64.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h index 2eeb74e5f3f..94a5b19e362 100644 --- a/include/asm-x86/nmi_64.h +++ b/include/asm-x86/nmi_64.h @@ -36,8 +36,6 @@ static inline void unset_nmi_pm_callback(struct pm_dev * dev) extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); -#define get_nmi_reason() inb(0x61) - extern int unknown_nmi_panic; extern int nmi_watchdog_enabled; -- cgit v1.2.3 From 6d60cd5359e261cad1e519e77ca733c05c2f8025 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:36 -0300 Subject: x86: unify nmi_32.h and nmi_64.h Two more files go away: nmi_64.h and nmi_32.h give birth to nmi.h. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/nmi.h | 92 ++++++++++++++++++++++++++++++++++++++++++++++-- include/asm-x86/nmi_32.h | 61 -------------------------------- include/asm-x86/nmi_64.h | 88 --------------------------------------------- 3 files changed, 89 insertions(+), 152 deletions(-) delete mode 100644 include/asm-x86/nmi_32.h delete mode 100644 include/asm-x86/nmi_64.h (limited to 'include') diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 53ccac14cea..2b941990347 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h @@ -1,5 +1,91 @@ -#ifdef CONFIG_X86_32 -# include "nmi_32.h" +#ifndef _ASM_X86_NMI_H_ +#define _ASM_X86_NMI_H_ + +#include +#include +#include + +#ifdef ARCH_HAS_NMI_WATCHDOG + +/** + * do_nmi_callback + * + * Check to see if a callback exists and execute it. Return 1 + * if the handler exists and was handled successfully. + */ +int do_nmi_callback(struct pt_regs *regs, int cpu); + +#ifdef CONFIG_PM + +/** Replace the PM callback routine for NMI. */ +struct pm_dev *set_nmi_pm_callback(pm_callback callback); + +/** Unset the PM callback routine back to the default.
*/ +void unset_nmi_pm_callback(struct pm_dev *dev); + #else -# include "nmi_64.h" + +static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback) +{ + return 0; +} + +static inline void unset_nmi_pm_callback(struct pm_dev *dev) +{ +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_X86_64 +extern void default_do_nmi(struct pt_regs *); +extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); +#endif + +extern int check_nmi_watchdog(void); +extern int nmi_watchdog_enabled; +extern int unknown_nmi_panic; +extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); +extern int avail_to_resrv_perfctr_nmi(unsigned int); +extern int reserve_perfctr_nmi(unsigned int); +extern void release_perfctr_nmi(unsigned int); +extern int reserve_evntsel_nmi(unsigned int); +extern void release_evntsel_nmi(unsigned int); +extern void nmi_watchdog_default(void); + +extern void setup_apic_nmi_watchdog(void *); +extern void stop_apic_nmi_watchdog(void *); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); + +extern atomic_t nmi_active; +extern unsigned int nmi_watchdog; +#define NMI_DISABLED -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 +#define NMI_DEFAULT NMI_DISABLED + +struct ctl_table; +struct file; +extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, + void __user *, size_t *, loff_t *); +extern int unknown_nmi_panic; + +void __trigger_all_cpu_backtrace(void); +#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() + +#endif + +void lapic_watchdog_stop(void); +int lapic_watchdog_init(unsigned nmi_hz); +int lapic_wd_event(unsigned nmi_hz); +unsigned lapic_adjust_nmi_hz(unsigned hz); +int lapic_watchdog_ok(void); +void disable_lapic_nmi_watchdog(void); +void enable_lapic_nmi_watchdog(void); +void stop_nmi(void); +void restart_nmi(void); + #endif diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h deleted file mode 100644 index 7206c7e8a38..00000000000 --- a/include/asm-x86/nmi_32.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include -#include - -#ifdef ARCH_HAS_NMI_WATCHDOG - -/** - * do_nmi_callback - * - * Check to see if a callback exists and execute it. Return 1 - * if the handler exists and was handled successfully. 
- */ -int do_nmi_callback(struct pt_regs *regs, int cpu); - -extern int nmi_watchdog_enabled; -extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); -extern int reserve_perfctr_nmi(unsigned int); -extern void release_perfctr_nmi(unsigned int); -extern int reserve_evntsel_nmi(unsigned int); -extern void release_evntsel_nmi(unsigned int); - -extern void setup_apic_nmi_watchdog (void *); -extern void stop_apic_nmi_watchdog (void *); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); - -extern atomic_t nmi_active; -extern unsigned int nmi_watchdog; -#define NMI_DISABLED -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 -#define NMI_DEFAULT NMI_DISABLED - -struct ctl_table; -struct file; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); -extern int unknown_nmi_panic; - -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() - -#endif - -void lapic_watchdog_stop(void); -int lapic_watchdog_init(unsigned nmi_hz); -int lapic_wd_event(unsigned nmi_hz); -unsigned lapic_adjust_nmi_hz(unsigned hz); -int lapic_watchdog_ok(void); -void disable_lapic_nmi_watchdog(void); -void enable_lapic_nmi_watchdog(void); -void stop_nmi(void); -void restart_nmi(void); - -#endif /* ASM_NMI_H */ diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h deleted file mode 100644 index 94a5b19e362..00000000000 --- a/include/asm-x86/nmi_64.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include -#include - -/** - * do_nmi_callback - * - * Check to see if a callback exists and execute it. Return 1 - * if the handler exists and was handled successfully. - */ -int do_nmi_callback(struct pt_regs *regs, int cpu); - -#ifdef CONFIG_PM - -/** Replace the PM callback routine for NMI. */ -struct pm_dev * set_nmi_pm_callback(pm_callback callback); - -/** Unset the PM callback routine back to the default. 
*/ -void unset_nmi_pm_callback(struct pm_dev * dev); - -#else - -static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) -{ - return 0; -} - -static inline void unset_nmi_pm_callback(struct pm_dev * dev) -{ -} - -#endif /* CONFIG_PM */ - -extern void default_do_nmi(struct pt_regs *); -extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); - -extern int unknown_nmi_panic; -extern int nmi_watchdog_enabled; - -extern int check_nmi_watchdog(void); -extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); -extern int reserve_perfctr_nmi(unsigned int); -extern void release_perfctr_nmi(unsigned int); -extern int reserve_evntsel_nmi(unsigned int); -extern void release_evntsel_nmi(unsigned int); - -extern void setup_apic_nmi_watchdog (void *); -extern void stop_apic_nmi_watchdog (void *); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); - -extern void nmi_watchdog_default(void); - -extern atomic_t nmi_active; -extern unsigned int nmi_watchdog; -#define NMI_DISABLED -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 -#define NMI_DEFAULT NMI_DISABLED - -struct ctl_table; -struct file; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); - -extern int unknown_nmi_panic; - -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() - - -void lapic_watchdog_stop(void); -int lapic_watchdog_init(unsigned nmi_hz); -int lapic_wd_event(unsigned nmi_hz); -unsigned lapic_adjust_nmi_hz(unsigned hz); -int lapic_watchdog_ok(void); -void disable_lapic_nmi_watchdog(void); -void enable_lapic_nmi_watchdog(void); -void stop_nmi(void); -void restart_nmi(void); - -#endif /* ASM_NMI_H */ -- cgit v1.2.3 From 50e440aa5323860d9e5960143b720e461ed0c582 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:38 -0300 Subject: x86: call nmi_watchdog_default in i386 This function does not exist on i386, so make it an empty macro there. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/nmi.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 2b941990347..1e363021e72 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h @@ -39,6 +39,9 @@ static inline void unset_nmi_pm_callback(struct pm_dev *dev) #ifdef CONFIG_X86_64 extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); +extern void nmi_watchdog_default(void); +#else +#define nmi_watchdog_default() do {} while (0) #endif extern int check_nmi_watchdog(void); @@ -50,7 +53,6 @@ extern int reserve_perfctr_nmi(unsigned int); extern void release_perfctr_nmi(unsigned int); extern int reserve_evntsel_nmi(unsigned int); extern void release_evntsel_nmi(unsigned int); -extern void nmi_watchdog_default(void); extern void setup_apic_nmi_watchdog(void *); extern void stop_apic_nmi_watchdog(void *); -- cgit v1.2.3 From f6bc40290964b5fcb48c226ccafa4b7536d62663 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:53 -0300 Subject: x86: include mach_apic.h in smpboot_64.c and smpboot.c After the inclusion, a lot of files need fixing for conflicts, some of them in the headers themselves, to accommodate both i386 and x86_64
versions. [ mingo@elte.hu: build fix ] Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 1 - include/asm-x86/apicdef.h | 6 ------ include/asm-x86/mach-default/mach_apic.h | 11 +++++++++++ include/asm-x86/mach-default/mach_apicdef.h | 5 +++++ include/asm-x86/smp_64.h | 9 +-------- 5 files changed, 17 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index db5f7501aed..424e1dfe13c 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -128,7 +128,6 @@ extern void enable_NMI_through_LVT0(void); * On 32bit this is mach-xxx local */ #ifdef CONFIG_X86_64 -extern void setup_apic_routing(void); extern void early_init_lapic_mapping(void); #endif diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index b0c6b285c91..674a2280e21 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -12,12 +12,6 @@ #define APIC_ID 0x20 -#ifdef CONFIG_X86_64 -# define APIC_ID_MASK (0xFFu<<24) -# define GET_APIC_ID(x) (((x)>>24)&0xFFu) -# define SET_APIC_ID(x) (((x)<<24)) -#endif - #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFFu) diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e3c2c1012c1..e081bdccde2 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -54,21 +54,27 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) return phys_map; } +#ifdef CONFIG_X86_64 +extern void setup_apic_routing(void); +#else static inline void setup_apic_routing(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "Flat", nr_ioapics); } +#endif static inline int multi_timer_check(int apic, int irq) { return 0; } +#ifdef CONFIG_X86_32 static inline int apicid_to_node(int logical_apicid) { return 0; } +#endif /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) @@ -78,8 +84,13 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { +#ifdef CONFIG_X86_64 + if (cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); +#else if (mps_cpu < get_physical_broadcast()) return mps_cpu; +#endif else return BAD_APICID; } diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index ae984131909..7b78275e6d3 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h @@ -3,7 +3,12 @@ #include +#ifdef CONFIG_X86_64 +#define APIC_ID_MASK (0xFFu<<24) +#define SET_APIC_ID(x) (((x)<<24)) +#else #define APIC_ID_MASK (0xF<<24) +#endif static inline unsigned get_apic_id(unsigned long x) { diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 1b3c0f1de9a..be870a4881f 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,14 +19,6 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (cpu_present(mps_cpu)) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - #ifdef CONFIG_SMP #define raw_smp_processor_id() read_pda(cpunumber) @@ -64,6 +56,7 @@ static __inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +#include static inline int hard_smp_processor_id(void) { /* we don't want to 
mark this access volatile - bad code generation */ -- cgit v1.2.3 From 9d97d0da71ad6c7ceb76b4e29b02bed1ee9d4cd2 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:57 -0300 Subject: x86: move stack_start to smp.h voyager would conflict with it, but the types are ultimately compatible. So remove the extern definition from voyager_smp.c in favour of the common one Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d02e6eacee3..78ef16dd194 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -28,6 +28,13 @@ extern const unsigned char trampoline_data []; extern const unsigned char trampoline_end []; extern unsigned char *trampoline_base; +/* Static state in head.S used to set up a CPU */ +extern struct { + void *sp; + unsigned short ss; +} stack_start; + + struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); -- cgit v1.2.3 From c70dcb74309cedfa64f0060f4a84792e873ceb53 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:58 -0300 Subject: x86: change boot_cpu_id to boot_cpu_physical_apicid This is to match i386. The former name was cuter, but the current is more meaningful and more general, since cpu_id can be a logical id. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 1 - include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 5 ----- include/asm-x86/smp_64.h | 4 ---- 4 files changed, 3 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index 424e1dfe13c..b804238489a 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -44,7 +44,6 @@ extern int apic_runs_main_timer; extern int ioapic_force; extern int disable_apic; extern int disable_apic_timer; -extern unsigned boot_cpu_id; /* * Basic functions accessing APICs. 
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 78ef16dd194..2ad2f4ffe49 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -109,6 +109,9 @@ extern void prefill_possible_map(void); extern unsigned long setup_trampoline(void); void smp_store_cpu_info(int id); +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +#else +#define cpu_physical_id(cpu) boot_cpu_physical_apicid #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 478f5564630..f861d041517 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -30,8 +30,6 @@ extern void zap_low_mappings (void); DECLARE_PER_CPU(int, cpu_number); #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) - extern int safe_smp_processor_id(void); /* We don't mark CPUs online until __cpu_up(), so we need another measure */ @@ -41,10 +39,7 @@ static inline int num_booting_cpus(void) } #else /* CONFIG_SMP */ - #define safe_smp_processor_id() 0 -#define cpu_physical_id(cpu) boot_cpu_physical_apicid - #endif /* !CONFIG_SMP */ #ifdef CONFIG_X86_LOCAL_APIC diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index be870a4881f..fd709cbba4d 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -22,7 +22,6 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), #ifdef CONFIG_SMP #define raw_smp_processor_id() read_pda(cpunumber) -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define stack_smp_processor_id() \ ({ \ @@ -41,9 +40,6 @@ static inline int num_booting_cpus(void) } #else /* CONFIG_SMP */ - -extern unsigned int boot_cpu_id; -#define cpu_physical_id(cpu) boot_cpu_id #define stack_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -- cgit v1.2.3 From cb3c8b9003f15efa4a750a32d2d602d40cc45d5a Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:59 -0300 Subject: x86: integrate do_boot_cpu This is a very large patch, because it depends on a lot of auxiliary static functions. But they all have been modified to the point that they're sufficiently close now. 
So they're just merged in smpboot.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 2ad2f4ffe49..ef26911dc22 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -9,6 +9,7 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +extern cpumask_t cpu_initialized; extern u16 x86_cpu_to_apicid_init[]; extern u16 x86_bios_cpu_apicid_init[]; @@ -34,6 +35,8 @@ extern struct { unsigned short ss; } stack_start; +extern unsigned long init_rsp; +extern unsigned long initial_code; struct smp_ops { void (*smp_prepare_boot_cpu)(void); -- cgit v1.2.3 From 3fa7b3487a1317f7d3be3043dbea316ca75abed5 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:06 -0300 Subject: x86: assign nr_ioapics = 0 in smpboot_hooks.h Change smpboot_setup_io_apic() to match x86_64 behaviour. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/smpboot_hooks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 7f45f631105..8e1c6c06d05 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h @@ -41,4 +41,6 @@ static inline void smpboot_setup_io_apic(void) */ if (!skip_ioapic_setup && nr_ioapics) setup_IO_APIC(); + else + nr_ioapics = 0; } -- cgit v1.2.3 From 9f3734f631267d2f36008833b62670ca342ac000 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:10 -0300 Subject: x86: introduce smpboot_clear_io_apic x86_64 has two nr_ioapics = 0 statements. In 32-bit, it can be done too.
We do it through the smpboot_clear_io_apic() inline function, to cope with subarchitectures (visws) that do not compile mpparse in. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/smpboot_hooks.h | 5 +++++ include/asm-x86/mach-visws/smpboot_hooks.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 8e1c6c06d05..3ff2c5bff93 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h @@ -44,3 +44,8 @@ static inline void smpboot_setup_io_apic(void) else nr_ioapics = 0; } + +static inline void smpboot_clear_io_apic(void) +{ + nr_ioapics = 0; +} diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h index d926471fa35..c9b83e395a2 100644 --- a/include/asm-x86/mach-visws/smpboot_hooks.h +++ b/include/asm-x86/mach-visws/smpboot_hooks.h @@ -22,3 +22,7 @@ static inline void smpboot_restore_warm_reset_vector(void) static inline void smpboot_setup_io_apic(void) { } + +static inline void smpboot_clear_io_apic(void) +{ +} -- cgit v1.2.3 From ce3fe6b2bfded4f5d931c5f2f9325dc2e3fd3a74 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:17 +0300 Subject: x86: use get_bios_ebda in mpparse_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/bios_ebda.h | 15 +++++++++++++++ include/asm-x86/mach-default/bios_ebda.h | 15 --------------- 2 files changed, 15 insertions(+), 15 deletions(-) create mode 100644 include/asm-x86/bios_ebda.h delete mode 100644 include/asm-x86/mach-default/bios_ebda.h (limited to 'include') diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h new file mode 100644 index 00000000000..9cbd9a668af --- /dev/null +++ b/include/asm-x86/bios_ebda.h @@ -0,0 +1,15 @@ +#ifndef _MACH_BIOS_EBDA_H +#define _MACH_BIOS_EBDA_H + +/* + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. + */ +static inline unsigned int get_bios_ebda(void) +{ + unsigned int address = *(unsigned short *)phys_to_virt(0x40E); + address <<= 4; + return address; /* 0 means none */ +} + +#endif /* _MACH_BIOS_EBDA_H */ diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/mach-default/bios_ebda.h deleted file mode 100644 index 9cbd9a668af..00000000000 --- a/include/asm-x86/mach-default/bios_ebda.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _MACH_BIOS_EBDA_H -#define _MACH_BIOS_EBDA_H - -/* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E.
- */ -static inline unsigned int get_bios_ebda(void) -{ - unsigned int address = *(unsigned short *)phys_to_virt(0x40E); - address <<= 4; - return address; /* 0 means none */ -} - -#endif /* _MACH_BIOS_EBDA_H */ -- cgit v1.2.3 From 4655c7deca112bea86ca00f616f19c3717f687aa Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:36 +0300 Subject: x86: remove mpc_apic_id() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 3 --- include/asm-x86/mach-bigsmp/mach_apic.h | 11 ----------- include/asm-x86/mach-default/mach_apic.h | 11 ----------- include/asm-x86/mach-es7000/mach_apic.h | 10 ---------- include/asm-x86/mach-generic/mach_apic.h | 1 - include/asm-x86/mach-summit/mach_apic.h | 11 ----------- 6 files changed, 47 deletions(-) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 33e3ffe1766..5d024400ddd 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -42,8 +42,6 @@ struct genapic { int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - int (*mpc_apic_id)(struct mpc_config_processor *m, - struct mpc_config_translation *t); void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); @@ -105,7 +103,6 @@ struct genapic { APICFUNC(cpu_to_logical_apicid) \ APICFUNC(cpu_present_to_apicid) \ APICFUNC(apicid_to_cpu_present) \ - APICFUNC(mpc_apic_id) \ APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ APICFUNC(mpc_oem_bus_info) \ diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 0d55b1f6d56..8327907c79b 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h @@ -106,17 +106,6 @@ static inline int cpu_to_logical_apicid(int cpu) return cpu_physical_id(cpu); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e081bdccde2..13900e8cc1a 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -100,17 +100,6 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline void setup_portio_remap(void) { } diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index 04cba9f1e37..0137b6e142c 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -127,16 +127,6 @@ static inline int cpu_to_logical_apicid(int cpu) #endif } -static inline int mpc_apic_id(struct 
mpc_config_processor *m, struct mpc_config_translation *unused) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); -} - static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h index a236e702152..6eff343e123 100644 --- a/include/asm-x86/mach-generic/mach_apic.h +++ b/include/asm-x86/mach-generic/mach_apic.h @@ -19,7 +19,6 @@ #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) -#define mpc_apic_id (genapic->mpc_apic_id) #define setup_portio_remap (genapic->setup_portio_remap) #define check_apicid_present (genapic->check_apicid_present) #define check_phys_apicid_present (genapic->check_phys_apicid_present) diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 91d7641cddc..1f76c2e7023 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@ -125,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) return physid_mask_of_physid(0); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline void setup_portio_remap(void) { } -- cgit v1.2.3 From d285e338899a4ff662a17b22d3bb0e48bb1465d4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:42 +0300 Subject: x86: remove mpc_oem_pci_bus() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 3 --- include/asm-x86/mach-default/mach_mpparse.h | 5 ----- include/asm-x86/mach-es7000/mach_mpparse.h | 5 ----- include/asm-x86/mach-generic/mach_mpparse.h | 1 - include/asm-x86/mach-numaq/mach_mpparse.h | 12 ++---------- include/asm-x86/mach-summit/mach_mpparse.h | 5 ----- 6 files changed, 2 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5d024400ddd..5a1ae0164aa 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -50,8 +50,6 @@ struct genapic { /* mpparse */ void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, struct mpc_config_translation *); - void (*mpc_oem_pci_bus)(struct mpc_config_bus *, - struct mpc_config_translation *); /* When one of the next two hooks returns 1 the genapic is switched to this. 
Essentially they are additional probe @@ -106,7 +104,6 @@ struct genapic { APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ APICFUNC(mpc_oem_bus_info) \ - APICFUNC(mpc_oem_pci_bus) \ APICFUNC(mps_oem_check) \ APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 1d383248258..679393c26dc 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h @@ -7,11 +7,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 52ee75cd0fe..3afa1b01081 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h @@ -9,11 +9,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - extern int parse_unisys_oem (char *oemptr); extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index dbd9fce54f4..2a693715033 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h @@ -4,7 +4,6 @@ #include #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) -#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); int acpi_madt_oem_check(char *oem_id, char *oem_table_id); diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 254993ae04c..4a5d8a8b01a 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -15,16 +15,8 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); } -extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; -} +extern void mpc_oem_pci_bus(struct mpc_config_bus *m, + struct mpc_config_translation *translation); /* Hook from generic ACPI tables.c */ static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index c2520539d93..e1af489a880 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h @@ -18,11 +18,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { -- cgit v1.2.3 From 
0ec153af4dec8944e6da558093914a3bce4c76f9 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:48 +0300 Subject: x86: remove mpc_oem_bus_info() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 4 ---- include/asm-x86/mach-default/mach_mpparse.h | 6 ------ include/asm-x86/mach-es7000/mach_mpparse.h | 6 ------ include/asm-x86/mach-generic/mach_mpparse.h | 4 ---- include/asm-x86/mach-numaq/mach_mpparse.h | 16 ++-------------- include/asm-x86/mach-summit/mach_mpparse.h | 6 ------ 6 files changed, 2 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5a1ae0164aa..5a1b68ac3ca 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -48,9 +48,6 @@ struct genapic { u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ - void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, - struct mpc_config_translation *); - /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. */ @@ -103,7 +100,6 @@ struct genapic { APICFUNC(apicid_to_cpu_present) \ APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ - APICFUNC(mpc_oem_bus_info) \ APICFUNC(mps_oem_check) \ APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 679393c26dc..d14108505bb 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h @@ -1,12 +1,6 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ -// Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 3afa1b01081..ef26d352362 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h @@ -3,12 +3,6 @@ #include -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - extern int parse_unisys_oem (char *oemptr); extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index 2a693715033..0d0b5ba2e9d 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h @@ -1,10 +1,6 @@ #ifndef _MACH_MPPARSE_H #define _MACH_MPPARSE_H 1 -#include - -#define mpc_oem_bus_info (genapic->mpc_oem_bus_info) - int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); int acpi_madt_oem_check(char *oem_id, char *oem_table_id); diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 4a5d8a8b01a..459b1240118 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -1,20 +1,8 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H -extern int mp_bus_id_to_local[MAX_MP_BUSSES]; -extern int mp_bus_id_to_node[MAX_MP_BUSSES]; - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - 
struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - mp_bus_id_to_node[m->mpc_busid] = quad; - mp_bus_id_to_local[m->mpc_busid] = local; - printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); -} - +extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, + struct mpc_config_translation *translation); extern void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation); diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index e1af489a880..fdf59170133 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h @@ -12,12 +12,6 @@ extern void setup_summit(void); #define setup_summit() {} #endif -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { -- cgit v1.2.3 From 864205062f1c752c80077be8ec2b15c81f4a6525 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:55 +0300 Subject: x86: make struct mpc_config_translation NUMAQ-only Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 1 - include/asm-x86/mach-numaq/mach_apic.h | 10 ++++++++++ include/asm-x86/mpspec_def.h | 11 ----------- 3 files changed, 10 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5a1b68ac3ca..b501ae7809b 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -14,7 +14,6 @@ * Copyright 2003 Andi Kleen, SuSE Labs. 
*/ -struct mpc_config_translation; struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 3b637fac890..75a56e5afbe 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h @@ -95,6 +95,16 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) return physid_mask_of_physid(cpu + 4*node); } +struct mpc_config_translation { + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; +}; + static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 3504617fe64..1f35691b4f7 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h @@ -166,17 +166,6 @@ struct mp_config_oemtable char mpc_oem[8]; }; -struct mpc_config_translation -{ - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - /* * Default configurations * -- cgit v1.2.3 From aa7d8e25eca5deb33eb08013bc78a80514349b40 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Thu, 20 Mar 2008 00:41:16 -0700 Subject: x86: fix build breakage when PCI is defined and PARAVIRT is not - Fix the build breakage when PARAVIRT is defined but PCI is not. This fixes the problem reported at: http://marc.info/?l=linux-kernel&m=120525966600698&w=2 - Make is_vsmp_box() available even when PARAVIRT is not defined. This is needed to determine if TSCs are reliable as a time source even when PARAVIRT is not defined. - Split vsmp_init to use is_vsmp_box() and set_vsmp_pv_ops(). set_vsmp_pv_ops() will do nothing if PCI is not enabled in the config. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index b804238489a..b11d6524fb1 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -50,19 +50,16 @@ extern int disable_apic_timer; */ #ifdef CONFIG_PARAVIRT #include -extern int is_vsmp_box(void); #else #define apic_write native_apic_write #define apic_write_atomic native_apic_write_atomic #define apic_read native_apic_read #define setup_boot_clock setup_boot_APIC_clock #define setup_secondary_clock setup_secondary_APIC_clock -static int inline is_vsmp_box(void) -{ - return 0; -} #endif +extern int is_vsmp_box(void); + static inline void native_apic_write(unsigned long reg, u32 v) { *((volatile u32 *)(APIC_BASE + reg)) = v; -- cgit v1.2.3 From ede1389f8ab4f3a1343e567133fa9720a054a3aa Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 17 Mar 2008 22:29:32 +0200 Subject: x86: remove the write-only timer_uses_ioapic_pin_0 This patch removes the write-only timer_uses_ioapic_pin_0 (gsi can't be <= 15 in the line of its fake usage in mpparse_32.c). Spotted by the GNU C compiler.
Signed-off-by: Adrian Bunk Signed-off-by: Ingo Molnar --- include/asm-x86/io_apic.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 0f5b3fef0b0..095e8e30a34 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h @@ -146,7 +146,6 @@ extern int io_apic_get_version(int ioapic); extern int io_apic_get_redir_entries(int ioapic); extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low); -extern int timer_uses_ioapic_pin_0; #endif /* CONFIG_ACPI */ extern int (*ioapic_renumber_irq)(int ioapic, int irq); -- cgit v1.2.3 From f7d909d5475bb27d261389a3902860e086b0d4c9 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 14 Mar 2008 07:56:32 +0000 Subject: x86: simplify sync_test_bit() There really is no need for a redundant implementation here; just keep the alternative name so consumers can use consistent naming. Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'include') diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index 6b775c90566..bc249f40e0e 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -123,26 +123,7 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) return oldbit; } -static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr) -{ - return ((1UL << (nr & 31)) & - (((const volatile unsigned int *)addr)[nr >> 5])) != 0; -} - -static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (ADDR),"Ir" (nr)); - return oldbit; -} - -#define sync_test_bit(nr,addr) \ - (__builtin_constant_p(nr) ? \ - sync_constant_test_bit((nr),(addr)) : \ - sync_var_test_bit((nr),(addr))) +#define sync_test_bit test_bit #undef ADDR -- cgit v1.2.3 From a5ae1c372dc5bbaee905bcede524d7180d22b362 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 13 Mar 2008 19:44:56 +0300 Subject: x86: processor.h - use PAGE_SIZE instead of numeric value This patch replaces a numeric constant with an appropriate macro. Also, 0x800000000000UL is changed to a bit shift, which matches the code comment (thanks to hpa for the notice). Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 40227c9bf51..b0dece41dbc 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -870,7 +870,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE64 (0x800000000000UL - 4096) +#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. -- cgit v1.2.3 From 272b9cad6e7a2f61b13cfcd7dde0010e02e9376e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 20 Mar 2008 23:58:33 -0700 Subject: x86: early memtest to find bad ram Do a simple memtest after init_memory_mapping: use find_e820_area_size to find all RAM ranges that are not reserved, and run some simple bit-pattern tests over them to find bad RAM. If bad RAM is found, use reserve_early to exclude that range.
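[ For illustration: a hedged sketch of the bit-pattern test described above. The helper name early_memtest_range() is made up and the real code lives in arch/x86/mm rather than in these headers; only find_e820_area_size() and reserve_early() are interfaces this series actually touches. ]

static void __init early_memtest_range(unsigned long start_phys,
				       unsigned long size,
				       unsigned long pattern)
{
	unsigned long *start = __va(start_phys);
	unsigned long *end = start + size / sizeof(unsigned long);
	unsigned long *p;

	/* fill the unreserved RAM range with a known bit pattern... */
	for (p = start; p < end; p++)
		*p = pattern;

	/* ...then read it back; any mismatch marks the page as bad RAM */
	for (p = start; p < end; p++)
		if (*p != pattern)
			reserve_early(__pa(p) & PAGE_MASK,
				      (__pa(p) & PAGE_MASK) + PAGE_SIZE,
				      "BAD RAM");
}

A real implementation would coalesce adjacent bad words into one reserved range and repeat the pass with several patterns (all zeroes, all ones, alternating bits).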
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/e820_64.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index ef653a403e0..d38820b31c1 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -16,6 +16,9 @@ #ifndef __ASSEMBLY__ extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned long size, unsigned long align); +extern unsigned long find_e820_area_size(unsigned long start, + unsigned long *sizep, + unsigned long align); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, -- cgit v1.2.3 From 2e5d9c857d4e6c9e7b7d8c8c86a68a7842d213d6 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:14 -0700 Subject: x86: PAT infrastructure patch Sets up pat_init() infrastructure. PAT MSR has following setting. PAT |PCD ||PWT ||| 000 WB _PAGE_CACHE_WB 001 WC _PAGE_CACHE_WC 010 UC- _PAGE_CACHE_UC_MINUS 011 UC _PAGE_CACHE_UC We are effectively changing WT from boot time setting to WC. UC_MINUS is used to provide backward compatibility to existing /dev/mem users(X). reserve_memtype and free_memtype are new interfaces for maintaining alias-free mapping. It is currently implemented in a simple way with a linked list and not optimized. reserve and free tracks the effective memory type, as a result of PAT and MTRR setting rather than what is actually requested in PAT. pat_init piggy backs on mtrr_init as the rules for setting both pat and mtrr are same. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- include/asm-x86/cpufeature.h | 1 + include/asm-x86/msr-index.h | 2 ++ include/asm-x86/mtrr.h | 2 ++ include/asm-x86/pat.h | 16 ++++++++++++++++ include/asm-x86/pgtable.h | 6 ++++++ 5 files changed, 27 insertions(+) create mode 100644 include/asm-x86/pat.h (limited to 'include') diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 90feb6f2562..0d609c837a4 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -186,6 +186,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) +#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 3ed97144c07..af4e07f661b 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -57,6 +57,8 @@ #define MSR_MTRRfix4K_F8000 0x0000026f #define MSR_MTRRdefType 0x000002ff +#define MSR_IA32_CR_PAT 0x00000277 + #define MSR_IA32_DEBUGCTLMSR 0x000001d9 #define MSR_IA32_LASTBRANCHFROMIP 0x000001db #define MSR_IA32_LASTBRANCHTOIP 0x000001dc diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 319d065800b..968794af93f 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -84,6 +84,8 @@ struct mtrr_gentry #ifdef __KERNEL__ +extern u8 mtrr_type_lookup(u64 addr, u64 end); + /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR extern void mtrr_save_fixed_ranges(void *); diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h new file mode 100644 index 00000000000..8b822b5a178 --- /dev/null +++ 
b/include/asm-x86/pat.h @@ -0,0 +1,16 @@ + +#ifndef _ASM_PAT_H +#define _ASM_PAT_H 1 + +#include + +extern int pat_wc_enabled; + +extern void pat_init(void); + +extern int reserve_memtype(u64 start, u64 end, + unsigned long req_type, unsigned long *ret_type); +extern int free_memtype(u64 start, u64 end); + +#endif + diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 9cf472aeb9c..ca6deb3de7c 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -57,6 +57,12 @@ #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) +#define _PAGE_CACHE_WB (0) +#define _PAGE_CACHE_WC (_PAGE_PWT) +#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) +#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) + #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -- cgit v1.2.3 From 3a96ce8cac808fbed5493adc5c605bced28e2ca1 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:16 -0700 Subject: x86: PAT make ioremap_change_attr non-static Make ioremap_change_attr() non-static and use prot_val in place of ioremap_mode. This interface is used in subsequent PAT patches. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- include/asm-x86/io.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 5a58b176dd6..6fa150fa68f 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -3,3 +3,6 @@ #else # include "io_64.h" #endif +extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, + unsigned long prot_val); + -- cgit v1.2.3 From 1219333dfdd488e85f08cf07881b8bc63cf92f21 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:18 -0700 Subject: x86: PAT use reserve free memtype in set_memory_uc Use the reserve_memtype and free_memtype interfaces in the set_memory_uc/set_memory_wb interfaces to avoid aliasing. The usage model of set_memory_uc and set_memory_wb is for RAM: users first call set_memory_uc, then call set_memory_wb after use to reset the attribute. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5396c212d8c..5676fba10a0 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -34,6 +34,8 @@ int set_pages_nx(struct page *page, int numpages); int set_pages_ro(struct page *page, int numpages); int set_pages_rw(struct page *page, int numpages); +int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wb(unsigned long addr, int numpages); int set_memory_uc(unsigned long addr, int numpages); int set_memory_wb(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); -- cgit v1.2.3 From ef354af4629e5cc76a3f64fc46d452f2b56d5a59 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:23 -0700 Subject: x86: PAT add set_memory_wc() interface Add a set_memory_wc() interface, similar to the set_memory_uc interface. Callers have to call set_memory_uc/set_memory_wb and set_memory_wc/set_memory_wb as pairs.
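[ For illustration: a hedged usage sketch of the pairing rule above. stream_to_buffer(), buf and nr_pages are made-up caller state; only the set_memory_wc()/set_memory_wb() declarations come from this patch. ]

static int stream_to_buffer(void *buf, int nr_pages)
{
	int ret;

	/* switch the buffer to write-combining for streaming stores */
	ret = set_memory_wc((unsigned long)buf, nr_pages);
	if (ret)
		return ret;

	/* ... write the data; WC batches the stores ... */

	/* pair it with set_memory_wb() to restore the write-back type */
	return set_memory_wb((unsigned long)buf, nr_pages);
}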
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5676fba10a0..90437d3f761 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -35,8 +35,10 @@ int set_pages_ro(struct page *page, int numpages); int set_pages_rw(struct page *page, int numpages); int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wc(unsigned long addr, int numpages); int _set_memory_wb(unsigned long addr, int numpages); int set_memory_uc(unsigned long addr, int numpages); +int set_memory_wc(unsigned long addr, int numpages); int set_memory_wb(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); -- cgit v1.2.3 From b310f381d220b2c6e3fab16e8c6e4ca13eea75b2 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:24 -0700 Subject: x86: PAT add ioremap_wc() interface Introduce ioremap_wc for wc remap. (generic wrapper is in a later patch) Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- include/asm-x86/io.h | 3 +++ include/asm-x86/pgtable.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 6fa150fa68f..599cad3505c 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -1,3 +1,5 @@ +#define ARCH_HAS_IOREMAP_WC + #ifdef CONFIG_X86_32 # include "io_32.h" #else @@ -5,4 +7,5 @@ #endif extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); +extern void __iomem * ioremap_wc(unsigned long offset, unsigned long size); diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index ca6deb3de7c..e814cfe96af 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -90,6 +90,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) @@ -107,6 +108,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) +#define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC) #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) -- cgit v1.2.3 From 52783fa8d6b847857fdd86df418e09c026d816f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 21 Mar 2008 15:42:28 +0100 Subject: x86: PAT fix build fix for !CONFIG_MTRR. 
Signed-off-by: Ingo Molnar --- include/asm-x86/mtrr.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 968794af93f..ee172296e05 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -84,10 +84,9 @@ struct mtrr_gentry #ifdef __KERNEL__ -extern u8 mtrr_type_lookup(u64 addr, u64 end); - /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR +extern u8 mtrr_type_lookup(u64 addr, u64 end); extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); extern int mtrr_add (unsigned long base, unsigned long size, @@ -101,6 +100,13 @@ extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); # else +static inline u8 mtrr_type_lookup(u64 addr, u64 end) +{ + /* + * Return no-MTRRs: + */ + return 0xff; +} #define mtrr_save_fixed_ranges(arg) do {} while (0) #define mtrr_save_state() do {} while (0) static __inline__ int mtrr_add (unsigned long base, unsigned long size, -- cgit v1.2.3 From 35605a1027ac630f85a1b95684f7e86b82498cd6 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 24 Mar 2008 16:02:01 -0700 Subject: x86: enable PAT for amd k8 and fam10h Make known_pat_cpu consider AMD K8 and Fam10h OK too; also make the range below TOM2 write-back (WRBACK). Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- include/asm-x86/mtrr.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index ee172296e05..d3d26625aea 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -99,6 +99,7 @@ extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); +extern int amd_special_default_mtrr(void); # else static inline u8 mtrr_type_lookup(u64 addr, u64 end) { -- cgit v1.2.3 From 709f744f18ebc3a810d29c8d5502bf20c3cecc70 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 13 Mar 2008 09:08:51 +0000 Subject: x86: bitops asm constraint fixes This (simplified) piece of code didn't behave as expected due to incorrect constraints in some of the bitops functions, when X86_FEATURE_xxx is referring to other than the first long: int test(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_xxx)) clear_cpu_cap(c, X86_FEATURE_xxx); return cpu_has(c, X86_FEATURE_xxx); } I'd really like to understand, though, what the policy of (not) having a "memory" clobber in these operations is - currently, this appears to be totally inconsistent. Also, many comments on the non-atomic functions say those may also be re-ordered - this contradicts the use of "asm volatile" in there, which again I'd like to understand. On top of all of this, using 'int' for the 'nr' parameter and 'void *' for the 'addr' one is in conflict with Documentation/atomic_ops.txt, especially because bt{,c,r,s} indeed take the bit index as signed (which hence would really need special precaution) and access the full 32 bits (if 'unsigned long' was used properly here, 64 bits for x86-64) pointed at, so invalid uses like referencing a 'char' array cannot currently be caught. Finally, the code with and without this patch relies heavily on the -fno-strict-aliasing compiler switch and I'm not certain this really is a good idea. In the light of all of this I'm sending this as RFC, as fixing the above might warrant a much bigger patch...
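To make the 'char' array point concrete, here is a hypothetical example (not from the patch): because the parameters are 'int nr' and 'void *addr', the following compiles without any diagnostic, even though bts/btr address a full 32-bit word (64-bit on x86-64) relative to addr:

	static void bitops_type_hole(void)
	{
		unsigned char flags[4];

		__set_bit(5, flags);	/* inside the array, but only by luck */
		__set_bit(40, flags);	/* silently writes past 'flags' */
	}

With an 'unsigned long *addr' prototype, both calls would at least require an explicit cast at the call site, making the questionable conversion visible.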
Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- include/asm-x86/bitops.h | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1a23ce1a569..7a76555b676 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -24,9 +24,12 @@ /* Technically wrong, but this avoids compilation errors on some gcc versions. */ #define ADDR "=m" (*(volatile long *) addr) +#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5]) #else #define ADDR "+m" (*(volatile long *) addr) +#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5]) #endif +#define BASE_ADDR "m" (*(volatile int *) addr) /** * set_bit - Atomically set a bit in memory @@ -79,9 +82,8 @@ static inline void __set_bit(int nr, volatile void *addr) */ static inline void clear_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btr %1,%0" - : ADDR - : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btr %1,%2" + : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -100,7 +102,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr) static inline void __clear_bit(int nr, volatile void *addr) { - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -135,7 +137,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) */ static inline void __change_bit(int nr, volatile void *addr) { - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -149,8 +151,8 @@ static inline void __change_bit(int nr, volatile void *addr) */ static inline void change_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btc %1,%0" - : ADDR : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btc %1,%2" + : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -198,10 +200,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) { int oldbit; - asm("bts %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + asm volatile("bts %2,%3\n\t" + "sbb %0,%0" + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -238,10 +240,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -250,10 +252,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -288,10 +290,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr) { int oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%3\n\t" "sbb %0,%0" : "=r" (oldbit) - : "m" (*(unsigned long *)addr), "Ir" (nr)); + : "m" (((volatile const int *)addr)[nr >> 5]), + "Ir" (nr), BASE_ADDR); return oldbit; } @@ -310,6 +313,8 @@ static int test_bit(int nr, const volatile unsigned long *addr); constant_test_bit((nr),(addr)) : \ variable_test_bit((nr),(addr))) +#undef BASE_ADDR +#undef BIT_ADDR #undef ADDR #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 8434e73d9ec2aeaa86389e362b960ffba5edd9c9 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 21 Mar 2008 08:31:57 -0500 Subject: x86: increase max physical memory size of 64-bit Increase the 
maximum physical address size of an x86_64 system to 44 bits. This is in preparation for future chips that support larger physical memory sizes. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/sparsemem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa58cd55411..fa684f353aa 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h @@ -26,8 +26,8 @@ # endif #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ -# define MAX_PHYSADDR_BITS 40 -# define MAX_PHYSMEM_BITS 40 +# define MAX_PHYSADDR_BITS 44 +# define MAX_PHYSMEM_BITS 44 #endif #endif /* CONFIG_SPARSEMEM */ -- cgit v1.2.3 From 9b967106da0357ef8b08847dce35584a04134f20 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 21 Mar 2008 15:14:07 -0700 Subject: x86: fix smpboot integration > yhlu@mpk:~/xx/xx/kernel/x86/linux-2.6> git-bisect bad > d1c707188ad646c8094cac9afb1738e7d0196ff2 is first bad commit > commit d1c707188ad646c8094cac9afb1738e7d0196ff2 > Author: Glauber de Oliveira Costa > Date: Wed Mar 19 14:25:53 2008 -0300 > > x86: include mach_apic.h in smpboot_64.c and smpboot.c > > After the inclusion, a lot of files needs fixing for conflicts, > some of them in the headers themselves, to accomodate for both > i386 and x86_64 versions. > > [ mingo@elte.hu: build fix ] > > Signed-off-by: Glauber Costa > Signed-off-by: Ingo Molnar > > :040000 040000 19f574e64bb8003bbe984f3a8c1315db969dfdcd > 6ffe96588c77bc936705599fa110107856201115 M arch > :040000 040000 61269347ad4f384ed85cc87c4f2d004ed94492ac > 8f5c713da25579a3cdf63db3d4c2f795261d0521 M include > yhlu@mpk:~/xx/xx/kernel/x86/linux-2.6> The attached patch fixes that. --- include/asm-x86/mach-default/mach_apicdef.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index 7b78275e6d3..e4b29ba37de 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h @@ -5,13 +5,12 @@ #ifdef CONFIG_X86_64 #define APIC_ID_MASK (0xFFu<<24) +#define GET_APIC_ID(x) (((x)>>24)&0xFFu) #define SET_APIC_ID(x) (((x)<<24)) #else #define APIC_ID_MASK (0xF<<24) -#endif - static inline unsigned get_apic_id(unsigned long x) -{ +{ unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); if (APIC_XAPIC(ver)) return (((x)>>24)&0xFF); @@ -20,5 +19,6 @@ static inline unsigned get_apic_id(unsigned long x) } #define GET_APIC_ID(x) get_apic_id(x) +#endif #endif -- cgit v1.2.3 From d3463c5a66147bdd21b5865ea29fdca50ea28f7f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 4 Apr 2008 13:31:15 +0200 Subject: undo "x86: fix breakage of vSMP irq operations" revert: "x86: fix breakage of vSMP irq operations" The irqflags.h unification will solve this in a cleaner way.
Signed-off-by: Ingo Molnar --- include/asm-x86/irqflags.h | 29 ----------------------------- 1 file changed, 29 deletions(-) (limited to 'include') diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 0e2292483b3..92021c1ffa3 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -70,26 +70,6 @@ static inline void raw_local_irq_restore(unsigned long flags) native_restore_fl(flags); } -#ifdef CONFIG_X86_VSMP - -/* - * Interrupt control for the VSMP architecture: - */ - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags = __raw_local_save_flags(); - raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); -} - -static inline void raw_local_irq_enable(void) -{ - unsigned long flags = __raw_local_save_flags(); - raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); -} - -#else - static inline void raw_local_irq_disable(void) { native_irq_disable(); @@ -100,8 +80,6 @@ static inline void raw_local_irq_enable(void) native_irq_enable(); } -#endif - /* * Used in the idle loop; sti takes one instruction cycle * to complete: @@ -159,17 +137,10 @@ static inline unsigned long __raw_local_irq_save(void) #define raw_local_irq_save(flags) \ do { (flags) = __raw_local_irq_save(); } while (0) -#ifdef CONFIG_X86_VSMP -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); -} -#else static inline int raw_irqs_disabled_flags(unsigned long flags) { return !(flags & X86_EFLAGS_IF); } -#endif static inline int raw_irqs_disabled(void) { -- cgit v1.2.3 From 8dbeeb24e4d64a8a4bb49d54a6e4d987b9d3b6fc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:36 -0700 Subject: include/asm-x86/acpi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/acpi.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 7a72d6aa50b..14411c9de46 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h @@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock); */ #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ + : "=a"(q32), "=d"(r32) \ + : "r"(d32), \ "0"(n_lo), "1"(n_hi)) #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ asm("shrl $1,%2 ;" \ "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) + : "=r"(n_hi), "=r"(n_lo) \ + : "0"(n_hi), "1"(n_lo)) #ifdef CONFIG_ACPI extern int acpi_lapic; -- cgit v1.2.3 From 2ac1ea7ccd81f0383c6525e495a31d18fcac92db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:37 -0700 Subject: include/asm-x86/alternative.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/alternative.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d26416b5722..1f6a9ca1012 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h @@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod); extern void alternatives_smp_switch(int smp); #else static inline void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) {} + void *locks, void *locks_end, + void *text, void *text_end) {} static inline void 
alternatives_smp_module_del(struct module *mod) {} static inline void alternatives_smp_switch(int smp) {} #endif /* CONFIG_SMP */ @@ -148,9 +148,8 @@ struct paravirt_patch_site; void apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *end); #else -static inline void -apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end) +static inline void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end) {} #define __parainstructions NULL #define __parainstructions_end NULL -- cgit v1.2.3 From b03aa8c6eb2d743ef4658b494dc1780e7740efc0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:38 -0700 Subject: include/asm-x86/a.out-core.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/a.out-core.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h index d2b6e11d3e9..714207a1c38 100644 --- a/include/asm-x86/a.out-core.h +++ b/include/asm-x86/a.out-core.h @@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->magic = CMAGIC; dump->start_code = 0; dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; + dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; + dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) + >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; dump->u_debugreg[0] = current->thread.debugreg0; @@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->u_debugreg[7] = current->thread.debugreg7; if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; + dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) + >> PAGE_SHIFT; dump->regs.bx = regs->bx; dump->regs.cx = regs->cx; @@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->regs.ds = (u16)regs->ds; dump->regs.es = (u16)regs->es; dump->regs.fs = (u16)regs->fs; - savesegment(gs,gs); + savesegment(gs, gs); dump->regs.orig_ax = regs->orig_ax; dump->regs.ip = regs->ip; dump->regs.cs = (u16)regs->cs; @@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->regs.sp = regs->sp; dump->regs.ss = (u16)regs->ss; - dump->u_fpvalid = dump_fpu (regs, &dump->i387); + dump->u_fpvalid = dump_fpu(regs, &dump->i387); } #endif /* CONFIG_X86_32 */ -- cgit v1.2.3 From 79a4a961ef92744dd8876963690a8beb56758457 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:39 -0700 Subject: include/asm-x86/apicdef.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/apicdef.h | 50 +++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 674a2280e21..8b244683431 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -14,10 +14,10 @@ #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF -#define GET_APIC_VERSION(x) ((x)&0xFFu) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) +#define GET_APIC_VERSION(x) ((x) & 0xFFu) +#define 
GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) #ifdef CONFIG_X86_32 -# define APIC_INTEGRATED(x) ((x)&0xF0u) +# define APIC_INTEGRATED(x) ((x) & 0xF0u) #else # define APIC_INTEGRATED(x) (1) #endif @@ -31,16 +31,16 @@ #define APIC_EIO_ACK 0x0 #define APIC_RRR 0xC0 #define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFFu<<24) -#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_LDR_MASK (0xFFu << 24) +#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x) << 24)) #define APIC_ALL_CPUS 0xFFu #define APIC_DFR 0xE0 #define APIC_DFR_CLUSTER 0x0FFFFFFFul #define APIC_DFR_FLAT 0xFFFFFFFFul #define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) #define APIC_ISR 0x100 #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ #define APIC_TMR 0x180 @@ -76,27 +76,27 @@ #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 -#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_APIC_DEST_FIELD(x) ((x)<<24) +#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) +#define SET_APIC_DEST_FIELD(x) ((x) << 24) #define APIC_LVTT 0x320 #define APIC_LVTTHMR 0x330 #define APIC_LVTPC 0x340 #define APIC_LVT0 0x350 -#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) -#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) -#define SET_APIC_TIMER_BASE(x) (((x)<<18)) +#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) +#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) +#define SET_APIC_TIMER_BASE(x) (((x) << 18)) #define APIC_TIMER_BASE_CLKIN 0x0 #define APIC_TIMER_BASE_TMBASE 0x1 #define APIC_TIMER_BASE_DIV 0x2 -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) +#define APIC_LVT_TIMER_PERIODIC (1 << 17) +#define APIC_LVT_MASKED (1 << 16) +#define APIC_LVT_LEVEL_TRIGGER (1 << 15) +#define APIC_LVT_REMOTE_IRR (1 << 14) +#define APIC_INPUT_POLARITY (1 << 13) +#define APIC_SEND_PENDING (1 << 12) #define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) +#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 #define APIC_MODE_EXTINT 0x7 @@ -105,7 +105,7 @@ #define APIC_TMICT 0x380 #define APIC_TMCCT 0x390 #define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_TMBASE (1 << 2) #define APIC_TDR_DIV_1 0xB #define APIC_TDR_DIV_2 0x0 #define APIC_TDR_DIV_4 0x1 @@ -115,14 +115,14 @@ #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA #define APIC_EILVT0 0x500 -#define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ +#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ #define APIC_EILVT_NR_AMD_10H 4 -#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) +#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) #define APIC_EILVT_MSG_FIX 0x0 #define APIC_EILVT_MSG_SMI 0x2 #define APIC_EILVT_MSG_NMI 0x4 #define APIC_EILVT_MSG_EXT 0x7 -#define APIC_EILVT_MASKED (1<<16) +#define APIC_EILVT_MASKED (1 << 16) #define APIC_EILVT1 0x510 #define APIC_EILVT2 0x520 #define APIC_EILVT3 0x530 -- cgit v1.2.3 From 3c311febfa8cc240e2922931d7403a6bb7f3fa1b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 
2008 01:01:40 -0700 Subject: include/asm-x86/apic.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index b11d6524fb1..be9639a9a18 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -67,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v) static inline void native_apic_write_atomic(unsigned long reg, u32 v) { - (void) xchg((u32*)(APIC_BASE + reg), v); + (void)xchg((u32 *)(APIC_BASE + reg), v); } static inline u32 native_apic_read(unsigned long reg) -- cgit v1.2.3 From 78ff12eec42a4141d22dac4fdab04994384f6385 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:41 -0700 Subject: include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/atomic_32.h | 143 +++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h index 437aac80171..21a4825148c 100644 --- a/include/asm-x86/atomic_32.h +++ b/include/asm-x86/atomic_32.h @@ -15,138 +15,133 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { + int counter; +} atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t - * + * * Atomically reads the value of @v. - */ + */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value - * + * * Atomically sets the value of @v to @i. - */ -#define atomic_set(v,i) (((v)->counter) = (i)) + */ +#define atomic_set(v, i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v. */ -static __inline__ void atomic_add(int i, atomic_t *v) +static inline void atomic_add(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"+m" (v->counter) - :"ir" (i)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "+m" (v->counter) + : "ir" (i)); } /** * atomic_sub - subtract integer from atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v. */ -static __inline__ void atomic_sub(int i, atomic_t *v) +static inline void atomic_sub(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"+m" (v->counter) - :"ir" (i)); + asm volatile(LOCK_PREFIX "subl %1,%0" + : "+m" (v->counter) + : "ir" (i)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. 
*/ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +static inline int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) + */ +static inline void atomic_inc(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"+m" (v->counter)); + asm volatile(LOCK_PREFIX "incl %0" + : "+m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) + */ +static inline void atomic_dec(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"+m" (v->counter)); + asm volatile(LOCK_PREFIX "decl %0" + : "+m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) + */ +static inline int atomic_dec_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); + asm volatile(LOCK_PREFIX "decl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); return c != 0; } /** - * atomic_inc_and_test - increment and test + * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) + */ +static inline int atomic_inc_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); + asm volatile(LOCK_PREFIX "incl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) * atomic_add_negative - add and test if negative * @v: pointer of type atomic_t * @i: integer value to add - * + * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. 
- */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) + */ +static inline int atomic_add_negative(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ int atomic_add_return(int i, atomic_t *v) +static inline int atomic_add_return(int i, atomic_t *v) { int __i; #ifdef CONFIG_M386 unsigned long flags; - if(unlikely(boot_cpu_data.x86 <= 3)) + if (unlikely(boot_cpu_data.x86 <= 3)) goto no_xadd; #endif /* Modern 486+ processor */ __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; #ifdef CONFIG_M386 @@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */ * * Atomically subtracts @i from @v and returns @v - @i */ -static __inline__ int atomic_sub_return(int i, atomic_t *v) +static inline int atomic_sub_return(int i, atomic_t *v) { - return atomic_add_return(-i,v); + return atomic_add_return(-i, v); } #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) @@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) /* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") +#define atomic_clear_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "andl %0,%1" \ + : : "r" (~(mask)), "m" (*(addr)) : "memory") -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" (mask),"m" (*(addr)) : "memory") +#define atomic_set_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "orl %0,%1" \ + : : "r" (mask), "m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() -- cgit v1.2.3 From 7edb3cd6cbadb686864b8f180232e4dc69d959e8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:42 -0700 Subject: include/asm-x86/atomic_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/atomic_64.h | 251 +++++++++++++++++++++----------------------- 1 file changed, 119 insertions(+), 132 deletions(-) (limited to 'include') diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index 2d20a7a19f6..3e0cd7d3833 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h @@ -22,140 +22,135 @@ * on us. 
We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { + int counter; +} atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t - * + * * Atomically reads the value of @v. - */ + */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value - * + * * Atomically sets the value of @v to @i. - */ -#define atomic_set(v,i) (((v)->counter) = (i)) + */ +#define atomic_set(v, i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v. */ -static __inline__ void atomic_add(int i, atomic_t *v) +static inline void atomic_add(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v. */ -static __inline__ void atomic_sub(int i, atomic_t *v) +static inline void atomic_sub(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "subl %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +static inline int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) + */ +static inline void atomic_inc(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "incl %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) + */ +static inline void atomic_dec(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "decl %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. 
- */ -static __inline__ int atomic_dec_and_test(atomic_t *v) + */ +static inline int atomic_dec_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "decl %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } /** - * atomic_inc_and_test - increment and test + * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) + */ +static inline int atomic_inc_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "incl %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) * atomic_add_negative - add and test if negative * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. - */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) + */ +static inline int atomic_add_negative(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ int atomic_add_return(int i, atomic_t *v) +static inline int atomic_add_return(int i, atomic_t *v) { int __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; } -static __inline__ int atomic_sub_return(int i, atomic_t *v) +static inline int atomic_sub_return(int i, atomic_t *v) { - return atomic_add_return(-i,v); + return atomic_add_return(-i, v); } -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) /* An 64bit atomic type */ -typedef struct { long counter; } atomic64_t; +typedef struct { + long counter; +} atomic64_t; #define ATOMIC64_INIT(i) { (i) } @@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t; * * Atomically sets the value of @v to @i. */ -#define atomic64_set(v,i) (((v)->counter) = (i)) +#define atomic64_set(v, i) (((v)->counter) = (i)) /** * atomic64_add - add integer to atomic64 variable @@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t; * * Atomically adds @i to @v. 
*/ -static __inline__ void atomic64_add(long i, atomic64_t *v) +static inline void atomic64_add(long i, atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addq %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** @@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) * * Atomically subtracts @i from @v. */ -static __inline__ void atomic64_sub(long i, atomic64_t *v) +static inline void atomic64_sub(long i, atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subq %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "subq %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** @@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) +static inline int atomic64_sub_and_test(long i, atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subq %2,%0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) * * Atomically increments @v by 1. */ -static __inline__ void atomic64_inc(atomic64_t *v) +static inline void atomic64_inc(atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incq %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** @@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ -static __inline__ void atomic64_dec(atomic64_t *v) +static inline void atomic64_dec(atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decq %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "decq %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** @@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static __inline__ int atomic64_dec_and_test(atomic64_t *v) +static inline int atomic64_dec_and_test(atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decq %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "decq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic64_inc_and_test(atomic64_t *v) +static inline int atomic64_inc_and_test(atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incq %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "incq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. 
*/ -static __inline__ int atomic64_add_negative(long i, atomic64_t *v) +static inline int atomic64_add_negative(long i, atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addq %2,%0; sets %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ long atomic64_add_return(long i, atomic64_t *v) +static inline long atomic64_add_return(long i, atomic64_t *v) { long __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddq %0, %1;" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddq %0, %1;" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; } -static __inline__ long atomic64_sub_return(long i, atomic64_t *v) +static inline long atomic64_sub_return(long i, atomic64_t *v) { - return atomic64_add_return(-i,v); + return atomic64_add_return(-i, v); } -#define atomic64_inc_return(v) (atomic64_add_return(1,v)) -#define atomic64_dec_return(v) (atomic64_sub_return(1,v)) +#define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +#define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) -#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) /** * atomic_add_unless - add unless the number is a given value @@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { long c, old; c = atomic64_read(v); @@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) /* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") - -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") +#define atomic_clear_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "andl %0,%1" \ + : : "r" (~(mask)), "m" (*(addr)) : "memory") + +#define atomic_set_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "orl %0,%1" \ + : : "r" ((unsigned)(mask)), "m" (*(addr)) \ + : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() -- cgit v1.2.3 From fd591acd0f0b2382e8c0a839be9974e7c019bdb6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:43 -0700 Subject: include/asm-x86/bitops_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops_32.h | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index e4d75fcf9c0..2513a81f82a 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) if (!size) return 0; - /* This looks at memory. Mark it volatile to tell gcc not to move it around */ - __asm__ __volatile__( - "movl $-1,%%eax\n\t" - "xorl %%edx,%%edx\n\t" - "repe; scasl\n\t" - "je 1f\n\t" - "xorl -4(%%edi),%%eax\n\t" - "subl $4,%%edi\n\t" - "bsfl %%eax,%%edx\n" - "1:\tsubl %%ebx,%%edi\n\t" - "shll $3,%%edi\n\t" - "addl %%edi,%%edx" - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); + /* This looks at memory. 
+ * Mark it volatile to tell gcc not to move it around + */ + asm volatile("movl $-1,%%eax\n\t" + "xorl %%edx,%%edx\n\t" + "repe; scasl\n\t" + "je 1f\n\t" + "xorl -4(%%edi),%%eax\n\t" + "subl $4,%%edi\n\t" + "bsfl %%eax,%%edx\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%edx" + : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) + : "1" ((size + 31) >> 5), "2" (addr), + "b" (addr) : "memory"); return res; } @@ -75,7 +77,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) unsigned long val = *addr++; if (val) return __ffs(val) + x; - x += (sizeof(*addr)<<3); + x += sizeof(*addr) << 3; } return x; } @@ -152,10 +154,10 @@ static inline int fls(int x) #include -#define ext2_set_bit_atomic(lock, nr, addr) \ - test_and_set_bit((nr), (unsigned long *)addr) -#define ext2_clear_bit_atomic(lock, nr, addr) \ - test_and_clear_bit((nr), (unsigned long *)addr) +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)(addr)) #include -- cgit v1.2.3 From 49f74946f008add0b22723244976a32b365de06f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:44 -0700 Subject: include/asm-x86/bitops_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops_64.h | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index aaf15194d53..365f8207ea5 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -17,35 +17,35 @@ static inline long __scanbit(unsigned long val, unsigned long max) return val; } -#define find_first_bit(addr,size) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - (__scanbit(*(unsigned long *)addr,(size))) : \ - find_first_bit(addr,size))) - #define find_next_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ find_next_bit(addr,size,off))) -#define find_first_zero_bit(addr,size) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - (__scanbit(~*(unsigned long *)addr,(size))) : \ - find_first_zero_bit(addr,size))) - #define find_next_zero_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ find_next_zero_bit(addr,size,off))) -static inline void set_bit_string(unsigned long *bitmap, unsigned long i, - int len) -{ - unsigned long end = i + len; +#define find_first_bit(addr, size) \ + ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ + ? (__scanbit(*(unsigned long *)(addr), (size))) \ + : find_first_bit((addr), (size)))) + +#define find_first_zero_bit(addr, size) \ + ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ + ? (__scanbit(~*(unsigned long *)(addr), (size))) \ + : find_first_zero_bit((addr), (size)))) + +static inline void set_bit_string(unsigned long *bitmap, unsigned long i, + int len) +{ + unsigned long end = i + len; while (i < end) { - __set_bit(i, bitmap); + __set_bit(i, bitmap); i++; } -} +} /** * ffz - find first zero in word. 
@@ -150,10 +150,10 @@ static inline int fls(int x) #include -#define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit_atomic(lock,nr,addr) \ - test_and_clear_bit((nr),(unsigned long*)addr) +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)(addr)) #include -- cgit v1.2.3 From 286275c90f148562b973b1e1f39f9689e6676dc4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:45 -0700 Subject: include/asm-x86/bitops.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops.h | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 7a76555b676..1ae7b270a1e 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -23,13 +23,13 @@ #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) /* Technically wrong, but this avoids compilation errors on some gcc versions. */ -#define ADDR "=m" (*(volatile long *) addr) -#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5]) +#define ADDR "=m" (*(volatile long *)addr) +#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5]) #else #define ADDR "+m" (*(volatile long *) addr) -#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5]) +#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5]) #endif -#define BASE_ADDR "m" (*(volatile int *) addr) +#define BASE_ADDR "m" (*(volatile int *)addr) /** * set_bit - Atomically set a bit in memory @@ -48,9 +48,7 @@ */ static inline void set_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "bts %1,%0" - : ADDR - : "Ir" (nr) : "memory"); + asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory"); } /** @@ -82,8 +80,7 @@ static inline void __set_bit(int nr, volatile void *addr) */ static inline void clear_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btr %1,%2" - : BIT_ADDR : "Ir" (nr), BASE_ADDR); + asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -151,8 +148,7 @@ static inline void __change_bit(int nr, volatile void *addr) */ static inline void change_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btc %1,%2" - : BIT_ADDR : "Ir" (nr), BASE_ADDR); + asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -168,9 +164,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) int oldbit; asm volatile(LOCK_PREFIX "bts %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } @@ -202,8 +196,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) asm volatile("bts %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -221,8 +214,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) asm volatile(LOCK_PREFIX "btr %2,%1\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } @@ -242,8 +234,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) asm volatile("btr %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), 
BASE_ADDR); return oldbit; } @@ -254,8 +245,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) asm volatile("btc %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -274,8 +264,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) asm volatile(LOCK_PREFIX "btc %2,%1\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } -- cgit v1.2.3 From 86d8a08616ecbc510323bfca591816a5709c6e54 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:46 -0700 Subject: include/asm-x86/bug.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bug.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index 8d477a20139..b69aa64b82a 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h @@ -12,25 +12,25 @@ # define __BUG_C0 "2:\t.quad 1b, %c0\n" #endif -#define BUG() \ - do { \ - asm volatile("1:\tud2\n" \ - ".pushsection __bug_table,\"a\"\n" \ - __BUG_C0 \ - "\t.word %c1, 0\n" \ - "\t.org 2b+%c2\n" \ - ".popsection" \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (sizeof(struct bug_entry))); \ - for(;;) ; \ - } while(0) +#define BUG() \ +do { \ + asm volatile("1:\tud2\n" \ + ".pushsection __bug_table,\"a\"\n" \ + __BUG_C0 \ + "\t.word %c1, 0\n" \ + "\t.org 2b+%c2\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (sizeof(struct bug_entry))); \ + for (;;) ; \ +} while (0) #else -#define BUG() \ - do { \ - asm volatile("ud2"); \ - for(;;) ; \ - } while(0) +#define BUG() \ +do { \ + asm volatile("ud2"); \ + for (;;) ; \ +} while (0) #endif #endif /* !CONFIG_BUG */ -- cgit v1.2.3 From 346050952cac11b25a98c7e1743412b416827314 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:47 -0700 Subject: include/asm-x86/byteorder.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/byteorder.h | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index fe2f2e5d51b..e02ae2d89ac 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h @@ -8,50 +8,59 @@ #ifdef __i386__ -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) { #ifdef CONFIG_X86_BSWAP - __asm__("bswap %0" : "=r" (x) : "0" (x)); + asm("bswap %0" : "=r" (x) : "0" (x)); #else - __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ - "rorl $16,%0\n\t" /* swap words */ - "xchgb %b0,%h0" /* swap higher bytes */ - :"=q" (x) - : "0" (x)); + asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ + "rorl $16,%0\n\t" /* swap words */ + "xchgb %b0,%h0" /* swap higher bytes */ + : "=q" (x) + : "0" (x)); #endif return x; } -static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) +static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) { union { - struct { __u32 a,b; } s; + struct { + __u32 a; + __u32 b; + } s; __u64 u; } v; v.u = val; #ifdef CONFIG_X86_BSWAP - __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1" + asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #else v.s.a = 
___arch__swab32(v.s.a); v.s.b = ___arch__swab32(v.s.b); - __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); + asm("xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); #endif return v.u; } #else /* __i386__ */ -static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) +static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) { - __asm__("bswapq %0" : "=r" (x) : "0" (x)); + asm("bswapq %0" + : "=r" (x) + : "0" (x)); return x; } -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) { - __asm__("bswapl %0" : "=r" (x) : "0" (x)); + asm("bswapl %0" + : "=r" (x) + : "0" (x)); return x; } -- cgit v1.2.3 From 3f61b19a9f60a0469a19113be8bb3b3623006de2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:48 -0700 Subject: include/asm-x86/cacheflush.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 90437d3f761..7ab5b520b7b 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -14,18 +14,18 @@ #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma,pg) do { } while (0) -#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_icache_page(vma, pg) do { } while (0) +#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + memcpy((dst), (src), (len)) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy((dst), (src), (len)) int __deprecated_for_modules change_page_attr(struct page *page, int numpages, - pgprot_t prot); + pgprot_t prot); int set_pages_uc(struct page *page, int numpages); int set_pages_wb(struct page *page, int numpages); -- cgit v1.2.3 From 0883e91ae209f4ada4db9b383026df77351c1320 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:49 -0700 Subject: include/asm-x86/checksum_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/checksum_32.h | 152 +++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h index 75194abbe8e..52bbb0d8c4c 100644 --- a/include/asm-x86/checksum_32.h +++ b/include/asm-x86/checksum_32.h @@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); */ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone @@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, * If you use these functions directly please 
don't forget the * access_ok(). */ -static __inline__ -__wsum csum_partial_copy_nocheck (const void *src, void *dst, - int len, __wsum sum) +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum) { - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); } -static __inline__ -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) +static inline __wsum csum_partial_copy_from_user(const void __user *src, + void *dst, + int len, __wsum sum, + int *err_ptr) { might_sleep(); return csum_partial_copy_generic((__force void *)src, dst, - len, sum, err_ptr, NULL); + len, sum, err_ptr, NULL); } /* @@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; - __asm__ __volatile__( - "movl (%1), %0 ;\n" - "subl $4, %2 ;\n" - "jbe 2f ;\n" - "addl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" -"1: adcl 16(%1), %0 ;\n" - "lea 4(%1), %1 ;\n" - "decl %2 ;\n" - "jne 1b ;\n" - "adcl $0, %0 ;\n" - "movl %0, %2 ;\n" - "shrl $16, %0 ;\n" - "addw %w2, %w0 ;\n" - "adcl $0, %0 ;\n" - "notl %0 ;\n" -"2: ;\n" + asm volatile("movl (%1), %0 ;\n" + "subl $4, %2 ;\n" + "jbe 2f ;\n" + "addl 4(%1), %0 ;\n" + "adcl 8(%1), %0 ;\n" + "adcl 12(%1), %0;\n" + "1: adcl 16(%1), %0 ;\n" + "lea 4(%1), %1 ;\n" + "decl %2 ;\n" + "jne 1b ;\n" + "adcl $0, %0 ;\n" + "movl %0, %2 ;\n" + "shrl $16, %0 ;\n" + "addw %w2, %w0 ;\n" + "adcl $0, %0 ;\n" + "notl %0 ;\n" + "2: ;\n" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl) - : "1" (iph), "2" (ihl) - : "memory"); + : "=r" (sum), "=r" (iph), "=r" (ihl) + : "1" (iph), "2" (ihl) + : "memory"); return (__force __sum16)sum; } @@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) static inline __sum16 csum_fold(__wsum sum) { - __asm__( - "addl %1, %0 ;\n" - "adcl $0xffff, %0 ;\n" - : "=r" (sum) - : "r" ((__force u32)sum << 16), - "0" ((__force u32)sum & 0xffff0000) - ); + asm("addl %1, %0 ;\n" + "adcl $0xffff, %0 ;\n" + : "=r" (sum) + : "r" ((__force u32)sum << 16), + "0" ((__force u32)sum & 0xffff0000)); return (__force __sum16)(~(__force u32)sum >> 16); } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { - __asm__( - "addl %1, %0 ;\n" - "adcl %2, %0 ;\n" - "adcl %3, %0 ;\n" - "adcl $0, %0 ;\n" - : "=r" (sum) - : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); - return sum; + asm("addl %1, %0 ;\n" + "adcl %2, %0 ;\n" + "adcl %3, %0 ;\n" + "adcl $0, %0 ;\n" + : "=r" (sum) + : "g" (daddr), "g"(saddr), + "g" ((len + proto) << 8), "0" (sum)); + return sum; } /* @@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } /* @@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 
static inline __sum16 ip_compute_csum(const void *buff, int len) { - return csum_fold (csum_partial(buff, len, 0)); + return csum_fold(csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) { - __asm__( - "addl 0(%1), %0 ;\n" - "adcl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" - "adcl 0(%2), %0 ;\n" - "adcl 4(%2), %0 ;\n" - "adcl 8(%2), %0 ;\n" - "adcl 12(%2), %0 ;\n" - "adcl %3, %0 ;\n" - "adcl %4, %0 ;\n" - "adcl $0, %0 ;\n" - : "=&r" (sum) - : "r" (saddr), "r" (daddr), - "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); + asm("addl 0(%1), %0 ;\n" + "adcl 4(%1), %0 ;\n" + "adcl 8(%1), %0 ;\n" + "adcl 12(%1), %0 ;\n" + "adcl 0(%2), %0 ;\n" + "adcl 4(%2), %0 ;\n" + "adcl 8(%2), %0 ;\n" + "adcl 12(%2), %0 ;\n" + "adcl %3, %0 ;\n" + "adcl %4, %0 ;\n" + "adcl $0, %0 ;\n" + : "=&r" (sum) + : "r" (saddr), "r" (daddr), + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); return csum_fold(sum); } @@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) +static inline __wsum csum_and_copy_to_user(const void *src, + void __user *dst, + int len, __wsum sum, + int *err_ptr) { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); + return csum_partial_copy_generic(src, (__force void *)dst, + len, sum, NULL, err_ptr); if (len) *err_ptr = -EFAULT; -- cgit v1.2.3 From 3d3c6e10036dcbbe9fe7d69911f5638faecfbaeb Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:50 -0700 Subject: include/asm-x86/checksum_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/checksum_64.h | 118 ++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index e5f79997dec..8bd861cc526 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h @@ -1,33 +1,31 @@ #ifndef _X86_64_CHECKSUM_H #define _X86_64_CHECKSUM_H -/* - * Checksums for x86-64 - * Copyright 2002 by Andi Kleen, SuSE Labs +/* + * Checksums for x86-64 + * Copyright 2002 by Andi Kleen, SuSE Labs * with some code from asm-x86/checksum.h - */ + */ #include #include #include -/** +/** * csum_fold - Fold and invert a 32bit checksum. * sum: 32bit unfolded sum - * + * * Fold a 32bit running checksum to 16bit and invert it. This is usually * the last step before putting a checksum into a packet. * Make sure not to mix with 64bit checksums. 
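 * (Worked example, not part of this patch: folding sum = 0x12345678
 * adds the 16-bit halves, 0x1234 + 0x5678 = 0x68ac, and inverts the
 * low word of the result, giving the __sum16 value 0x9753.)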
*/ static inline __sum16 csum_fold(__wsum sum) { - __asm__( - " addl %1,%0\n" - " adcl $0xffff,%0" - : "=r" (sum) - : "r" ((__force u32)sum << 16), - "0" ((__force u32)sum & 0xffff0000) - ); + asm(" addl %1,%0\n" + " adcl $0xffff,%0" + : "=r" (sum) + : "r" ((__force u32)sum << 16), + "0" ((__force u32)sum & 0xffff0000)); return (__force __sum16)(~(__force u32)sum >> 16); } @@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum) * ip_fast_csum - Compute the IPv4 header checksum efficiently. * iph: ipv4 header * ihl: length of header / 4 - */ + */ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; - asm( " movl (%1), %0\n" - " subl $4, %2\n" - " jbe 2f\n" - " addl 4(%1), %0\n" - " adcl 8(%1), %0\n" - " adcl 12(%1), %0\n" - "1: adcl 16(%1), %0\n" - " lea 4(%1), %1\n" - " decl %2\n" - " jne 1b\n" - " adcl $0, %0\n" - " movl %0, %2\n" - " shrl $16, %0\n" - " addw %w2, %w0\n" - " adcl $0, %0\n" - " notl %0\n" - "2:" + asm(" movl (%1), %0\n" + " subl $4, %2\n" + " jbe 2f\n" + " addl 4(%1), %0\n" + " adcl 8(%1), %0\n" + " adcl 12(%1), %0\n" + "1: adcl 16(%1), %0\n" + " lea 4(%1), %1\n" + " decl %2\n" + " jne 1b\n" + " adcl $0, %0\n" + " movl %0, %2\n" + " shrl $16, %0\n" + " addw %w2, %w0\n" + " adcl $0, %0\n" + " notl %0\n" + "2:" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl) - : "1" (iph), "2" (ihl) - : "memory"); + : "=r" (sum), "=r" (iph), "=r" (ihl) + : "1" (iph), "2" (ihl) + : "memory"); return (__force __sum16)sum; } -/** +/** * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet - * @sum: initial sum to be added in (32bit unfolded) - * - * Returns the pseudo header checksum the input data. Result is + * @sum: initial sum to be added in (32bit unfolded) + * + * Returns the pseudo header checksum the input data. Result is * 32bit unfolded. */ static inline __wsum @@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, " adcl %2, %0\n" " adcl %3, %0\n" " adcl $0, %0\n" - : "=r" (sum) + : "=r" (sum) : "g" (daddr), "g" (saddr), "g" ((len + proto)<<8), "0" (sum)); - return sum; + return sum; } -/** +/** * csum_tcpup_magic - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet - * @sum: initial sum to be added in (32bit unfolded) - * + * @sum: initial sum to be added in (32bit unfolded) + * * Returns the 16bit pseudo header checksum the input data already * complemented and ready to be filled in. */ -static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, __wsum sum) +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } -/** +/** * csum_partial - Compute an internet checksum. * @buff: buffer to be checksummed * @len: length of buffer. @@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, * Returns the 32bit unfolded internet checksum of the buffer. * Before filling it in it needs to be csum_fold()'ed. * buff should be aligned to a 64bit boundary if possible. 
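 * A typical caller (a sketch, not part of this patch) sums the buffer
 * in one pass and folds the result once at the end:
 *
 *	__wsum sum = csum_partial(buf, len, 0);
 *	__sum16 check = csum_fold(sum);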
- */ + */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 @@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); /* Do not call this directly. Use the wrappers below */ extern __wsum csum_partial_copy_generic(const void *src, const void *dst, - int len, - __wsum sum, - int *src_err_ptr, int *dst_err_ptr); + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum isum, int *errp); + int len, __wsum isum, int *errp); extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, - int len, __wsum isum, int *errp); -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, - __wsum sum); + int len, __wsum isum, int *errp); +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); /* Old names. To be removed. */ #define csum_and_copy_to_user csum_partial_copy_to_user #define csum_and_copy_from_user csum_partial_copy_from_user -/** +/** * ip_compute_csum - Compute an 16bit IP checksum. * @buff: buffer address. * @len: length of buffer. @@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len); * @proto: protocol of packet * @sum: initial sum (32bit unfolded) to be added in * - * Computes an IPv6 pseudo header checksum. This sum is added the checksum + * Computes an IPv6 pseudo header checksum. This sum is added the checksum * into UDP/TCP packets and contains some link layer information. * Returns the unfolded 32bit checksum. */ @@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, static inline unsigned add32_with_carry(unsigned a, unsigned b) { asm("addl %2,%0\n\t" - "adcl $0,%0" - : "=r" (a) + "adcl $0,%0" + : "=r" (a) : "0" (a), "r" (b)); return a; } #endif - -- cgit v1.2.3 From 8121019cad7bfe61f8f626a85427aca66dfe0f1e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:51 -0700 Subject: include/asm-x86/cmpxchg_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/cmpxchg_32.h | 253 ++++++++++++++++++++++--------------------- 1 file changed, 132 insertions(+), 121 deletions(-) (limited to 'include') diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index 959fad00dff..bf5a69d1329 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h @@ -8,9 +8,12 @@ * you need to test for the feature in boot_cpu_data. */ -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) +#define xchg(ptr, v) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) -struct __xchg_dummy { unsigned long a[100]; }; +struct __xchg_dummy { + unsigned long a[100]; +}; #define __xg(x) ((struct __xchg_dummy *)(x)) /* @@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; }; * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. 
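 * (Illustrative note, not part of this patch: without cmpxchg8b the
 * two 32-bit halves would be written by separate movl stores, so a
 * concurrent 64-bit reader could observe a torn value, half old and
 * half new.)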
*/ -static inline void __set_64bit (unsigned long long * ptr, - unsigned int low, unsigned int high) +static inline void __set_64bit(unsigned long long *ptr, + unsigned int low, unsigned int high) { - __asm__ __volatile__ ( - "\n1:\t" - "movl (%0), %%eax\n\t" - "movl 4(%0), %%edx\n\t" - LOCK_PREFIX "cmpxchg8b (%0)\n\t" - "jnz 1b" - : /* no outputs */ - : "D"(ptr), - "b"(low), - "c"(high) - : "ax","dx","memory"); + asm volatile("\n1:\t" + "movl (%0), %%eax\n\t" + "movl 4(%0), %%edx\n\t" + LOCK_PREFIX "cmpxchg8b (%0)\n\t" + "jnz 1b" + : /* no outputs */ + : "D"(ptr), + "b"(low), + "c"(high) + : "ax", "dx", "memory"); } -static inline void __set_64bit_constant (unsigned long long *ptr, - unsigned long long value) +static inline void __set_64bit_constant(unsigned long long *ptr, + unsigned long long value) { - __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); + __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); } -#define ll_low(x) *(((unsigned int*)&(x))+0) -#define ll_high(x) *(((unsigned int*)&(x))+1) -static inline void __set_64bit_var (unsigned long long *ptr, - unsigned long long value) +#define ll_low(x) *(((unsigned int *)&(x)) + 0) +#define ll_high(x) *(((unsigned int *)&(x)) + 1) + +static inline void __set_64bit_var(unsigned long long *ptr, + unsigned long long value) { - __set_64bit(ptr,ll_low(value), ll_high(value)); + __set_64bit(ptr, ll_low(value), ll_high(value)); } -#define set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit_constant(ptr, value) : \ - __set_64bit_var(ptr, value) ) +#define set_64bit(ptr, value) \ + (__builtin_constant_p((value)) \ + ? __set_64bit_constant((ptr), (value)) \ + : __set_64bit_var((ptr), (value))) -#define _set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ - __set_64bit(ptr, ll_low(value), ll_high(value)) ) +#define _set_64bit(ptr, value) \ + (__builtin_constant_p(value) \ + ? __set_64bit(ptr, (unsigned int)(value), \ + (unsigned int)((value) >> 32)) \ + : __set_64bit(ptr, ll_low((value)), ll_high((value)))) /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. 
--ANK */ -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) { switch (size) { - case 1: - __asm__ __volatile__("xchgb %b0,%1" - :"=q" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 2: - __asm__ __volatile__("xchgw %w0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 4: - __asm__ __volatile__("xchgl %0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; + case 1: + asm volatile("xchgb %b0,%1" + : "=q" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 2: + asm volatile("xchgw %w0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 4: + asm volatile("xchgl %0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; } return x; } @@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define sync_cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) +#define sync_cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) #endif #ifdef CONFIG_X86_CMPXCHG64 -#define cmpxchg64(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ - (unsigned long long)(n))) -#define cmpxchg64_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ - (unsigned long long)(n))) +#define cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) +#define cmpxchg64_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, @@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long prev; switch (size) { case 1: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; @@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, * 
isn't. */ static inline unsigned long __sync_cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("lock; cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__("lock; cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__("cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long long __cmpxchg64(volatile void *ptr, - unsigned long long old, unsigned long long new) + unsigned long long old, + unsigned long long new) { unsigned long long prev; - __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); return prev; } static inline unsigned long long __cmpxchg64_local(volatile void *ptr, - unsigned long long old, unsigned long long new) + unsigned long long old, + unsigned long long new) { unsigned long long prev; - __asm__ __volatile__("cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); + asm volatile("cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); return prev; } @@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, - unsigned long new, int size) + unsigned long new, int size) { switch (size) { case 1: -- cgit v1.2.3 From e52da357a15db9e12b96b4e40dffe6b9e54bb976 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:52 -0700 Subject: include/asm-x86/cmpxchg_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: 
Ingo Molnar --- include/asm-x86/cmpxchg_64.h | 134 ++++++++++++++++++++++--------------------- 1 file changed, 69 insertions(+), 65 deletions(-) (limited to 'include') diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h index 56f5b41e071..d9b26b9a28c 100644 --- a/include/asm-x86/cmpxchg_64.h +++ b/include/asm-x86/cmpxchg_64.h @@ -3,7 +3,8 @@ #include /* Provides LOCK_PREFIX */ -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) +#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ + (ptr), sizeof(*(ptr)))) #define __xg(x) ((volatile long *)(x)) @@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) { switch (size) { - case 1: - __asm__ __volatile__("xchgb %b0,%1" - :"=q" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 2: - __asm__ __volatile__("xchgw %w0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 4: - __asm__ __volatile__("xchgl %k0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 8: - __asm__ __volatile__("xchgq %0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; + case 1: + asm volatile("xchgb %b0,%1" + : "=q" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 2: + asm volatile("xchgw %w0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 4: + asm volatile("xchgl %k0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 8: + asm volatile("xchgq %0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; } return x; } @@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long prev; switch (size) { case 1: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 8: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : 
"memory"); return prev; case 2: - __asm__ __volatile__("cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("cmpxchgl %k1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgl %k1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 8: - __asm__ __volatile__("cmpxchgq %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgq %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; @@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #define cmpxchg(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) + (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64(ptr, o, n) \ - ({ \ +({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg((ptr), (o), (n)); \ - }) +}) #define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) + (unsigned long)(n), \ + sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) \ - ({ \ +({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ - }) +}) #endif -- cgit v1.2.3 From c96a6d41aa033c03b43e5a09448bddcc971797b1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:53 -0700 Subject: include/asm-x86/compat.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index d3e8f3e87ee..1793ac317a3 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h @@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static __inline__ void __user *compat_alloc_user_space(long len) +static inline void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->sp - len; -- cgit v1.2.3 From 83ecfdd0a0a065e55172889dc660c5db3c14633a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:55 -0700 Subject: include/asm-x86/current_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/current_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h index d3524853991..5af9bdb97a1 100644 --- a/include/asm-x86/current_32.h +++ b/include/asm-x86/current_32.h @@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void) { return x86_read_percpu(current_task); } - + #define current get_current() #endif /* !(_I386_CURRENT_H) */ -- cgit v1.2.3 From 2cade899df7d9cc9e977a24898b0bd5a6f4a8dfd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:56 -0700 Subject: include/asm-x86/current_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/current_64.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h index bc8adecee66..2d368ede2fc 
100644 --- a/include/asm-x86/current_64.h +++ b/include/asm-x86/current_64.h @@ -1,23 +1,23 @@ #ifndef _X86_64_CURRENT_H #define _X86_64_CURRENT_H -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLY__) struct task_struct; #include -static inline struct task_struct *get_current(void) -{ - struct task_struct *t = read_pda(pcurrent); +static inline struct task_struct *get_current(void) +{ + struct task_struct *t = read_pda(pcurrent); return t; -} +} #define current get_current() #else #ifndef ASM_OFFSET_H -#include +#include #endif #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg -- cgit v1.2.3 From b77619001e3acd228209bfdd864734cfdddb3d92 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:57 -0700 Subject: include/asm-x86/desc_defs.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/desc_defs.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index e33f078b3e5..eccb4ea1f91 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h @@ -18,17 +18,19 @@ * incrementally. We keep the signature as a struct, rather than an union, * so we can get rid of it transparently in the future -- glommer */ -// 8 byte segment descriptor +/* 8 byte segment descriptor */ struct desc_struct { union { - struct { unsigned int a, b; }; + struct { + unsigned int a; + unsigned int b; + }; struct { u16 limit0; u16 base0; unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; }; - }; } __attribute__((packed)); @@ -39,7 +41,7 @@ enum { GATE_TASK = 0x5, }; -// 16byte gate +/* 16byte gate */ struct gate_struct64 { u16 offset_low; u16 segment; @@ -56,10 +58,10 @@ struct gate_struct64 { enum { DESC_TSS = 0x9, DESC_LDT = 0x2, - DESCTYPE_S = 0x10, /* !system */ + DESCTYPE_S = 0x10, /* !system */ }; -// LDT or TSS descriptor in the GDT. 16 bytes. +/* LDT or TSS descriptor in the GDT. 16 bytes. 
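 * (Context note, not part of this patch: system descriptors double in
 * size in long mode; the extra eight bytes carry bits 63..32 of the
 * base address in the base3 field below.)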
*/ struct ldttss_desc64 { u16 limit0; u16 base0; @@ -84,7 +86,6 @@ struct desc_ptr { unsigned long address; } __attribute__((packed)) ; - #endif /* !__ASSEMBLY__ */ #endif -- cgit v1.2.3 From c1773a167defdec919b1dbfc9100054c44fd0ff5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:58 -0700 Subject: include/asm-x86/desc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/desc.h | 61 +++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 5b6a05d3a77..268a012bcd7 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h @@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) } static inline void pack_gate(gate_desc *gate, unsigned char type, - unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) - + unsigned long base, unsigned dpl, unsigned flags, + unsigned short seg) { gate->a = (seg << 16) | (base & 0xffff); gate->b = (base & 0xffff0000) | @@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr) #define load_TR_desc() native_load_tr_desc() #define load_gdt(dtr) native_load_gdt(dtr) #define load_idt(dtr) native_load_idt(dtr) -#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) -#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) +#define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) +#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) #define store_idt(dtr) native_store_idt(dtr) #define store_tr(tr) (tr = native_store_tr()) -#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) +#define store_ldt(ldt) asm("sldt %0":"=m" (ldt)) #define load_TLS(t, cpu) native_load_tls(t, cpu) #define set_ldt native_set_ldt -#define write_ldt_entry(dt, entry, desc) \ - native_write_ldt_entry(dt, entry, desc) -#define write_gdt_entry(dt, entry, desc, type) \ - native_write_gdt_entry(dt, entry, desc, type) -#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) +#define write_ldt_entry(dt, entry, desc) \ + native_write_ldt_entry(dt, entry, desc) +#define write_gdt_entry(dt, entry, desc, type) \ + native_write_gdt_entry(dt, entry, desc, type) +#define write_idt_entry(dt, entry, g) \ + native_write_idt_entry(dt, entry, g) #endif static inline void native_write_idt_entry(gate_desc *idt, int entry, @@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, { desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | - (limit & 0x000f0000) | ((type & 0xff) << 8) | - ((flags & 0xf) << 20); + (limit & 0x000f0000) | ((type & 0xff) << 8) | + ((flags & 0xf) << 20); desc->p = 1; } @@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; desc->base3 = PTR_HIGH(addr); #else - pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); #endif } @@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) * last valid byte */ set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, - IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); + IO_BITMAP_OFFSET + IO_BITMAP_BYTES + + sizeof(unsigned long) - 1); write_gdt_entry(d, entry, &tss, DESC_TSS); } @@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned 
int entry, void *addr) static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) - __asm__ __volatile__("lldt %w0"::"q" (0)); + asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; @@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) DESC_LDT, entries * sizeof(ldt) - 1); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); - __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); + asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } } @@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } -#define _LDT_empty(info) (\ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0) +#define _LDT_empty(info) \ + ((info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0) #ifdef CONFIG_X86_64 #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) @@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc) } static inline void _set_gate(int gate, unsigned type, void *addr, - unsigned dpl, unsigned ist, unsigned seg) + unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); @@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist) * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. */ #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ - movb idx*8+4(gdt), lo_b; \ - movb idx*8+7(gdt), hi_b; \ - shll $16, base; \ - movw idx*8+2(gdt), lo_w; + movb idx * 8 + 4(gdt), lo_b; \ + movb idx * 8 + 7(gdt), hi_b; \ + shll $16, base; \ + movw idx * 8 + 2(gdt), lo_w; #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 925a09b27fc73593eb705e50e6db86c4a22091db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:59 -0700 Subject: include/asm-x86/div64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/div64.h | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h index e98d16e7a37..0dbf8bf3ef0 100644 --- a/include/asm-x86/div64.h +++ b/include/asm-x86/div64.h @@ -17,18 +17,20 @@ * This ends up being the most efficient "calling * convention" on x86. 
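 * A worked example (not part of this patch): do_div() divides the
 * 64-bit lvalue in place and evaluates to the remainder, so
 *
 *	u64 ns = 1000000123;
 *	u32 rem = do_div(ns, 1000000000);
 *
 * leaves ns == 1 and rem == 123.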
*/ -#define do_div(n,base) ({ \ - unsigned long __upper, __low, __high, __mod, __base; \ - __base = (base); \ - asm("":"=a" (__low), "=d" (__high):"A" (n)); \ - __upper = __high; \ - if (__high) { \ - __upper = __high % (__base); \ - __high = __high / (__base); \ - } \ - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ - asm("":"=A" (n):"a" (__low),"d" (__high)); \ - __mod; \ +#define do_div(n, base) \ +({ \ + unsigned long __upper, __low, __high, __mod, __base; \ + __base = (base); \ + asm("":"=a" (__low), "=d" (__high) : "A" (n)); \ + __upper = __high; \ + if (__high) { \ + __upper = __high % (__base); \ + __high = __high / (__base); \ + } \ + asm("divl %2":"=a" (__low), "=d" (__mod) \ + : "rm" (__base), "0" (__low), "1" (__upper)); \ + asm("":"=A" (n) : "a" (__low), "d" (__high)); \ + __mod; \ }) /* @@ -37,14 +39,13 @@ * * Warning, this will do an exception if X overflows. */ -#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) +#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c) -static inline long -div_ll_X_l_rem(long long divs, long div, long *rem) +static inline long div_ll_X_l_rem(long long divs, long div, long *rem) { long dum2; - __asm__("divl %2":"=a"(dum2), "=d"(*rem) - : "rm"(div), "A"(divs)); + asm("divl %2":"=a"(dum2), "=d"(*rem) + : "rm"(div), "A"(divs)); return dum2; -- cgit v1.2.3 From 113cbebac7c88ce78a48801e159108ce6c9d1fb3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:00 -0700 Subject: include/asm-x86/dma.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/dma.h | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index e9733ce8988..ca1098a7e58 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h @@ -12,7 +12,6 @@ #include /* need byte IO */ #include - #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER #define dma_outb outb_p #else @@ -74,15 +73,15 @@ #ifdef CONFIG_X86_32 /* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) +#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) #else /* 16MB ISA DMA zone */ -#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) +#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ -#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) +#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) /* Compat define for old dma zone */ #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) @@ -154,20 +153,20 @@ extern spinlock_t dma_spin_lock; -static __inline__ unsigned long claim_dma_lock(void) +static inline unsigned long claim_dma_lock(void) { unsigned long flags; spin_lock_irqsave(&dma_spin_lock, flags); return flags; } -static __inline__ void release_dma_lock(unsigned long flags) +static inline void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); } /* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) +static inline void enable_dma(unsigned int dmanr) { if (dmanr <= 3) dma_outb(dmanr, DMA1_MASK_REG); @@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr) dma_outb(dmanr & 3, DMA2_MASK_REG); } -static __inline__ void disable_dma(unsigned int dmanr) +static inline void disable_dma(unsigned int dmanr) { if (dmanr <= 3) 
dma_outb(dmanr | 4, DMA1_MASK_REG); @@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr) * --- In order to do that, the DMA routines below should --- * --- only be used while holding the DMA lock ! --- */ -static __inline__ void clear_dma_ff(unsigned int dmanr) +static inline void clear_dma_ff(unsigned int dmanr) { if (dmanr <= 3) dma_outb(0, DMA1_CLEAR_FF_REG); @@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr) } /* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +static inline void set_dma_mode(unsigned int dmanr, char mode) { if (dmanr <= 3) dma_outb(mode | dmanr, DMA1_MODE_REG); @@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) * the lower 16 bits of the DMA current address register, but a 64k boundary * may have been crossed. */ -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +static inline void set_dma_page(unsigned int dmanr, char pagenr) { switch (dmanr) { case 0: @@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) /* Set transfer address & page bits for specific DMA channel. * Assumes dma flipflop is clear. */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) { set_dma_page(dmanr, a>>16); if (dmanr <= 3) { dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); } else { - dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); - dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); } } @@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) * Assumes dma flip-flop is clear. * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +static inline void set_dma_count(unsigned int dmanr, unsigned int count) { count--; if (dmanr <= 3) { - dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); - dma_outb((count >> 8) & 0xff, - ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); } else { - dma_outb((count >> 1) & 0xff, - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); - dma_outb((count >> 9) & 0xff, - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 1) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); } } @@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) * * Assumes DMA flip-flop is clear. 
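 * Typical call sequence (a sketch, not part of this patch):
 *
 *	flags = claim_dma_lock();
 *	clear_dma_ff(dmanr);
 *	residue = get_dma_residue(dmanr);
 *	release_dma_lock(flags);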
*/ -static __inline__ int get_dma_residue(unsigned int dmanr) +static inline int get_dma_residue(unsigned int dmanr) { unsigned int io_port; /* using short to get 16-bit wrap around */ -- cgit v1.2.3 From c884534f7e74e017f96f81efc31f26ef2b15572e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:03 -0700 Subject: include/asm-x86/dwarf2_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/dwarf2_64.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h index eedc08526b0..c950519a264 100644 --- a/include/asm-x86/dwarf2_64.h +++ b/include/asm-x86/dwarf2_64.h @@ -1,16 +1,15 @@ #ifndef _DWARF2_H #define _DWARF2_H 1 - #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif -/* +/* Macros for dwarf2 CFI unwind table entries. - See "as.info" for details on these pseudo ops. Unfortunately - they are only supported in very new binutils, so define them - away for older version. + See "as.info" for details on these pseudo ops. Unfortunately + they are only supported in very new binutils, so define them + away for older version. */ #ifdef CONFIG_AS_CFI -- cgit v1.2.3 From 1257d6e0408ba21d72dc3db9ef69d7287c74507b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:04 -0700 Subject: include/asm-x86/e820_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/e820_32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index e7207a6de3e..43b1a8bd4b3 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h @@ -34,8 +34,8 @@ extern void e820_register_memory(void); extern void limit_regions(unsigned long long size); extern void print_memory_map(char *who); extern void init_iomem_resources(struct resource *code_resource, - struct resource *data_resource, - struct resource *bss_resource); + struct resource *data_resource, + struct resource *bss_resource); #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) extern void e820_mark_nosave_regions(void); -- cgit v1.2.3 From 86bbc83526a4543db0a4ffed8e4cf679eddd534c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:05 -0700 Subject: include/asm-x86/e820_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/e820_64.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index d38820b31c1..f478c57eb06 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -14,22 +14,24 @@ #include #ifndef __ASSEMBLY__ -extern unsigned long find_e820_area(unsigned long start, unsigned long end, +extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned long size, unsigned long align); extern unsigned long find_e820_area_size(unsigned long start, unsigned long *sizep, unsigned long align); -extern void add_memory_region(unsigned long start, unsigned long size, +extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, unsigned new_type); extern void setup_memory_region(void); -extern void contig_e820_setup(void); +extern void 
contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); extern void e820_reserve_resources(void); extern void e820_mark_nosave_regions(void); -extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); -extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); +extern int e820_any_mapped(unsigned long start, unsigned long end, + unsigned type); +extern int e820_all_mapped(unsigned long start, unsigned long end, + unsigned type); extern int e820_any_non_reserved(unsigned long start, unsigned long end); extern int is_memory_any_valid(unsigned long start, unsigned long end); extern int e820_all_non_reserved(unsigned long start, unsigned long end); @@ -37,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end); extern unsigned long e820_hole_size(unsigned long start, unsigned long end); extern void e820_setup_gap(void); -extern void e820_register_active_regions(int nid, - unsigned long start_pfn, unsigned long end_pfn); +extern void e820_register_active_regions(int nid, unsigned long start_pfn, + unsigned long end_pfn); extern void finish_e820_parsing(void); -- cgit v1.2.3 From 451dd9835898d4cc3c0ee8f9e4883807b760eb02 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:06 -0700 Subject: include/asm-x86/edac.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/edac.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h index cf3200a745a..a8088f63a30 100644 --- a/include/asm-x86/edac.h +++ b/include/asm-x86/edac.h @@ -3,7 +3,7 @@ /* ECC atomic, DMA, SMP and interrupt safe scrub function */ -static __inline__ void atomic_scrub(void *va, u32 size) +static inline void atomic_scrub(void *va, u32 size) { u32 i, *virt_addr = va; @@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size) * are interrupt, DMA and SMP safe. */ for (i = 0; i < size / 4; i++, virt_addr++) - __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); + asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); } #endif -- cgit v1.2.3 From 0c6f6abf6e63227b1e6e22e412c3f63c0163a520 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:07 -0700 Subject: include/asm-x86/efi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/efi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index ea9734b74ac..d53004b855c 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h @@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); */ #define efi_call_virt(f, args...) 
\ - ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) + ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) #define efi_call_virt0(f) efi_call_virt(f) #define efi_call_virt1(f, a1) efi_call_virt(f, a1) -- cgit v1.2.3 From 486386f6c1b5144ccc8eb2f28def1712e1dd6c3d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:08 -0700 Subject: include/asm-x86/elf.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/elf.h | 107 ++++++++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index 77325646e2f..8f232dc5b5f 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h @@ -11,7 +11,7 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; @@ -100,10 +100,11 @@ extern unsigned int vdso_enabled; We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. */ -#define ELF_PLAT_INIT(_r, load_addr) do { \ - _r->bx = 0; _r->cx = 0; _r->dx = 0; \ - _r->si = 0; _r->di = 0; _r->bp = 0; \ - _r->ax = 0; \ +#define ELF_PLAT_INIT(_r, load_addr) \ + do { \ + _r->bx = 0; _r->cx = 0; _r->dx = 0; \ + _r->si = 0; _r->di = 0; _r->bp = 0; \ + _r->ax = 0; \ } while (0) /* @@ -111,24 +112,25 @@ extern unsigned int vdso_enabled; * now struct_user_regs, they are different) */ -#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ - pr_reg[0] = regs->bx; \ - pr_reg[1] = regs->cx; \ - pr_reg[2] = regs->dx; \ - pr_reg[3] = regs->si; \ - pr_reg[4] = regs->di; \ - pr_reg[5] = regs->bp; \ - pr_reg[6] = regs->ax; \ - pr_reg[7] = regs->ds & 0xffff; \ - pr_reg[8] = regs->es & 0xffff; \ - pr_reg[9] = regs->fs & 0xffff; \ - savesegment(gs, pr_reg[10]); \ - pr_reg[11] = regs->orig_ax; \ - pr_reg[12] = regs->ip; \ - pr_reg[13] = regs->cs & 0xffff; \ - pr_reg[14] = regs->flags; \ - pr_reg[15] = regs->sp; \ - pr_reg[16] = regs->ss & 0xffff; \ +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +do { \ + pr_reg[0] = regs->bx; \ + pr_reg[1] = regs->cx; \ + pr_reg[2] = regs->dx; \ + pr_reg[3] = regs->si; \ + pr_reg[4] = regs->di; \ + pr_reg[5] = regs->bp; \ + pr_reg[6] = regs->ax; \ + pr_reg[7] = regs->ds & 0xffff; \ + pr_reg[8] = regs->es & 0xffff; \ + pr_reg[9] = regs->fs & 0xffff; \ + savesegment(gs, pr_reg[10]); \ + pr_reg[11] = regs->orig_ax; \ + pr_reg[12] = regs->ip; \ + pr_reg[13] = regs->cs & 0xffff; \ + pr_reg[14] = regs->flags; \ + pr_reg[15] = regs->sp; \ + pr_reg[16] = regs->ss & 0xffff; \ } while (0); #define ELF_PLATFORM (utsname()->machine) @@ -139,7 +141,7 @@ extern unsigned int vdso_enabled; /* * This is used to ensure we don't load something for the wrong architecture. 
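 * (Context note, not part of this patch: native binaries carry
 * e_machine == EM_X86_64 (62), while ia32 binaries carry EM_386 (3)
 * and are accepted through compat_elf_check_arch() instead.)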
*/ -#define elf_check_arch(x) \ +#define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) #define compat_elf_check_arch(x) elf_check_arch_ia32(x) @@ -168,24 +170,30 @@ static inline void elf_common_init(struct thread_struct *t, t->ds = t->es = ds; } -#define ELF_PLAT_INIT(_r, load_addr) do { \ - elf_common_init(¤t->thread, _r, 0); \ - clear_thread_flag(TIF_IA32); \ +#define ELF_PLAT_INIT(_r, load_addr) \ +do { \ + elf_common_init(¤t->thread, _r, 0); \ + clear_thread_flag(TIF_IA32); \ } while (0) -#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ +#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) -#define compat_start_thread(regs, ip, sp) do { \ - start_ia32_thread(regs, ip, sp); \ - set_fs(USER_DS); \ - } while (0) -#define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ - if (test_thread_flag(TIF_IA32)) \ - clear_thread_flag(TIF_ABI_PENDING); \ - else \ - set_thread_flag(TIF_ABI_PENDING); \ - current->personality |= force_personality32; \ - } while (0) + +#define compat_start_thread(regs, ip, sp) \ +do { \ + start_ia32_thread(regs, ip, sp); \ + set_fs(USER_DS); \ +} while (0) + +#define COMPAT_SET_PERSONALITY(ex, ibcs2) \ +do { \ + if (test_thread_flag(TIF_IA32)) \ + clear_thread_flag(TIF_ABI_PENDING); \ + else \ + set_thread_flag(TIF_ABI_PENDING); \ + current->personality |= force_personality32; \ +} while (0) + #define COMPAT_ELF_PLATFORM ("i686") /* @@ -194,7 +202,8 @@ static inline void elf_common_init(struct thread_struct *t, * getting dumped. */ -#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +do { \ unsigned v; \ (pr_reg)[0] = (regs)->r15; \ (pr_reg)[1] = (regs)->r14; \ @@ -268,10 +277,12 @@ extern int force_personality32; struct task_struct; -#define ARCH_DLINFO_IA32(vdso_enabled) \ -do if (vdso_enabled) { \ +#define ARCH_DLINFO_IA32(vdso_enabled) \ +do { \ + if (vdso_enabled) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ + } \ } while (0) #ifdef CONFIG_X86_32 @@ -289,9 +300,11 @@ do if (vdso_enabled) { \ /* 1GB for 64bit, 8MB for 32bit */ #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 
0x7ff : 0x3fffff) -#define ARCH_DLINFO \ -do if (vdso_enabled) { \ - NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ +#define ARCH_DLINFO \ +do { \ + if (vdso_enabled) \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso); \ } while (0) #define AT_SYSINFO 32 @@ -304,8 +317,8 @@ do if (vdso_enabled) { \ #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) -#define VDSO_ENTRY \ - ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +#define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) struct linux_binprm; -- cgit v1.2.3 From cb7d0617b776b91a0643b8c5988f236414129b7d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:09 -0700 Subject: include/asm-x86/fixmap_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/fixmap_32.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index a7404d50686..eb1665125c4 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h @@ -99,8 +99,7 @@ enum fixed_addresses { */ #define NR_FIX_BTMAPS 64 #define FIX_BTMAPS_NESTING 4 - FIX_BTMAP_END = - __end_of_permanent_fixed_addresses + 512 - + FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - (__end_of_permanent_fixed_addresses & 511), FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, FIX_WP_TEST, @@ -110,20 +109,20 @@ enum fixed_addresses { __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); +extern void __set_fixmap(enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); extern void reserve_top_address(unsigned long reserve); -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. 
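 * (Usage sketch, not part of this patch: a device page is mapped
 * uncached with set_fixmap_nocache(FIX_APIC_BASE, apic_phys) and then
 * accessed through the virtual address fix_to_virt(FIX_APIC_BASE).)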
*/ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) -#define clear_fixmap(idx) \ - __set_fixmap(idx, 0, __pgprot(0)) +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) @@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); - return __fix_to_virt(idx); + return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) -- cgit v1.2.3 From dca13fb0fc89d4214090a0ae14aeeaa50c42a6c4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:10 -0700 Subject: include/asm-x86/fixmap_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/fixmap_64.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index 70ddb21e645..f3d76858c0e 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h @@ -34,32 +34,34 @@ enum fixed_addresses { VSYSCALL_LAST_PAGE, - VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, + VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, FIX_HPET_BASE, FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, FIX_EFI_IO_MAP_LAST_PAGE, - FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, + FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + + MAX_EFI_IO_PAGES - 1, #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT FIX_OHCI1394_BASE, #endif __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); +extern void __set_fixmap(enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -- cgit v1.2.3 From 4637bc07c85621b0c10320da8cf3b34de83efa0f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:11 -0700 Subject: include/asm-x86/floppy.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/floppy.h | 87 +++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index a48d7153c09..438b3033a25 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h @@ -20,20 +20,21 @@ * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. 
*/ -#define _CROSS_64KB(a,s,vdma) \ -(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) +#define _CROSS_64KB(a, s, vdma) \ + (!(vdma) && \ + ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) -#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) +#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1) -#define SW fd_routine[use_virtual_dma&1] +#define SW fd_routine[use_virtual_dma & 1] #define CSW fd_routine[can_use_virtual_dma & 1] #define fd_inb(port) inb_p(port) -#define fd_outb(value,port) outb_p(value,port) +#define fd_outb(value, port) outb_p(value, port) -#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") +#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) @@ -57,15 +58,15 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) #undef TRACE_FLPY_INT #ifdef TRACE_FLPY_INT - static int calls=0; - static int bytes=0; - static int dma_wait=0; + static int calls; + static int bytes; + static int dma_wait; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id); #ifdef TRACE_FLPY_INT - if(!calls) + if (!calls) bytes = virtual_dma_count; #endif @@ -74,42 +75,42 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) register char *lptr; st = 1; - for(lcount=virtual_dma_count, lptr=virtual_dma_addr; - lcount; lcount--, lptr++) { - st=inb(virtual_dma_port+4) & 0xa0 ; - if(st != 0xa0) + for (lcount = virtual_dma_count, lptr = virtual_dma_addr; + lcount; lcount--, lptr++) { + st = inb(virtual_dma_port + 4) & 0xa0; + if (st != 0xa0) break; - if(virtual_dma_mode) - outb_p(*lptr, virtual_dma_port+5); + if (virtual_dma_mode) + outb_p(*lptr, virtual_dma_port + 5); else - *lptr = inb_p(virtual_dma_port+5); + *lptr = inb_p(virtual_dma_port + 5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; - st = inb(virtual_dma_port+4); + st = inb(virtual_dma_port + 4); } #ifdef TRACE_FLPY_INT calls++; #endif - if(st == 0x20) + if (st == 0x20) return IRQ_HANDLED; - if(!(st & 0x20)) { + if (!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; + virtual_dma_count = 0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; - dma_wait=0; + dma_wait = 0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT - if(!virtual_dma_count) + if (!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; @@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) static void fd_disable_dma(void) { - if(! 
(can_use_virtual_dma & 1)) + if (!(can_use_virtual_dma & 1)) disable_dma(FLOPPY_DMA); doing_pdma = 0; virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; + virtual_dma_count = 0; } -static int vdma_request_dma(unsigned int dmanr, const char * device_id) +static int vdma_request_dma(unsigned int dmanr, const char *device_id) { return 0; } @@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { - if(can_use_virtual_dma) + if (can_use_virtual_dma) return request_irq(FLOPPY_IRQ, floppy_hardint, IRQF_DISABLED, "floppy", NULL); else @@ -152,13 +153,13 @@ static int fd_request_irq(void) static unsigned long dma_mem_alloc(unsigned long size) { - return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size)); + return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size)); } static unsigned long vdma_mem_alloc(unsigned long size) { - return (unsigned long) vmalloc(size); + return (unsigned long)vmalloc(size); } @@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size) static void _fd_dma_mem_free(unsigned long addr, unsigned long size) { - if((unsigned long) addr >= (unsigned long) high_memory) + if ((unsigned long)addr >= (unsigned long)high_memory) vfree((void *)addr); else free_pages(addr, get_order(size)); @@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size) static void _fd_chose_dma_mode(char *addr, unsigned long size) { - if(can_use_virtual_dma == 2) { - if((unsigned long) addr >= (unsigned long) high_memory || - isa_virt_to_bus(addr) >= 0x1000000 || - _CROSS_64KB(addr, size, 0)) + if (can_use_virtual_dma == 2) { + if ((unsigned long)addr >= (unsigned long)high_memory || + isa_virt_to_bus(addr) >= 0x1000000 || + _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else use_virtual_dma = 0; @@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; - virtual_dma_mode = (mode == DMA_MODE_WRITE); + virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; @@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) /* actual, physical DMA */ doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); - set_dma_mode(FLOPPY_DMA,mode); - set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); - set_dma_count(FLOPPY_DMA,size); + set_dma_mode(FLOPPY_DMA, mode); + set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr)); + set_dma_count(FLOPPY_DMA, size); enable_dma(FLOPPY_DMA); return 0; } static struct fd_routine_l { - int (*_request_dma)(unsigned int dmanr, const char * device_id); + int (*_request_dma)(unsigned int dmanr, const char *device_id); void (*_free_dma)(unsigned int dmanr); int (*_get_dma_residue)(unsigned int dummy); - unsigned long (*_dma_mem_alloc) (unsigned long size); + unsigned long (*_dma_mem_alloc)(unsigned long size); int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { @@ -252,7 +253,8 @@ static int FDC2 = -1; * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
*/ -#define FLOPPY0_TYPE ({ \ +#define FLOPPY0_TYPE \ +({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ @@ -261,7 +263,8 @@ static int FDC2 = -1; val; \ }) -#define FLOPPY1_TYPE ({ \ +#define FLOPPY1_TYPE \ +({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ -- cgit v1.2.3 From 9407913fc16dde0e632b9639557422c6a792469d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:12 -0700 Subject: include/asm-x86/futex.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/futex.h | 101 ++++++++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index c9952ea9f69..ac0fbf24d72 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h @@ -12,35 +12,32 @@ #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile( \ -"1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n \ -3: mov %3, %1\n \ - jmp 2b\n \ - .previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ - : "i" (-EFAULT), "0" (oparg), "1" (0)) + asm volatile("1:\t" insn "\n" \ + "2:\t.section .fixup,\"ax\"\n" \ + "3:\tmov\t%3, %1\n" \ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile( \ -"1: movl %2, %0\n \ - movl %0, %3\n" \ - insn "\n" \ -"2: lock; cmpxchgl %3, %2\n \ - jnz 1b\n \ -3: .section .fixup,\"ax\"\n \ -4: mov %5, %1\n \ - jmp 3b\n \ - .previous\n" \ - _ASM_EXTABLE(1b,4b) \ - _ASM_EXTABLE(2b,4b) \ - : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ - "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) - -static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) + asm volatile("1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ + "2:\tlock; cmpxchgl %3, %2\n" \ + "\tjnz\t1b\n" \ + "3:\t.section .fixup,\"ax\"\n" \ + "4:\tmov\t%5, %1\n" \ + "\tjmp\t3b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ + "+m" (*uaddr), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (!ret) { switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; + case FUTEX_OP_CMP_EQ: + ret = (oldval == cmparg); + break; + case FUTEX_OP_CMP_NE: + ret = (oldval != cmparg); + break; + case FUTEX_OP_CMP_LT: + ret = (oldval < cmparg); + break; + case FUTEX_OP_CMP_GE: + ret = (oldval >= cmparg); + break; + case FUTEX_OP_CMP_LE: + ret = (oldval <= cmparg); + break; + case FUTEX_OP_CMP_GT: + ret = (oldval > cmparg); + break; + default: + ret = -ENOSYS; } } return ret; } -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +static inline int futex_atomic_cmpxchg_inatomic(int 
__user *uaddr, int oldval, + int newval) { #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) @@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - __asm__ __volatile__( - "1: lock; cmpxchgl %3, %1 \n" - "2: .section .fixup, \"ax\" \n" - "3: mov %2, %0 \n" - " jmp 2b \n" - " .previous \n" - _ASM_EXTABLE(1b,3b) - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) - : "memory" + asm volatile("1:\tlock; cmpxchgl %3, %1\n" + "2:\t.section .fixup, \"ax\"\n" + "3:\tmov %2, %0\n" + "\tjmp 2b\n" + "\t.previous\n" + _ASM_EXTABLE(1b, 3b) + : "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "memory" ); return oldval; -- cgit v1.2.3 From eee28c251ed8ef2c6b66f0e08e9467a8cc5cf886 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:13 -0700 Subject: include/asm-x86/genapic_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 89 ++++++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index b501ae7809b..5fa893dce72 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -18,18 +18,18 @@ struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; -struct genapic { - char *name; - int (*probe)(void); +struct genapic { + char *name; + int (*probe)(void); int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); int int_delivery_mode; - int int_dest_mode; + int int_dest_mode; int ESR_DISABLE; int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); + unsigned long (*check_apicid_present)(int apicid); int no_balance_irq; int no_ioapic_check; void (*init_apic_ldr)(void); @@ -37,21 +37,21 @@ struct genapic { void (*setup_apic_routing)(void); int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); + int (*apicid_to_node)(int logical_apicid); int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - void (*setup_portio_remap)(void); + void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ /* When one of the next two hooks returns 1 the genapic - is switched to this. Essentially they are additional probe + is switched to this. Essentially they are additional probe functions. 
*/ - int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, - char *productid); + int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, + char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); unsigned (*get_apic_id)(unsigned long x); @@ -64,7 +64,7 @@ struct genapic { void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); #endif -}; +}; #define APICFUNC(x) .x = x, @@ -77,39 +77,40 @@ struct genapic { #define IPIFUNC(x) #endif -#define APIC_INIT(aname, aprobe) { \ - .name = aname, \ - .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered) \ - APICFUNC(target_cpus) \ - APICFUNC(check_apicid_used) \ - APICFUNC(check_apicid_present) \ - APICFUNC(init_apic_ldr) \ - APICFUNC(ioapic_phys_id_map) \ - APICFUNC(setup_apic_routing) \ - APICFUNC(multi_timer_check) \ - APICFUNC(apicid_to_node) \ - APICFUNC(cpu_to_logical_apicid) \ - APICFUNC(cpu_present_to_apicid) \ - APICFUNC(apicid_to_cpu_present) \ - APICFUNC(setup_portio_remap) \ - APICFUNC(check_phys_apicid_present) \ - APICFUNC(mps_oem_check) \ - APICFUNC(get_apic_id) \ - .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(acpi_madt_oem_check) \ - IPIFUNC(send_IPI_mask) \ - IPIFUNC(send_IPI_allbutself) \ - IPIFUNC(send_IPI_all) \ - APICFUNC(enable_apic_mode) \ - APICFUNC(phys_pkg_id) \ - } +#define APIC_INIT(aname, aprobe) \ +{ \ + .name = aname, \ + .probe = aprobe, \ + .int_delivery_mode = INT_DELIVERY_MODE, \ + .int_dest_mode = INT_DEST_MODE, \ + .no_balance_irq = NO_BALANCE_IRQ, \ + .ESR_DISABLE = esr_disable, \ + .apic_destination_logical = APIC_DEST_LOGICAL, \ + APICFUNC(apic_id_registered) \ + APICFUNC(target_cpus) \ + APICFUNC(check_apicid_used) \ + APICFUNC(check_apicid_present) \ + APICFUNC(init_apic_ldr) \ + APICFUNC(ioapic_phys_id_map) \ + APICFUNC(setup_apic_routing) \ + APICFUNC(multi_timer_check) \ + APICFUNC(apicid_to_node) \ + APICFUNC(cpu_to_logical_apicid) \ + APICFUNC(cpu_present_to_apicid) \ + APICFUNC(apicid_to_cpu_present) \ + APICFUNC(setup_portio_remap) \ + APICFUNC(check_phys_apicid_present) \ + APICFUNC(mps_oem_check) \ + APICFUNC(get_apic_id) \ + .apic_id_mask = APIC_ID_MASK, \ + APICFUNC(cpu_mask_to_apicid) \ + APICFUNC(acpi_madt_oem_check) \ + IPIFUNC(send_IPI_mask) \ + IPIFUNC(send_IPI_allbutself) \ + IPIFUNC(send_IPI_all) \ + APICFUNC(enable_apic_mode) \ + APICFUNC(phys_pkg_id) \ +} extern struct genapic *genapic; -- cgit v1.2.3 From 6394d982a96a4b8f7973dea1d9d5a99dbe1847f7 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:14 -0700 Subject: include/asm-x86/geode.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/geode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 9e7280092a4..9870cc1f2f8 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@ -167,7 +167,7 @@ static inline int is_geode(void) /* MFGPTs */ #define MFGPT_MAX_TIMERS 8 -#define MFGPT_TIMER_ANY -1 +#define MFGPT_TIMER_ANY (-1) #define MFGPT_DOMAIN_WORKING 1 #define MFGPT_DOMAIN_STANDBY 2 -- cgit v1.2.3 From b9c2fcf0eb6190f942312a6e04f91a80efb91b72 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:15 -0700 Subject: include/asm-x86/highmem.h: checkpatch cleanups - 
formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/highmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 479767c9195..e153f3b4477 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h @@ -8,7 +8,7 @@ * Gerhard.Wichert@pdb.siemens.de * * - * Redesigned the x86 32-bit VM architecture to deal with + * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * -- cgit v1.2.3 From dbdab9f6308645a4462e81c607d6282bcb27141d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:16 -0700 Subject: include/asm-x86/hw_irq_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/hw_irq_64.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index 312a58d6dac..0062ef390f6 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h @@ -36,7 +36,7 @@ * cleanup after irq migration. */ #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR - + /* * Vectors 0x30-0x3f are used for ISA interrupts. */ @@ -159,13 +159,12 @@ extern atomic_t irq_mis_count; * SMP has a few special interrupts for IPI messages */ -#define BUILD_IRQ(nr) \ -asmlinkage void IRQ_NAME(nr); \ -__asm__( \ -"\n.p2align\n" \ -"IRQ" #nr "_interrupt:\n\t" \ - "push $~(" #nr ") ; " \ - "jmp common_interrupt"); +#define BUILD_IRQ(nr) \ + asmlinkage void IRQ_NAME(nr); \ + asm("\n.p2align\n" \ + "IRQ" #nr "_interrupt:\n\t" \ + "push $~(" #nr ") ; " \ + "jmp common_interrupt"); #define platform_legacy_irq(irq) ((irq) < 16) -- cgit v1.2.3 From e574b023cf03f796727ea654acfc16a0df805086 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:17 -0700 Subject: include/asm-x86/hypertransport.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/hypertransport.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h index c16c6ff4bdd..d2bbd238b3e 100644 --- a/include/asm-x86/hypertransport.h +++ b/include/asm-x86/hypertransport.h @@ -8,12 +8,14 @@ #define HT_IRQ_LOW_BASE 0xf8000000 #define HT_IRQ_LOW_VECTOR_SHIFT 16 -#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 -#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) +#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 +#define HT_IRQ_LOW_VECTOR(v) \ + (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) #define HT_IRQ_LOW_DEST_ID_SHIFT 8 -#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 -#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) +#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 +#define HT_IRQ_LOW_DEST_ID(v) \ + (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 @@ -36,7 +38,8 @@ #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 -#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff -#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) +#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff +#define HT_IRQ_HIGH_DEST_ID(v) \ + ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & 
HT_IRQ_HIGH_DEST_ID_MASK) #endif /* ASM_HYPERTRANSPORT_H */ -- cgit v1.2.3 From affe66374ca16572fbb22cefe267d6072c49ce9d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:18 -0700 Subject: include/asm-x86/i387.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/i387.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index f377b76b2f3..54522b814f1 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -41,7 +41,7 @@ static inline void tolerant_fwait(void) { asm volatile("1: fwait\n" "2:\n" - _ASM_EXTABLE(1b,2b)); + _ASM_EXTABLE(1b, 2b)); } static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) @@ -54,7 +54,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) + _ASM_EXTABLE(1b, 3b) : [err] "=r" (err) #if 0 /* See comment in __save_init_fpu() below. */ : [fx] "r" (fx), "m" (*fx), "0" (0)); @@ -76,11 +76,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) static inline void clear_fpu_state(struct i387_fxsave_struct *fx) { if (unlikely(fx->swd & X87_FSW_ES)) - asm volatile("fnclex"); + asm volatile("fnclex"); alternative_input(ASM_NOP8 ASM_NOP2, - " emms\n" /* clear stack tags */ - " fildl %%gs:0", /* load to clear state */ - X86_FEATURE_FXSAVE_LEAK); + " emms\n" /* clear stack tags */ + " fildl %%gs:0", /* load to clear state */ + X86_FEATURE_FXSAVE_LEAK); } static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) @@ -93,14 +93,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) + _ASM_EXTABLE(1b, 3b) : [err] "=r" (err), "=m" (*fx) #if 0 /* See comment in __fxsave_clear() below. 
*/ : [fx] "r" (fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "0" (0)); #endif - if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) + if (unlikely(err) && + __clear_user(fx, sizeof(struct i387_fxsave_struct))) err = -EFAULT; /* No need to clear here because the caller clears USED_MATH */ return err; @@ -156,8 +157,10 @@ static inline int save_i387(struct _fpstate __user *buf) return 0; clear_used_math(); /* trigger finit */ if (task_thread_info(tsk)->status & TS_USEDFPU) { - err = save_i387_checking((struct i387_fxsave_struct __user *)buf); - if (err) return err; + err = save_i387_checking((struct i387_fxsave_struct __user *) + buf); + if (err) + return err; task_thread_info(tsk)->status &= ~TS_USEDFPU; stts(); } else { -- cgit v1.2.3 From ace4fdbb2fa842fc557bc0ae5909d790eb335ff8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:19 -0700 Subject: include/asm-x86/i8259.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/i8259.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index e2650f21ca8..45d4df3e51e 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h @@ -5,7 +5,7 @@ extern unsigned int cached_irq_mask; -#define __byte(x,y) (((unsigned char *) &(y))[x]) +#define __byte(x, y) (((unsigned char *)&(y))[x]) #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) -- cgit v1.2.3 From 9bd73425142a610ae72c5e2b89e036e5214bb6ca Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:20 -0700 Subject: include/asm-x86/ia32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ia32.h | 62 ++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index aa9733206e2..55d3abe5276 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h @@ -14,19 +14,19 @@ /* signal.h */ struct sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal - with 32 bits */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ - compat_sigset_t sa_mask; /* A 32 bit mask */ + unsigned int sa_handler; /* Really a pointer, but need to deal + with 32 bits */ + unsigned int sa_flags; + unsigned int sa_restorer; /* Another 32 bit pointer */ + compat_sigset_t sa_mask; /* A 32 bit mask */ }; struct old_sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal - with 32 bits */ - compat_old_sigset_t sa_mask; /* A 32 bit mask */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ + unsigned int sa_handler; /* Really a pointer, but need to deal + with 32 bits */ + compat_old_sigset_t sa_mask; /* A 32 bit mask */ + unsigned int sa_flags; + unsigned int sa_restorer; /* Another 32 bit pointer */ }; typedef struct sigaltstack_ia32 { @@ -65,7 +65,7 @@ struct stat64 { long long st_size; unsigned int st_blksize; - long long st_blocks;/* Number 512-byte blocks allocated. 
*/ + long long st_blocks;/* Number 512-byte blocks allocated */ unsigned st_atime; unsigned st_atime_nsec; @@ -77,13 +77,13 @@ struct stat64 { unsigned long long st_ino; } __attribute__((packed)); -typedef struct compat_siginfo{ +typedef struct compat_siginfo { int si_signo; int si_errno; int si_code; union { - int _pad[((128/sizeof(int)) - 3)]; + int _pad[((128 / sizeof(int)) - 3)]; /* kill() */ struct { @@ -129,28 +129,26 @@ typedef struct compat_siginfo{ } _sifields; } compat_siginfo_t; -struct sigframe32 -{ - u32 pretcode; - int sig; - struct sigcontext_ia32 sc; - struct _fpstate_ia32 fpstate; - unsigned int extramask[_COMPAT_NSIG_WORDS-1]; +struct sigframe32 { + u32 pretcode; + int sig; + struct sigcontext_ia32 sc; + struct _fpstate_ia32 fpstate; + unsigned int extramask[_COMPAT_NSIG_WORDS-1]; }; -struct rt_sigframe32 -{ - u32 pretcode; - int sig; - u32 pinfo; - u32 puc; - compat_siginfo_t info; - struct ucontext_ia32 uc; - struct _fpstate_ia32 fpstate; +struct rt_sigframe32 { + u32 pretcode; + int sig; + u32 pinfo; + u32 puc; + compat_siginfo_t info; + struct ucontext_ia32 uc; + struct _fpstate_ia32 fpstate; }; struct ustat32 { - __u32 f_tfree; + __u32 f_tfree; compat_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; @@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm); #endif #endif /* !CONFIG_IA32_SUPPORT */ - -#endif + +#endif -- cgit v1.2.3 From dcd215c9089c1203eed6f84dde2eb2cc0bbf9501 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:22 -0700 Subject: include/asm-x86/io_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io_32.h | 137 +++++++++++++++++++++++++++++------------------- 1 file changed, 83 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index d4d8fbd9378..509045f5fda 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h @@ -65,14 +65,14 @@ * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on - * addresses directly mapped or allocated via kmalloc. + * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. 
In * almost all conceivable cases a device driver should not be using * this function */ - -static inline unsigned long virt_to_phys(volatile void * address) + +static inline unsigned long virt_to_phys(volatile void *address) { return __pa(address); } @@ -90,7 +90,7 @@ static inline unsigned long virt_to_phys(volatile void * address) * this function */ -static inline void * phys_to_virt(unsigned long address) +static inline void *phys_to_virt(unsigned long address) { return __va(address); } @@ -169,16 +169,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); static inline unsigned char readb(const volatile void __iomem *addr) { - return *(volatile unsigned char __force *) addr; + return *(volatile unsigned char __force *)addr; } + static inline unsigned short readw(const volatile void __iomem *addr) { - return *(volatile unsigned short __force *) addr; + return *(volatile unsigned short __force *)addr; } + static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } + #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) @@ -188,15 +191,17 @@ static inline unsigned int readl(const volatile void __iomem *addr) static inline void writeb(unsigned char b, volatile void __iomem *addr) { - *(volatile unsigned char __force *) addr = b; + *(volatile unsigned char __force *)addr = b; } + static inline void writew(unsigned short b, volatile void __iomem *addr) { - *(volatile unsigned short __force *) addr = b; + *(volatile unsigned short __force *)addr = b; } + static inline void writel(unsigned int b, volatile void __iomem *addr) { - *(volatile unsigned int __force *) addr = b; + *(volatile unsigned int __force *)addr = b; } #define __raw_writeb writeb #define __raw_writew writew @@ -239,12 +244,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count) * 1. Out of order aware processors * 2. 
Accidentally out of order processors (PPro errata #51) */ - + #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { - __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); + asm volatile("lock; addl $0,0(%%esp)": : :"memory"); } #else @@ -264,7 +269,8 @@ extern void io_delay_init(void); #include #else -static inline void slow_down_io(void) { +static inline void slow_down_io(void) +{ native_io_delay(); #ifdef REALLY_SLOW_IO native_io_delay(); @@ -275,51 +281,74 @@ static inline void slow_down_io(void) { #endif -#define __BUILDIO(bwl,bw,type) \ -static inline void out##bwl(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ -} \ -static inline unsigned type in##bwl(int port) { \ - return in##bwl##_local(port); \ +#define __BUILDIO(bwl, bw, type) \ +static inline void out##bwl(unsigned type value, int port) \ +{ \ + out##bwl##_local(value, port); \ +} \ + \ +static inline unsigned type in##bwl(int port) \ +{ \ + return in##bwl##_local(port); \ } -#define BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_local(unsigned type value, int port) { \ - __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ -} \ -static inline unsigned type in##bwl##_local(int port) { \ - unsigned type value; \ - __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ - return value; \ -} \ -static inline void out##bwl##_local_p(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_local_p(int port) { \ - unsigned type value = in##bwl##_local(port); \ - slow_down_io(); \ - return value; \ -} \ -__BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_p(unsigned type value, int port) { \ - out##bwl(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_p(int port) { \ - unsigned type value = in##bwl(port); \ - slow_down_io(); \ - return value; \ -} \ -static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ -} \ -static inline void ins##bwl(int port, void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ +#define BUILDIO(bwl, bw, type) \ +static inline void out##bwl##_local(unsigned type value, int port) \ +{ \ + asm volatile("out" #bwl " %" #bw "0, %w1" \ + : : "a"(value), "Nd"(port)); \ +} \ + \ +static inline unsigned type in##bwl##_local(int port) \ +{ \ + unsigned type value; \ + asm volatile("in" #bwl " %w1, %" #bw "0" \ + : "=a"(value) : "Nd"(port)); \ + return value; \ +} \ + \ +static inline void out##bwl##_local_p(unsigned type value, int port) \ +{ \ + out##bwl##_local(value, port); \ + slow_down_io(); \ +} \ + \ +static inline unsigned type in##bwl##_local_p(int port) \ +{ \ + unsigned type value = in##bwl##_local(port); \ + slow_down_io(); \ + return value; \ +} \ + \ +__BUILDIO(bwl, bw, type) \ + \ +static inline void out##bwl##_p(unsigned type value, int port) \ +{ \ + out##bwl(value, port); \ + slow_down_io(); \ +} \ + \ +static inline unsigned type in##bwl##_p(int port) \ +{ \ + unsigned type value = in##bwl(port); \ + slow_down_io(); \ + return value; \ +} \ + \ +static inline void outs##bwl(int port, const void *addr, unsigned long count) \ +{ \ + asm volatile("rep; outs" #bwl \ + : "+S"(addr), "+c"(count) : "d"(port)); \ +} \ + \ +static inline void ins##bwl(int port, void *addr, unsigned long 
count) \ +{ \ + asm volatile("rep; ins" #bwl \ + : "+D"(addr), "+c"(count) : "d"(port)); \ } -BUILDIO(b,b,char) -BUILDIO(w,w,short) -BUILDIO(l,,int) +BUILDIO(b, b, char) +BUILDIO(w, w, short) +BUILDIO(l, , int) #endif -- cgit v1.2.3 From 126f5f35a36b5734026db58263fc1d60e4ae466f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:23 -0700 Subject: include/asm-x86/io_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io_64.h | 110 ++++++++++++++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index db0be2011a3..c2f5eef47b8 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h @@ -58,60 +58,75 @@ static inline void slow_down_io(void) /* * Talk about misusing macros.. */ -#define __OUT1(s,x) \ +#define __OUT1(s, x) \ static inline void out##s(unsigned x value, unsigned short port) { -#define __OUT2(s,s1,s2) \ -__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" +#define __OUT2(s, s1, s2) \ +asm volatile ("out" #s " %" s1 "0,%" s2 "1" #ifndef REALLY_SLOW_IO #define REALLY_SLOW_IO #define UNSET_REALLY_SLOW_IO #endif -#define __OUT(s,s1,x) \ -__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ -__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ - slow_down_io(); } +#define __OUT(s, s1, x) \ + __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ + } \ + __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ + slow_down_io(); \ +} -#define __IN1(s) \ -static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; +#define __IN1(s) \ +static inline RETURN_TYPE in##s(unsigned short port) \ +{ \ + RETURN_TYPE _v; -#define __IN2(s,s1,s2) \ -__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" +#define __IN2(s, s1, s2) \ + asm volatile ("in" #s " %" s2 "1,%" s1 "0" -#define __IN(s,s1,i...) \ -__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ -__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ - slow_down_io(); return _v; } +#define __IN(s, s1, i...) 
\ + __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ + return _v; \ + } \ + __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ + slow_down_io(); \ + return _v; } #ifdef UNSET_REALLY_SLOW_IO #undef REALLY_SLOW_IO #endif -#define __INS(s) \ -static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ -{ __asm__ __volatile__ ("rep ; ins" #s \ -: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } +#define __INS(s) \ +static inline void ins##s(unsigned short port, void *addr, \ + unsigned long count) \ +{ \ + asm volatile ("rep ; ins" #s \ + : "=D" (addr), "=c" (count) \ + : "d" (port), "0" (addr), "1" (count)); \ +} -#define __OUTS(s) \ -static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ -{ __asm__ __volatile__ ("rep ; outs" #s \ -: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } +#define __OUTS(s) \ +static inline void outs##s(unsigned short port, const void *addr, \ + unsigned long count) \ +{ \ + asm volatile ("rep ; outs" #s \ + : "=S" (addr), "=c" (count) \ + : "d" (port), "0" (addr), "1" (count)); \ +} #define RETURN_TYPE unsigned char -__IN(b,"") +__IN(b, "") #undef RETURN_TYPE #define RETURN_TYPE unsigned short -__IN(w,"") +__IN(w, "") #undef RETURN_TYPE #define RETURN_TYPE unsigned int -__IN(l,"") +__IN(l, "") #undef RETURN_TYPE -__OUT(b,"b",char) -__OUT(w,"w",short) -__OUT(l,,int) +__OUT(b, "b", char) +__OUT(w, "w", short) +__OUT(l, , int) __INS(b) __INS(w) @@ -132,12 +147,12 @@ __OUTS(l) * Change virtual addresses to physical addresses and vv. * These are pretty trivial */ -static inline unsigned long virt_to_phys(volatile void * address) +static inline unsigned long virt_to_phys(volatile void *address) { return __pa(address); } -static inline void * phys_to_virt(unsigned long address) +static inline void *phys_to_virt(unsigned long address) { return __va(address); } @@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr) { return *(__force volatile __u8 *)addr; } + static inline __u16 __readw(const volatile void __iomem *addr) { return *(__force volatile __u16 *)addr; } + static __always_inline __u32 __readl(const volatile void __iomem *addr) { return *(__force volatile __u32 *)addr; } + static inline __u64 __readq(const volatile void __iomem *addr) { return *(__force volatile __u64 *)addr; } + #define readb(x) __readb(x) #define readw(x) __readw(x) #define readl(x) __readl(x) @@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr) { *(__force volatile __u32 *)addr = b; } + static inline void __writeq(__u64 b, volatile void __iomem *addr) { *(__force volatile __u64 *)addr = b; } + static inline void __writeb(__u8 b, volatile void __iomem *addr) { *(__force volatile __u8 *)addr = b; } + static inline void __writew(__u16 b, volatile void __iomem *addr) { *(__force volatile __u16 *)addr = b; } -#define writeq(val,addr) __writeq((val),(addr)) -#define writel(val,addr) __writel((val),(addr)) -#define writew(val,addr) __writew((val),(addr)) -#define writeb(val,addr) __writeb((val),(addr)) + +#define writeq(val, addr) __writeq((val), (addr)) +#define writel(val, addr) __writel((val), (addr)) +#define writew(val, addr) __writew((val), (addr)) +#define writeb(val, addr) __writeb((val), (addr)) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define __raw_writeq writeq -void __memcpy_fromio(void*,unsigned long,unsigned); -void __memcpy_toio(unsigned long,const void*,unsigned); 
+void __memcpy_fromio(void *, unsigned long, unsigned); +void __memcpy_toio(unsigned long, const void *, unsigned); -static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) +static inline void memcpy_fromio(void *to, const volatile void __iomem *from, + unsigned len) { - __memcpy_fromio(to,(unsigned long)from,len); + __memcpy_fromio(to, (unsigned long)from, len); } -static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) + +static inline void memcpy_toio(volatile void __iomem *to, const void *from, + unsigned len) { - __memcpy_toio((unsigned long)to,from,len); + __memcpy_toio((unsigned long)to, from, len); } void memset_io(volatile void __iomem *a, int b, size_t c); @@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c); */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) -#define flush_write_buffers() +#define flush_write_buffers() extern int iommu_bio_merge; #define BIO_VMERGE_BOUNDARY iommu_bio_merge -- cgit v1.2.3 From 88b4f6e98f79de31f49036da5bb1482872d512b5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:24 -0700 Subject: include/asm-x86/ioctls.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ioctls.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h index 93c894dc515..c0c338bd406 100644 --- a/include/asm-x86/ioctls.h +++ b/include/asm-x86/ioctls.h @@ -47,12 +47,13 @@ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T',0x2A, struct termios2) -#define TCSETS2 _IOW('T',0x2B, struct termios2) -#define TCSETSW2 _IOW('T',0x2C, struct termios2) -#define TCSETSF2 _IOW('T',0x2D, struct termios2) -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TCGETS2 _IOR('T', 0x2A, struct termios2) +#define TCSETS2 _IOW('T', 0x2B, struct termios2) +#define TCSETSW2 _IOW('T', 0x2C, struct termios2) +#define TCSETSF2 _IOW('T', 0x2D, struct termios2) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) + /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 -- cgit v1.2.3 From 1774a5bed33d85f250657f397b359b9837390a90 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:25 -0700 Subject: include/asm-x86/io.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 599cad3505c..7b292d38671 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -7,5 +7,5 @@ #endif extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); -extern void __iomem * ioremap_wc(unsigned long offset, unsigned long size); +extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); -- cgit v1.2.3 From 5ca22aaad9ea078306537b2ef6fb788d931e8502 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:26 -0700 Subject: include/asm-x86/ipcbuf.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- 
include/asm-x86/ipcbuf.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h index 2adf8b39a40..ee678fd5159 100644 --- a/include/asm-x86/ipcbuf.h +++ b/include/asm-x86/ipcbuf.h @@ -11,8 +11,7 @@ * - 2 miscellaneous 32-bit values */ -struct ipc64_perm -{ +struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; -- cgit v1.2.3 From 061b3d90bc3490af41c6756189b59713cd9ecaee Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:27 -0700 Subject: include/asm-x86/ipi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ipi.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index 6d011bd6067..ecc80f341f3 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h @@ -27,7 +27,8 @@ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. */ -static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) +static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, + unsigned int dest) { unsigned int icr = shortcut | dest; @@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns return icr; } -static inline int __prepare_ICR2 (unsigned int mask) +static inline int __prepare_ICR2(unsigned int mask) { return SET_APIC_DEST_FIELD(mask); } -static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, + unsigned int dest) { /* * Subtle. In the case of the 'never do double writes' workaround @@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ -static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) +static inline void __send_IPI_dest_field(unsigned int mask, int vector, + unsigned int dest) { unsigned long cfg; -- cgit v1.2.3 From f4964d2ac51fcc8cd56975139422cdbb3a1e66dc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:28 -0700 Subject: include/asm-x86/irq_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irq_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h index aca9c96e8e6..0b79f318524 100644 --- a/include/asm-x86/irq_32.h +++ b/include/asm-x86/irq_32.h @@ -15,7 +15,7 @@ #include "irq_vectors.h" #include -static __inline__ int irq_canonicalize(int irq) +static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 
9 : irq); } -- cgit v1.2.3 From 3ff3522497d7ad383df9770e812fbaf75d19b214 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:29 -0700 Subject: include/asm-x86/irq_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irq_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h index 5006c6e7565..083d35a62c9 100644 --- a/include/asm-x86/irq_64.h +++ b/include/asm-x86/irq_64.h @@ -31,10 +31,10 @@ #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ -#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS)) +#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) #define NR_IRQ_VECTORS NR_IRQS -static __inline__ int irq_canonicalize(int irq) +static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); } -- cgit v1.2.3 From cf7f7191cf20011e47243b594e433275a6db811b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:30 -0700 Subject: include/asm-x86/irqflags.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irqflags.h | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 92021c1ffa3..c242527f970 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void) { unsigned long flags; - __asm__ __volatile__( - "# __raw_save_flags\n\t" - "pushf ; pop %0" - : "=g" (flags) - : /* no input */ - : "memory" - ); + asm volatile("# __raw_save_flags\n\t" + "pushf ; pop %0" + : "=g" (flags) + : /* no input */ + : "memory"); return flags; } static inline void native_restore_fl(unsigned long flags) { - __asm__ __volatile__( - "push %0 ; popf" - : /* no output */ - :"g" (flags) - :"memory", "cc" - ); + asm volatile("push %0 ; popf" + : /* no output */ + :"g" (flags) + :"memory", "cc"); } static inline void native_irq_disable(void) @@ -131,11 +127,11 @@ static inline unsigned long __raw_local_irq_save(void) #endif /* CONFIG_PARAVIRT */ #ifndef __ASSEMBLY__ -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) +#define raw_local_save_flags(flags) \ + do { (flags) = __raw_local_save_flags(); } while (0) -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) +#define raw_local_irq_save(flags) \ + do { (flags) = __raw_local_irq_save(); } while (0) static inline int raw_irqs_disabled_flags(unsigned long flags) { -- cgit v1.2.3 From f461f1372cef1853534df2115f9ff5b7fbfc6958 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:31 -0700 Subject: include/asm-x86/kdebug.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kdebug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 99dcbafa151..0c4175390da 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h @@ -23,12 +23,12 @@ enum die_val { }; extern void printk_address(unsigned long address, int reliable); -extern void die(const char *,struct pt_regs *,long); +extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_registers(struct pt_regs *regs); extern 
void __show_registers(struct pt_regs *, int all); extern void show_trace(struct task_struct *t, struct pt_regs *regs, - unsigned long *sp, unsigned long bp); + unsigned long *sp, unsigned long bp); extern void __show_regs(struct pt_regs *regs); extern void show_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); -- cgit v1.2.3 From b69a3f9dc0bbdbf9278ac5bc8d4b6347c11a701b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:32 -0700 Subject: include/asm-x86/kexec.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kexec.h | 71 ++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index c90d3c77afc..8f855a15f64 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, { #ifdef CONFIG_X86_32 newregs->sp = (unsigned long)&(oldregs->sp); - __asm__ __volatile__( - "xorl %%eax, %%eax\n\t" - "movw %%ss, %%ax\n\t" - :"=a"(newregs->ss)); + asm volatile("xorl %%eax, %%eax\n\t" + "movw %%ss, %%ax\n\t" + :"=a"(newregs->ss)); #endif } @@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs, crash_fixup_ss_esp(newregs, oldregs); } else { #ifdef CONFIG_X86_32 - __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); - __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); - __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); - __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); - __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); - __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); - __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); - __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); - __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); - __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); - __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); + asm volatile("movl %%ebx,%0" : "=m"(newregs->bx)); + asm volatile("movl %%ecx,%0" : "=m"(newregs->cx)); + asm volatile("movl %%edx,%0" : "=m"(newregs->dx)); + asm volatile("movl %%esi,%0" : "=m"(newregs->si)); + asm volatile("movl %%edi,%0" : "=m"(newregs->di)); + asm volatile("movl %%ebp,%0" : "=m"(newregs->bp)); + asm volatile("movl %%eax,%0" : "=m"(newregs->ax)); + asm volatile("movl %%esp,%0" : "=m"(newregs->sp)); + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); + asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds)); + asm volatile("movl %%es, %%eax;" :"=a"(newregs->es)); + asm volatile("pushfl; popl %0" :"=m"(newregs->flags)); #else - __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); - __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); - __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); - __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); - __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); - __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); - __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); - __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); - __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); - __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); - __asm__ 
__volatile__("movq %%r10,%0" : "=m"(newregs->r10)); - __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); - __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); - __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); - __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); - __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); - __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); + asm volatile("movq %%rbx,%0" : "=m"(newregs->bx)); + asm volatile("movq %%rcx,%0" : "=m"(newregs->cx)); + asm volatile("movq %%rdx,%0" : "=m"(newregs->dx)); + asm volatile("movq %%rsi,%0" : "=m"(newregs->si)); + asm volatile("movq %%rdi,%0" : "=m"(newregs->di)); + asm volatile("movq %%rbp,%0" : "=m"(newregs->bp)); + asm volatile("movq %%rax,%0" : "=m"(newregs->ax)); + asm volatile("movq %%rsp,%0" : "=m"(newregs->sp)); + asm volatile("movq %%r8,%0" : "=m"(newregs->r8)); + asm volatile("movq %%r9,%0" : "=m"(newregs->r9)); + asm volatile("movq %%r10,%0" : "=m"(newregs->r10)); + asm volatile("movq %%r11,%0" : "=m"(newregs->r11)); + asm volatile("movq %%r12,%0" : "=m"(newregs->r12)); + asm volatile("movq %%r13,%0" : "=m"(newregs->r13)); + asm volatile("movq %%r14,%0" : "=m"(newregs->r14)); + asm volatile("movq %%r15,%0" : "=m"(newregs->r15)); + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); + asm volatile("pushfq; popq %0" :"=m"(newregs->flags)); #endif newregs->ip = (unsigned long)current_text_addr(); } -- cgit v1.2.3 From 864dfa13bed444d2c6d76d584e81713405d60082 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:33 -0700 Subject: include/asm-x86/kprobes.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kprobes.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 61ad7b5d142..54980b0b389 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h @@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t; #define RELATIVEJUMP_INSTRUCTION 0xe9 #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ - (((unsigned long)current_thread_info()) + THREAD_SIZE \ - - (unsigned long)(ADDR))) \ - ? (MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + THREAD_SIZE \ - - (unsigned long)(ADDR))) +#define MIN_STACK_SIZE(ADDR) \ + (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ + THREAD_SIZE - (unsigned long)(ADDR))) \ + ? 
(MAX_STACK_SIZE) \ + : (((unsigned long)current_thread_info()) + \ + THREAD_SIZE - (unsigned long)(ADDR))) #define flush_insn_slot(p) do { } while (0) -- cgit v1.2.3 From 7d76b4d3767324fa44ac73032d6eeb428e46f004 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:34 -0700 Subject: include/asm-x86/kvm_host.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kvm_host.h | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 4702b04b979..68ee390b284 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -22,15 +22,16 @@ #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) -#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) +#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ + 0xFFFFFF0000000000ULL) -#define KVM_GUEST_CR0_MASK \ +#define KVM_GUEST_CR0_MASK \ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ | X86_CR0_NW | X86_CR0_CD) -#define KVM_VM_CR0_ALWAYS_ON \ +#define KVM_VM_CR0_ALWAYS_ON \ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ | X86_CR0_MP) -#define KVM_GUEST_CR4_MASK \ +#define KVM_GUEST_CR4_MASK \ (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) @@ -133,12 +134,12 @@ struct kvm_pte_chain { union kvm_mmu_page_role { unsigned word; struct { - unsigned glevels : 4; - unsigned level : 4; - unsigned quadrant : 2; - unsigned pad_for_nice_hex_output : 6; - unsigned metaphysical : 1; - unsigned access : 3; + unsigned glevels:4; + unsigned level:4; + unsigned quadrant:2; + unsigned pad_for_nice_hex_output:6; + unsigned metaphysical:1; + unsigned access:3; }; }; @@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) #define TSS_BASE_SIZE 0x68 #define TSS_IOPB_SIZE (65536 / 8) #define TSS_REDIRECTION_SIZE (256 / 8) -#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) +#define RMODE_TSS_SIZE \ + (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) #endif -- cgit v1.2.3 From 0c7825e64d377409deaaf73cd6311da9df310db3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:35 -0700 Subject: include/asm-x86/kvm_x86_emulate.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kvm_x86_emulate.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index 7db91b9bdcd..d6337f941c9 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h @@ -68,10 +68,10 @@ struct x86_emulate_ops { * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. */ - int (*read_emulated) (unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*read_emulated)(unsigned long addr, + void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); /* * write_emulated: Read bytes from emulated/special memory area. @@ -80,10 +80,10 @@ struct x86_emulate_ops { * required). 
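[Editorial sketch] The x86_emulate_ops table being reformatted here is a classic ops-table contract: the emulator core only ever calls through documented function pointers, so any back end can supply its own memory accessors. The following is a minimal user-space sketch of that pattern, not KVM code; all names (demo_emulate_ops, fake_ram) are illustrative.

	#include <stdio.h>
	#include <string.h>

	struct demo_emulate_ops {
		/* same shape of contract as read_emulated/write_emulated above */
		int (*read_emulated)(unsigned long addr, void *val, unsigned int bytes);
		int (*write_emulated)(unsigned long addr, const void *val, unsigned int bytes);
	};

	static unsigned char fake_ram[64];

	static int demo_read(unsigned long addr, void *val, unsigned int bytes)
	{
		memcpy(val, &fake_ram[addr], bytes);
		return 0;
	}

	static int demo_write(unsigned long addr, const void *val, unsigned int bytes)
	{
		memcpy(&fake_ram[addr], val, bytes);
		return 0;
	}

	int main(void)
	{
		struct demo_emulate_ops ops = { demo_read, demo_write };
		unsigned int v = 0xabcd;

		ops.write_emulated(0, &v, sizeof(v));
		v = 0;
		ops.read_emulated(0, &v, sizeof(v));
		printf("read back 0x%x\n", v);	/* prints 0xabcd */
		return 0;
	}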
* @bytes: [IN ] Number of bytes to write to memory. */ - int (*write_emulated) (unsigned long addr, - const void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*write_emulated)(unsigned long addr, + const void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); /* * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an @@ -93,11 +93,11 @@ struct x86_emulate_ops { * @new: [IN ] Value to write to @addr. * @bytes: [IN ] Number of bytes to access using CMPXCHG. */ - int (*cmpxchg_emulated) (unsigned long addr, - const void *old, - const void *new, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*cmpxchg_emulated)(unsigned long addr, + const void *old, + const void *new, + unsigned int bytes, + struct kvm_vcpu *vcpu); }; @@ -143,7 +143,7 @@ struct x86_emulate_ctxt { /* Register state before/after emulation. */ struct kvm_vcpu *vcpu; - /* Linear faulting address (if emulating a page-faulting instruction). */ + /* Linear faulting address (if emulating a page-faulting instruction) */ unsigned long eflags; /* Emulated execution mode, represented by an X86EMUL_MODE value. */ -- cgit v1.2.3 From fd1ea0c25ae00e2ac55881af55c3206664dd59a8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:36 -0700 Subject: include/asm-x86/lguest_hcall.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/lguest_hcall.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f239e7069ca..a3241f28e34 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h @@ -46,7 +46,7 @@ hcall(unsigned long call, { /* "int" is the Intel instruction to trigger a trap. */ asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) - /* The call in %eax (aka "a") might be overwritten */ + /* The call in %eax (aka "a") might be overwritten */ : "=a"(call) /* The arguments are in %eax, %edx, %ebx & %ecx */ : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) @@ -62,8 +62,7 @@ hcall(unsigned long call, #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) #define LHCALL_RING_SIZE 64 -struct hcall_args -{ +struct hcall_args { /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ unsigned long arg0, arg2, arg3, arg1; }; -- cgit v1.2.3 From fb444c7b25420d57ce5e31cab486f734705bd278 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:37 -0700 Subject: include/asm-x86/lguest.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/lguest.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index 9b17571e9bc..be4a7247fa2 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h @@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[]; extern void lguest_iret(void); extern void lguest_init(void); -struct lguest_regs -{ +struct lguest_regs { /* Manually saved part. */ unsigned long eax, ebx, ecx, edx; unsigned long esi, edi, ebp; @@ -51,8 +50,7 @@ struct lguest_regs }; /* This is a guest-specific page (mapped ro) into the guest. */ -struct lguest_ro_state -{ +struct lguest_ro_state { /* Host information we need to restore when we switch back. 
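[Editorial sketch] The hcall() asm reindented above pins each argument to a specific register via constraint letters: "a", "b", "c", "d" mean %eax, %ebx, %ecx, %edx, and "=a" takes the result back out of %eax. The real hypercall traps into the host with "int $LGUEST_TRAP_ENTRY" and cannot run in user space, so this stand-in only demonstrates the constraint mechanics (x86, gcc/clang only; the add instructions replace the trap):

	#include <stdio.h>

	int main(void)
	{
		unsigned long call = 1, arg1 = 2, arg2 = 3, arg3 = 4, ret;

		asm volatile("addl %%edx, %%eax\n\t"
			     "addl %%ebx, %%eax\n\t"
			     "addl %%ecx, %%eax"
			     : "=a" (ret)			/* result comes back in %eax */
			     : "a" (call), "d" (arg1),
			       "b" (arg2), "c" (arg3)		/* args pinned to registers */
			     : "memory");
		printf("%lu\n", ret);	/* prints 10 */
		return 0;
	}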
*/ u32 host_cr3; struct desc_ptr host_idt_desc; @@ -67,8 +65,7 @@ struct lguest_ro_state struct desc_struct guest_gdt[GDT_ENTRIES]; }; -struct lg_cpu_arch -{ +struct lg_cpu_arch { /* The GDT entries copied into lguest_ro_state when running. */ struct desc_struct gdt[GDT_ENTRIES]; @@ -85,7 +82,7 @@ static inline void lguest_set_ts(void) cr0 = read_cr0(); if (!(cr0 & 8)) - write_cr0(cr0|8); + write_cr0(cr0 | 8); } /* Full 4G segment descriptors, suitable for CS and DS. */ -- cgit v1.2.3 From 69cde6512c3a0227878869f9ba8a02cdc72fc253 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:39 -0700 Subject: include/asm-x86/local.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/local.h | 105 +++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 55 deletions(-) (limited to 'include') diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index f852c62b331..330a72496ab 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h @@ -18,32 +18,28 @@ typedef struct { static inline void local_inc(local_t *l) { - __asm__ __volatile__( - _ASM_INC "%0" - :"+m" (l->a.counter)); + asm volatile(_ASM_INC "%0" + : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - __asm__ __volatile__( - _ASM_DEC "%0" - :"+m" (l->a.counter)); + asm volatile(_ASM_DEC "%0" + : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - __asm__ __volatile__( - _ASM_ADD "%1,%0" - :"+m" (l->a.counter) - :"ir" (i)); + asm volatile(_ASM_ADD "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - __asm__ __volatile__( - _ASM_SUB "%1,%0" - :"+m" (l->a.counter) - :"ir" (i)); + asm volatile(_ASM_SUB "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); } /** @@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_SUB "%2,%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(_ASM_SUB "%2,%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_DEC "%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); + asm volatile(_ASM_DEC "%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_INC "%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); + asm volatile(_ASM_INC "%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_ADD "%2,%0; sets %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(_ASM_ADD "%2,%0; sets %1" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l) #endif /* Modern 486+ processor */ __i = i; - __asm__ __volatile__( - _ASM_XADD "%0, %1;" - :"+r" (i), "+m" (l->a.counter) - : : "memory"); + asm volatile(_ASM_XADD "%0, %1;" + : "+r" (i), "+m" (l->a.counter) + : : "memory"); return i + __i; #ifdef CONFIG_M386 @@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l) #define local_add_unless(l, a, u) \ ({ \ long c, old; \ - c = 
local_read(l); \ + c = local_read((l)); \ for (;;) { \ if (unlikely(c == (u))) \ break; \ - old = local_cmpxchg((l), c, c + (a)); \ + old = local_cmpxchg((l), c, c + (a)); \ if (likely(old == c)) \ break; \ c = old; \ @@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l) /* Need to disable preemption for the cpu local counters otherwise we could still access a variable of a previous CPU in a non atomic way. */ -#define cpu_local_wrap_v(l) \ - ({ local_t res__; \ - preempt_disable(); \ - res__ = (l); \ - preempt_enable(); \ - res__; }) +#define cpu_local_wrap_v(l) \ +({ \ + local_t res__; \ + preempt_disable(); \ + res__ = (l); \ + preempt_enable(); \ + res__; \ +}) #define cpu_local_wrap(l) \ - ({ preempt_disable(); \ - l; \ - preempt_enable(); }) \ - -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) - -#define __cpu_local_inc(l) cpu_local_inc(l) -#define __cpu_local_dec(l) cpu_local_dec(l) +({ \ + preempt_disable(); \ + (l); \ + preempt_enable(); \ +}) \ + +#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) +#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) +#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) +#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) +#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) +#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) + +#define __cpu_local_inc(l) cpu_local_inc((l)) +#define __cpu_local_dec(l) cpu_local_dec((l)) #define __cpu_local_add(i, l) cpu_local_add((i), (l)) #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) -- cgit v1.2.3 From 933a44155caeb4ff5b58fcf755e3381ae37e72d4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:40 -0700 Subject: include/asm-x86/mc146818rtc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mc146818rtc.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index cdd9f965835..daf1ccde77a 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h @@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock; static inline void lock_cmos(unsigned char reg) { unsigned long new; - new = ((smp_processor_id()+1) << 8) | reg; + new = ((smp_processor_id() + 1) << 8) | reg; for (;;) { if (cmos_lock) { cpu_relax(); @@ -57,22 +57,26 @@ static inline void unlock_cmos(void) { cmos_lock = 0; } + static inline int do_i_have_lock_cmos(void) { - return (cmos_lock >> 8) == (smp_processor_id()+1); + return (cmos_lock >> 8) == (smp_processor_id() + 1); } + static inline unsigned char current_lock_cmos_reg(void) { return cmos_lock & 0xff; } -#define lock_cmos_prefix(reg) \ + +#define lock_cmos_prefix(reg) \ do { \ unsigned long cmos_flags; \ local_irq_save(cmos_flags); \ lock_cmos(reg) -#define lock_cmos_suffix(reg) \ - unlock_cmos(); \ - local_irq_restore(cmos_flags); \ + +#define lock_cmos_suffix(reg) \ + unlock_cmos(); \ + 
local_irq_restore(cmos_flags); \ } while (0) #else #define lock_cmos_prefix(reg) do {} while (0) -- cgit v1.2.3 From 77d511ed744f7febcd2a3444f63e6c54ac32d0c3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:41 -0700 Subject: include/asm-x86/mca_dma.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mca_dma.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h index fbb1f3b7127..c3dca6edc6b 100644 --- a/include/asm-x86/mca_dma.h +++ b/include/asm-x86/mca_dma.h @@ -12,18 +12,18 @@ * count by 2 when using 16-bit dma; that is not handled by these functions. * * Ramen Noodles are yummy. - * - * 1998 Tymm Twillman + * + * 1998 Tymm Twillman */ /* - * Registers that are used by the DMA controller; FN is the function register + * Registers that are used by the DMA controller; FN is the function register * (tell the controller what to do) and EXE is the execution register (how * to do it) */ #define MCA_DMA_REG_FN 0x18 -#define MCA_DMA_REG_EXE 0x1A +#define MCA_DMA_REG_EXE 0x1A /* * Functions that the DMA controller can do @@ -43,9 +43,9 @@ /* * Modes (used by setting MCA_DMA_FN_MODE in the function register) - * + * * Note that the MODE_READ is read from memory (write to device), and - * MODE_WRITE is vice-versa. + * MODE_WRITE is vice-versa. */ #define MCA_DMA_MODE_XFER 0x04 /* read by default */ @@ -63,7 +63,7 @@ * IRQ context. */ -static __inline__ void mca_enable_dma(unsigned int dmanr) +static inline void mca_enable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); } @@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr) * IRQ context. */ -static __inline__ void mca_disable_dma(unsigned int dmanr) +static inline void mca_disable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); } @@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr) * @a: 24bit bus address * * Load the address register in the DMA controller. This has a 24bit - * limitation (16Mb). + * limitation (16Mb). */ -static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) +static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a) { outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); outb(a & 0xff, MCA_DMA_REG_EXE); @@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) * limitation (16Mb). The return is a bus address. */ -static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) +static inline unsigned int mca_get_dma_addr(unsigned int dmanr) { unsigned int addr; outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); addr = inb(MCA_DMA_REG_EXE); addr |= inb(MCA_DMA_REG_EXE) << 8; - addr |= inb(MCA_DMA_REG_EXE) << 16; + addr |= inb(MCA_DMA_REG_EXE) << 16; return addr; } @@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) * Setting a count of zero will not do what you expect. */ -static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) +static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count) { count--; /* transfers one more than count -- correct for this */ @@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) * on this DMA channel. 
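[Editorial sketch] The mca_dma helpers above all follow one handshake: write a function code to the FN register, then feed the operand to the EXE register one byte at a time, least-significant byte first, which is why a 24-bit address becomes three outb() calls. A user-space model with a mock outb() (the FN_SET_ADDR value is illustrative, not taken from the header):

	#include <stdio.h>

	#define MCA_DMA_REG_FN		0x18
	#define MCA_DMA_REG_EXE		0x1A
	#define MCA_DMA_FN_SET_ADDR	0x20	/* illustrative value only */

	static void mock_outb(unsigned char val, unsigned short port)
	{
		printf("outb(0x%02x, 0x%02x)\n", val, port);
	}

	static void demo_set_dma_addr(unsigned int dmanr, unsigned int a)
	{
		mock_outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
		mock_outb(a & 0xff, MCA_DMA_REG_EXE);		/* low byte first */
		mock_outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
		mock_outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);	/* 24-bit limit */
	}

	int main(void)
	{
		demo_set_dma_addr(1, 0x123456);
		return 0;
	}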
*/ -static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) +static inline unsigned int mca_get_dma_residue(unsigned int dmanr) { unsigned short count; @@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) * with an I/O port target. */ -static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) +static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) { /* * DMA from a port address -- set the io address */ - + outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); outb(io_addr & 0xff, MCA_DMA_REG_EXE); outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); @@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) * %MCA_DMA_MODE_16 to do 16bit transfers. */ -static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) +static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) { outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); outb(mode, MCA_DMA_REG_EXE); -- cgit v1.2.3 From 55464da94a845e057ffb94a9fc7be1aa86ffcd89 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:42 -0700 Subject: include/asm-x86/mmu_context_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu_context_32.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 8198d1cca1f..9756ae0f1dd 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h @@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled + /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. 
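[Editorial sketch] The lazy-TLB comment being rewrapped here hides the interesting case: when a CPU stays on the same mm but had dropped out of the flush set while lazy, the page tables may have changed under it, so %cr3 must be reloaded. A compressed user-space model of that decision (not kernel code; the bitmask stands in for cpu_vm_mask):

	#include <stdio.h>

	struct mm {
		int pgd;			/* stands in for the page-table root */
		unsigned long cpu_vm_mask;
	};

	static void demo_switch_mm(struct mm *prev, struct mm *next, int cpu)
	{
		if (prev != next) {
			next->cpu_vm_mask |= 1UL << cpu;
			printf("load_cr3(%d)\n", next->pgd);
		} else if (!(next->cpu_vm_mask & (1UL << cpu))) {
			/* was lazy: leave_mm() cleared us from the flush set */
			next->cpu_vm_mask |= 1UL << cpu;
			printf("reload_cr3(%d) after lazy period\n", next->pgd);
		}
	}

	int main(void)
	{
		struct mm a = { 1, 0 }, b = { 2, 0 };

		demo_switch_mm(&a, &b, 0);	/* real switch: load_cr3 */
		b.cpu_vm_mask = 0;		/* pretend leave_mm() ran */
		demo_switch_mm(&b, &b, 0);	/* lazy re-entry: must reload */
		return 0;
	}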
*/ load_cr3(next->pgd); @@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev, #define deactivate_mm(tsk, mm) \ asm("movl %0,%%gs": :"r" (0)); -#define activate_mm(prev, next) \ - do { \ - paravirt_activate_mm(prev, next); \ - switch_mm((prev),(next),NULL); \ - } while(0); +#define activate_mm(prev, next) \ +do { \ + paravirt_activate_mm((prev), (next)); \ + switch_mm((prev), (next), NULL); \ +} while (0); #endif -- cgit v1.2.3 From c4fe760efde84e52168a81bf125f25ba2f118b51 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:43 -0700 Subject: include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu_context_64.h | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index ad6dc821ef9..ca44c71e7fb 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h @@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP - if (read_pda(mmu_state) == TLBSTATE_OK) + if (read_pda(mmu_state) == TLBSTATE_OK) write_pda(mmu_state, TLBSTATE_LAZY); #endif } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); @@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, cpu_set(cpu, next->cpu_vm_mask); load_cr3(next->pgd); - if (unlikely(next->context.ldt != prev->context.ldt)) + if (unlikely(next->context.ldt != prev->context.ldt)) load_LDT_nolock(&next->context); } #ifdef CONFIG_SMP @@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, if (read_pda(active_mm) != next) BUG(); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled + /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. */ @@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, #endif } -#define deactivate_mm(tsk,mm) do { \ - load_gs_index(0); \ - asm volatile("movl %0,%%fs"::"r"(0)); \ -} while(0) +#define deactivate_mm(tsk, mm) \ +do { \ + load_gs_index(0); \ + asm volatile("movl %0,%%fs"::"r"(0)); \ +} while (0) -#define activate_mm(prev, next) \ - switch_mm((prev),(next),NULL) +#define activate_mm(prev, next) \ + switch_mm((prev), (next), NULL) #endif -- cgit v1.2.3 From 710d0e9cce8bfcfa821c5929c1d4dbf30a660ce6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:44 -0700 Subject: include/asm-x86/mmu.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index efa962c3889..00e88679e11 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h @@ -10,10 +10,10 @@ * * cpu_vm_mask is used to optimize ldt flushing. 
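[Editorial sketch] The activate_mm()/deactivate_mm() rewrites above keep the do { ... } while (0) wrapper that checkpatch insists on for multi-statement macros. The reason is composition: the wrapper makes the expansion a single statement, so the caller's trailing semicolon does not break an if/else. A minimal standalone demonstration:

	#include <stdio.h>

	#define GOOD(x)				\
	do {					\
		printf("first %d\n", (x));	\
		printf("second %d\n", (x));	\
	} while (0)

	int main(void)
	{
		int flag = 0;

		if (flag)
			GOOD(1);	/* expands to one statement, so the
					 * caller's ';' is harmless and the
					 * else below still parses; a bare
					 * { } block would fail to compile */
		else
			printf("else branch taken\n");
		return 0;
	}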
*/ -typedef struct { +typedef struct { void *ldt; #ifdef CONFIG_X86_64 - rwlock_t ldtlock; + rwlock_t ldtlock; #endif int size; struct mutex lock; -- cgit v1.2.3 From f2334076890bbe3cddca2c053684653c614e9b48 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:45 -0700 Subject: include/asm-x86/mmx.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h index 46b71da9986..940881218ff 100644 --- a/include/asm-x86/mmx.h +++ b/include/asm-x86/mmx.h @@ -6,7 +6,7 @@ */ #include - + extern void *_mmx_memcpy(void *to, const void *from, size_t size); extern void mmx_clear_page(void *page); extern void mmx_copy_page(void *to, void *from); -- cgit v1.2.3 From 7491d33d9ab042f3fdb9ec00054f69737dcd180f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:46 -0700 Subject: include/asm-x86/mmzone_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index b9f5be2f603..cb2cad0b65a 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -18,7 +18,7 @@ extern struct pglist_data *node_data[]; #include #endif -extern int get_memcfg_numa_flat(void ); +extern int get_memcfg_numa_flat(void); /* * This allows any one NUMA architecture to be compiled * for, and still fall back to the flat function if it -- cgit v1.2.3 From 60e9bfd1bd5be433e56b050e025d3d5c91c967a8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:47 -0700 Subject: include/asm-x86/mmzone_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_64.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index ebaf9663aa8..594bd0dc1d0 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h @@ -7,7 +7,7 @@ #ifdef CONFIG_NUMA -#define VIRTUAL_BUG_ON(x) +#define VIRTUAL_BUG_ON(x) #include @@ -16,7 +16,7 @@ struct memnode { int shift; unsigned int mapsize; s16 *map; - s16 embedded_map[64-8]; + s16 embedded_map[64 - 8]; } ____cacheline_aligned; /* total size = 128 bytes */ extern struct memnode memnode; #define memnode_shift memnode.shift @@ -25,27 +25,27 @@ extern struct memnode memnode; extern struct pglist_data *node_data[]; -static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) -{ - unsigned nid; +static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) +{ + unsigned nid; VIRTUAL_BUG_ON(!memnodemap); VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); - nid = memnodemap[addr >> memnode_shift]; - VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); - return nid; -} + nid = memnodemap[addr >> memnode_shift]; + VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); + return nid; +} #define NODE_DATA(nid) (node_data[nid]) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ +#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages) extern int early_pfn_to_nid(unsigned long pfn); #ifdef CONFIG_NUMA_EMU 
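[Editorial sketch] The phys_to_nid() lookup reindented above is one shift plus one array index: physical memory is cut into equal chunks of 1 << memnode_shift bytes and a small s16 map records which node owns each chunk. A user-space sketch with toy numbers (the real shift and map are computed at boot by compute_hash_shift()):

	#include <stdio.h>

	static short memnode_map[] = { 0, 0, 1, 1 };	/* 4 chunks, 2 nodes */
	static int memnode_shift = 24;			/* 16 MB chunks, toy value */

	static int demo_phys_to_nid(unsigned long addr)
	{
		return memnode_map[addr >> memnode_shift];
	}

	int main(void)
	{
		printf("0x%x -> node %d\n", 0x0100000, demo_phys_to_nid(0x0100000));
		printf("0x%x -> node %d\n", 0x3000000, demo_phys_to_nid(0x3000000));
		return 0;
	}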
-#define FAKE_NODE_MIN_SIZE (64*1024*1024) -#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL)) +#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) +#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) #endif #endif -- cgit v1.2.3 From 8f08403e61a86c3179642239184aff3a5f636be1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:48 -0700 Subject: include/asm-x86/mpspec_def.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec_def.h | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 1f35691b4f7..dc6ef85e362 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h @@ -11,7 +11,7 @@ * information is. */ -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') +#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_') #ifdef CONFIG_X86_32 # define MAX_MPC_ENTRY 1024 @@ -23,8 +23,7 @@ # define MAX_APICS 255 #endif -struct intel_mp_floating -{ +struct intel_mp_floating { char mpf_signature[4]; /* "_MP_" */ unsigned int mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ @@ -39,14 +38,13 @@ struct intel_mp_floating #define MPC_SIGNATURE "PCMP" -struct mp_config_table -{ +struct mp_config_table { char mpc_signature[4]; unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; + char mpc_spec; /* 0x01 */ + char mpc_checksum; + char mpc_oem[8]; + char mpc_productid[12]; unsigned int mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; @@ -71,8 +69,7 @@ struct mp_config_table #define CPU_MODEL_MASK 0x00F0 #define CPU_FAMILY_MASK 0x0F00 -struct mpc_config_processor -{ +struct mpc_config_processor { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ @@ -82,8 +79,7 @@ struct mpc_config_processor unsigned int mpc_reserved[2]; }; -struct mpc_config_bus -{ +struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; @@ -111,8 +107,7 @@ struct mpc_config_bus #define MPC_APIC_USABLE 0x01 -struct mpc_config_ioapic -{ +struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; @@ -120,8 +115,7 @@ struct mpc_config_ioapic unsigned int mpc_apicaddr; }; -struct mpc_config_intsrc -{ +struct mpc_config_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; @@ -144,8 +138,7 @@ enum mp_irq_source_types { #define MP_APIC_ALL 0xFF -struct mpc_config_lintsrc -{ +struct mpc_config_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; @@ -157,8 +150,7 @@ struct mpc_config_lintsrc #define MPC_OEM_SIGNATURE "_OEM" -struct mp_config_oemtable -{ +struct mp_config_oemtable { char oem_signature[4]; unsigned short oem_length; /* Size of table */ char oem_rev; /* 0x01 */ @@ -185,4 +177,3 @@ enum mp_bustype { MP_BUS_MCA, }; #endif - -- cgit v1.2.3 From 30971e17ee484f72e081826a0bf3e489ef3b4c30 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:49 -0700 Subject: include/asm-x86/mpspec.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 23 
+++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 99da0a59b43..eccbc581ec8 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -55,8 +55,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) -struct physid_mask -{ +struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; }; @@ -65,34 +64,34 @@ typedef struct physid_mask physid_mask_t; #define physid_set(physid, map) set_bit(physid, (map).mask) #define physid_clear(physid, map) clear_bit(physid, (map).mask) #define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) \ +#define physid_test_and_set(physid, map) \ test_and_set_bit(physid, (map).mask) -#define physids_and(dst, src1, src2) \ +#define physids_and(dst, src1, src2) \ bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) \ +#define physids_or(dst, src1, src2) \ bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) \ +#define physids_clear(map) \ bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) \ +#define physids_complement(dst, src) \ bitmap_complement((dst).mask, (src).mask, MAX_APICS) -#define physids_empty(map) \ +#define physids_empty(map) \ bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) \ +#define physids_equal(map1, map2) \ bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) \ +#define physids_weight(map) \ bitmap_weight((map).mask, MAX_APICS) -#define physids_shift_right(d, s, n) \ +#define physids_shift_right(d, s, n) \ bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) -#define physids_shift_left(d, s, n) \ +#define physids_shift_left(d, s, n) \ bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) #define physids_coerce(map) ((map).mask[0]) -- cgit v1.2.3 From 934902b474bdb235a273985ad4c61eb136afe11d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:50 -0700 Subject: include/asm-x86/msidef.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/msidef.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 5b8acddb70f..296f29ce426 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h @@ -11,7 +11,8 @@ #define MSI_DATA_VECTOR_SHIFT 0 #define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) +#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ + MSI_DATA_VECTOR_MASK) #define MSI_DATA_DELIVERY_MODE_SHIFT 8 #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) @@ -37,11 +38,14 @@ #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) #define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) + /* dedicated cpu */ +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + /* lowest priority */ #define MSI_ADDR_DEST_ID_SHIFT 12 #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 -#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & 
MSI_ADDR_DEST_ID_MASK) +#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ + MSI_ADDR_DEST_ID_MASK) #endif /* ASM_MSIDEF_H */ -- cgit v1.2.3 From abb0ade013507c93a9a0b263bbb7b0327d7c38db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:51 -0700 Subject: include/asm-x86/msr.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/msr.h | 81 ++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 3ca29ebebbb..2c698a2e81f 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h @@ -16,8 +16,8 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) { unsigned long low, high; - asm volatile (".byte 0x0f,0x01,0xf9" - : "=a" (low), "=d" (high), "=c" (*aux)); + asm volatile(".byte 0x0f,0x01,0xf9" + : "=a" (low), "=d" (high), "=c" (*aux)); return low | ((u64)high >> 32); } @@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) */ #ifdef CONFIG_X86_64 #define DECLARE_ARGS(val, low, high) unsigned low, high -#define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) +#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) #else @@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %3,%0 ; jmp 1b\n\t" ".previous\n\t" - _ASM_EXTABLE(2b,3b) + _ASM_EXTABLE(2b, 3b) : "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr), "i" (-EFAULT)); return EAX_EDX_VAL(val, low, high); @@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %4,%0 ; jmp 1b\n\t" ".previous\n\t" - _ASM_EXTABLE(2b,3b) + _ASM_EXTABLE(2b, 3b) : "=a" (err) : "c" (msr), "0" (low), "d" (high), - "i" (-EFAULT)); + "i" (-EFAULT)); return err; } @@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter) * pointer indirection), this allows gcc to optimize better */ -#define rdmsr(msr,val1,val2) \ - do { \ - u64 __val = native_read_msr(msr); \ - (val1) = (u32)__val; \ - (val2) = (u32)(__val >> 32); \ - } while(0) +#define rdmsr(msr, val1, val2) \ +do { \ + u64 __val = native_read_msr((msr)); \ + (val1) = (u32)__val; \ + (val2) = (u32)(__val >> 32); \ +} while (0) static inline void wrmsr(unsigned msr, unsigned low, unsigned high) { native_write_msr(msr, low, high); } -#define rdmsrl(msr,val) \ - ((val) = native_read_msr(msr)) +#define rdmsrl(msr, val) \ + ((val) = native_read_msr((msr))) #define wrmsrl(msr, val) \ - native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) + native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) /* wrmsr with exception handling */ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) @@ -141,14 +141,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) } /* rdmsr with exception handling */ -#define rdmsr_safe(msr,p1,p2) \ - ({ \ - int __err; \ - u64 __val = native_read_msr_safe(msr, &__err); \ - (*p1) = (u32)__val; \ - (*p2) = (u32)(__val >> 32); \ - __err; \ - }) +#define rdmsr_safe(msr, p1, p2) \ +({ \ + int __err; \ + u64 __val = native_read_msr_safe((msr), &__err); \ + (*p1) = (u32)__val; \ + (*p2) = (u32)(__val >> 32); \ + __err; \ +}) #define rdtscl(low) \ ((low) = 
(u32)native_read_tsc()) @@ -156,35 +156,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) #define rdtscll(val) \ ((val) = native_read_tsc()) -#define rdpmc(counter,low,high) \ - do { \ - u64 _l = native_read_pmc(counter); \ - (low) = (u32)_l; \ - (high) = (u32)(_l >> 32); \ - } while(0) +#define rdpmc(counter, low, high) \ +do { \ + u64 _l = native_read_pmc((counter)); \ + (low) = (u32)_l; \ + (high) = (u32)(_l >> 32); \ +} while (0) -#define rdtscp(low, high, aux) \ - do { \ - unsigned long long _val = native_read_tscp(&(aux)); \ - (low) = (u32)_val; \ - (high) = (u32)(_val >> 32); \ - } while (0) +#define rdtscp(low, high, aux) \ +do { \ + unsigned long long _val = native_read_tscp(&(aux)); \ + (low) = (u32)_val; \ + (high) = (u32)(_val >> 32); \ +} while (0) #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) #endif /* !CONFIG_PARAVIRT */ -#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) +#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ + (u32)((val) >> 32)) -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) +#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2)) -#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) +#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) #ifdef CONFIG_SMP void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); + int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); #else /* CONFIG_SMP */ static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) @@ -195,7 +197,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { wrmsr(msr_no, l, h); } -static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) +static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, + u32 *l, u32 *h) { return rdmsr_safe(msr_no, l, h); } -- cgit v1.2.3 From 9969b4405469e12070c560ff27dbe587470fc945 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:52 -0700 Subject: include/asm-x86/mtrr.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mtrr.h | 64 +++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index d3d26625aea..a69a01a5172 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -28,8 +28,7 @@ #define MTRR_IOCTL_BASE 'M' -struct mtrr_sentry -{ +struct mtrr_sentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ @@ -41,8 +40,7 @@ struct mtrr_sentry will break. 
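[Editorial sketch] The rdmsr_safe() rework above leans on a GNU C extension, the statement expression ({ ... }), whose final expression becomes the value of the whole macro; that is how one macro can fill *p1/*p2 with the two 32-bit halves and still "return" the error code. A self-contained sketch of the same shape (gcc/clang only, no MSR access, the source value is just a constant):

	#include <stdio.h>

	#define demo_read_safe(src, p1, p2)			\
	({							\
		int __err = 0;					\
		unsigned long long __val = (src);		\
		(*(p1)) = (unsigned int)__val;			\
		(*(p2)) = (unsigned int)(__val >> 32);		\
		__err;	/* value of the whole expression */	\
	})

	int main(void)
	{
		unsigned int lo, hi;
		int err = demo_read_safe(0x1122334455667788ULL, &lo, &hi);

		printf("err=%d lo=0x%x hi=0x%x\n", err, lo, hi);
		return 0;
	}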
*/ #ifdef __i386__ -struct mtrr_gentry -{ +struct mtrr_gentry { unsigned int regnum; /* Register number */ unsigned long base; /* Base address */ unsigned int size; /* Size of region */ @@ -51,8 +49,7 @@ struct mtrr_gentry #else /* __i386__ */ -struct mtrr_gentry -{ +struct mtrr_gentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int regnum; /* Register number */ @@ -89,12 +86,12 @@ struct mtrr_gentry extern u8 mtrr_type_lookup(u64 addr, u64 end); extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); -extern int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, bool increment); -extern int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, bool increment); -extern int mtrr_del (int reg, unsigned long base, unsigned long size); -extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); +extern int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, bool increment); +extern int mtrr_add_page(unsigned long base, unsigned long size, + unsigned int type, bool increment); +extern int mtrr_del(int reg, unsigned long base, unsigned long size); +extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); @@ -110,23 +107,21 @@ static inline u8 mtrr_type_lookup(u64 addr, u64 end) } #define mtrr_save_fixed_ranges(arg) do {} while (0) #define mtrr_save_state() do {} while (0) -static __inline__ int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, bool increment) +static inline int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, bool increment) { return -ENODEV; } -static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, +static inline int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, bool increment) { return -ENODEV; } -static __inline__ int mtrr_del (int reg, unsigned long base, - unsigned long size) +static inline int mtrr_del(int reg, unsigned long base, unsigned long size) { return -ENODEV; } -static __inline__ int mtrr_del_page (int reg, unsigned long base, - unsigned long size) +static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size) { return -ENODEV; } @@ -134,7 +129,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) { return 0; } -static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} +static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) +{ +} #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) @@ -143,15 +140,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} #ifdef CONFIG_COMPAT #include -struct mtrr_sentry32 -{ +struct mtrr_sentry32 { compat_ulong_t base; /* Base address */ compat_uint_t size; /* Size of region */ compat_uint_t type; /* Type of region */ }; -struct mtrr_gentry32 -{ +struct mtrr_gentry32 { compat_ulong_t regnum; /* Register number */ compat_uint_t base; /* Base address */ compat_uint_t size; /* Size of region */ @@ -160,16 +155,17 @@ struct mtrr_gentry32 #define MTRR_IOCTL_BASE 'M' -#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) -#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) -#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) -#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, 
struct mtrr_gentry32) -#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) -#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) -#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) -#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) -#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) -#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) +#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) +#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) +#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) +#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) +#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) +#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) +#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) +#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) +#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) +#define MTRRIOC32_KILL_PAGE_ENTRY \ + _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) #endif /* CONFIG_COMPAT */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From b2347fad517f61553e03135db60def2392d9c2bc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:53 -0700 Subject: include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_32.h | 64 ++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index 9a6b3da2591..73e928ef5f0 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h @@ -21,22 +21,20 @@ * wasn't 1 originally. This function MUST leave the value lower than 1 * even when the "1" assertion wasn't true. */ -#define __mutex_fastpath_lock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ +#define __mutex_fastpath_lock(count, fail_fn) \ +do { \ + unsigned int dummy; \ + \ + typecheck(atomic_t *, count); \ typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " decl (%%eax) \n" \ - " jns 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ + \ + asm volatile(LOCK_PREFIX " decl (%%eax)\n" \ + " jns 1f \n" \ + " call " #fail_fn "\n" \ + "1:\n" \ + : "=a" (dummy) \ + : "a" (count) \ + : "memory", "ecx", "edx"); \ } while (0) @@ -50,8 +48,8 @@ do { \ * wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_lock_retval(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); @@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs * to return 0 otherwise. 
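[Editorial sketch] The mutex fastpath above decrements the count and lets "jns" (jump if not sign) fall through when the result stays non-negative; only a contended mutex pays for the call into the slowpath. A portable model of that split using the GCC atomic builtin in place of the locked decl (user-space sketch, not the kernel implementation):

	#include <stdio.h>

	static int count = 1;	/* 1 = unlocked */

	static void demo_slowpath(void)
	{
		printf("contended: entering slowpath\n");
	}

	static void demo_mutex_lock(void)
	{
		/* negative result means someone already held it */
		if (__atomic_sub_fetch(&count, 1, __ATOMIC_ACQUIRE) < 0)
			demo_slowpath();
	}

	int main(void)
	{
		demo_mutex_lock();	/* fastpath: 1 -> 0 */
		demo_mutex_lock();	/* slowpath: 0 -> -1 */
		return 0;
	}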
*/ -#define __mutex_fastpath_unlock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ +#define __mutex_fastpath_unlock(count, fail_fn) \ +do { \ + unsigned int dummy; \ + \ + typecheck(atomic_t *, count); \ typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " incl (%%eax) \n" \ - " jg 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ + \ + asm volatile(LOCK_PREFIX " incl (%%eax)\n" \ + " jg 1f\n" \ + " call " #fail_fn "\n" \ + "1:\n" \ + : "=a" (dummy) \ + : "a" (count) \ + : "memory", "ecx", "edx"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 @@ -104,8 +100,8 @@ do { \ * Additionally, if the value was < 0 originally, this function must not leave * it to 0 on failure. */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_trylock(atomic_t *count, + int (*fail_fn)(atomic_t *)) { /* * We have two variants here. The cmpxchg based one is the best one -- cgit v1.2.3 From 2c4e8830414de84cc969a1f9685f4c48b91ca6e7 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:54 -0700 Subject: include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_64.h | 73 +++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h index 6c2949a3c67..f3fae9becb3 100644 --- a/include/asm-x86/mutex_64.h +++ b/include/asm-x86/mutex_64.h @@ -16,23 +16,21 @@ * * Atomically decrements @v and calls if the result is negative. */ -#define __mutex_fastpath_lock(v, fail_fn) \ -do { \ - unsigned long dummy; \ - \ - typecheck(atomic_t *, v); \ - typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " decl (%%rdi) \n" \ - " jns 1f \n" \ - " call "#fail_fn" \n" \ - "1:" \ - \ - :"=D" (dummy) \ - : "D" (v) \ - : "rax", "rsi", "rdx", "rcx", \ - "r8", "r9", "r10", "r11", "memory"); \ +#define __mutex_fastpath_lock(v, fail_fn) \ +do { \ + unsigned long dummy; \ + \ + typecheck(atomic_t *, v); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ + \ + asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \ + " jns 1f \n" \ + " call " #fail_fn "\n" \ + "1:" \ + : "=D" (dummy) \ + : "D" (v) \ + : "rax", "rsi", "rdx", "rcx", \ + "r8", "r9", "r10", "r11", "memory"); \ } while (0) /** @@ -45,9 +43,8 @@ do { \ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, - int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_lock_retval(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); @@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count, * * Atomically increments @v and calls if the result is nonpositive. 
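[Editorial sketch] The typecheck()/typecheck_fn() lines kept through these reformats are compile-time-only guards: comparing the addresses of two dummies makes gcc warn if the macro is handed the wrong pointer type, and the whole construct optimizes away. A reduced sketch modeled on the kernel's typecheck() (the demo_ name is ours):

	#include <stdio.h>

	#define demo_typecheck(type, x)			\
	({						\
		type __dummy;				\
		__typeof__(x) __dummy2;			\
		(void)(&__dummy == &__dummy2);		\
		1;					\
	})

	int main(void)
	{
		long v = 42;

		demo_typecheck(long, v);	/* fine, compiles silently */
		/* demo_typecheck(int, v);	   would warn: comparison of
						   distinct pointer types */
		printf("%ld\n", v);
		return 0;
	}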
*/ -#define __mutex_fastpath_unlock(v, fail_fn) \ -do { \ - unsigned long dummy; \ - \ - typecheck(atomic_t *, v); \ - typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " incl (%%rdi) \n" \ - " jg 1f \n" \ - " call "#fail_fn" \n" \ - "1: " \ - \ - :"=D" (dummy) \ - : "D" (v) \ - : "rax", "rsi", "rdx", "rcx", \ - "r8", "r9", "r10", "r11", "memory"); \ +#define __mutex_fastpath_unlock(v, fail_fn) \ +do { \ + unsigned long dummy; \ + \ + typecheck(atomic_t *, v); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ + \ + asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \ + " jg 1f\n" \ + " call " #fail_fn "\n" \ + "1:" \ + : "=D" (dummy) \ + : "D" (v) \ + : "rax", "rsi", "rdx", "rcx", \ + "r8", "r9", "r10", "r11", "memory"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 @@ -93,8 +88,8 @@ do { \ * if it wasn't 1 originally. [the fallback function is never used on * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_trylock(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (likely(atomic_cmpxchg(count, 1, 0) == 1)) return 1; -- cgit v1.2.3 From cb046eed76b7f74e619479f1aba17e74ce6f5159 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:55 -0700 Subject: include/asm-x86/numa_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/numa_64.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 15fe07cde58..32c22ae0709 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h @@ -1,11 +1,12 @@ -#ifndef _ASM_X8664_NUMA_H +#ifndef _ASM_X8664_NUMA_H #define _ASM_X8664_NUMA_H 1 #include #include struct bootnode { - u64 start,end; + u64 start; + u64 end; }; extern int compute_hash_shift(struct bootnode *nodes, int numnodes); -- cgit v1.2.3 From 5f4e4b7209deb3cd7cf16ebb7bf84917e4b6682a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:56 -0700 Subject: include/asm-x86/numaq.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/numaq.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h index 38f710dc37f..94b86c31239 100644 --- a/include/asm-x86/numaq.h +++ b/include/asm-x86/numaq.h @@ -3,7 +3,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void); /* * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the */ -#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ +#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private + quad space */ /* * Communication area for each processor on lynxer-processor tests. 
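[Editorial sketch] The __mutex_fastpath_trylock() just reformatted rests on a single compare-and-swap: 1 -> 0 succeeds only if nobody held the lock, so there is no loop and no slowpath on failure. A user-space equivalent with the GCC builtin (the kernel's atomic_cmpxchg() compiles down to the cmpxchg instruction):

	#include <stdio.h>
	#include <stdbool.h>

	static int count = 1;	/* 1 = unlocked */

	static bool demo_trylock(void)
	{
		int expected = 1;

		/* swap 1 -> 0 atomically; fails if count was not 1 */
		return __atomic_compare_exchange_n(&count, &expected, 0, false,
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}

	int main(void)
	{
		printf("first try: %d\n", demo_trylock());	/* 1: got it */
		printf("second try: %d\n", demo_trylock());	/* 0: already held */
		return 0;
	}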
@@ -139,7 +140,7 @@ struct sys_cfg_data { unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ /* may not be totally populated */ - unsigned int split_mem_enbl; /* 0 for no low shared memory */ + unsigned int split_mem_enbl; /* 0 for no low shared memory */ unsigned int mmio_sz; /* Size of total system memory mapped I/O */ /* (in MB). */ unsigned int quad_spin_lock; /* Spare location used for quad */ @@ -152,7 +153,7 @@ struct sys_cfg_data { /* * memory configuration area for each quad */ - struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ + struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ }; static inline unsigned long *get_zholes_size(int nid) -- cgit v1.2.3 From 095d1c4e61d77de4b33ea3b202aa6342f69d9891 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:57 -0700 Subject: include/asm-x86/page_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/page_32.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 5f7257fd589..424e82f8ae2 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h @@ -47,7 +47,10 @@ typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; typedef unsigned long phys_addr_t; -typedef union { pteval_t pte, pte_low; } pte_t; +typedef union { + pteval_t pte; + pteval_t pte_low; +} pte_t; #endif /* __ASSEMBLY__ */ #endif /* CONFIG_X86_PAE */ @@ -61,7 +64,7 @@ typedef struct page *pgtable_t; #endif #ifndef __ASSEMBLY__ -#define __phys_addr(x) ((x)-PAGE_OFFSET) +#define __phys_addr(x) ((x) - PAGE_OFFSET) #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) #ifdef CONFIG_FLATMEM @@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) -#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) +#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE) #ifdef CONFIG_X86_USE_3DNOW #include -- cgit v1.2.3 From b20a4615944a0c106fce2aecb7ea1dbc8eefc71b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:58 -0700 Subject: include/asm-x86/page_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/page_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index aee05c616e0..f156778f707 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -5,7 +5,7 @@ #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) -#define CURRENT_MASK (~(THREAD_SIZE-1)) +#define CURRENT_MASK (~(THREAD_SIZE - 1)) #define EXCEPTION_STACK_ORDER 0 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) @@ -51,7 +51,7 @@ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in * arch/x86/kernel/head_64.S), and it is mapped here: */ -#define KERNEL_IMAGE_SIZE (512*1024*1024) +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) #ifndef __ASSEMBLY__ -- cgit v1.2.3 From fad599854e3997a3e93559e19759a26b18c906c6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:59 -0700 Subject: include/asm-x86/param.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/param.h | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h index c996ec4da0c..6f0d0422f4c 100644 --- a/include/asm-x86/param.h +++ b/include/asm-x86/param.h @@ -3,8 +3,8 @@ #ifdef __KERNEL__ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ +# define USER_HZ 100 /* some user interfaces are */ +# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ #endif #ifndef HZ -- cgit v1.2.3 From 49cd740bb0b5796f34699a0f945b977f6ff34c64 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:00 -0700 Subject: include/asm-x86/paravirt.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/paravirt.h | 47 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index d6236eb4646..0c23f7940bc 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -231,7 +231,8 @@ struct pv_mmu_ops { void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (*pte_update)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -246,7 +247,8 @@ struct pv_mmu_ops { void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (*pte_clear)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); void (*pmd_clear)(pmd_t *pmdp); #endif /* CONFIG_X86_PAE */ @@ -274,8 +276,7 @@ struct pv_mmu_ops { /* This contains all the paravirt structures: we get a convenient * number for each function using the offset which we use to indicate * what to patch. */ -struct paravirt_patch_template -{ +struct paravirt_patch_template { struct pv_init_ops pv_init_ops; struct pv_time_ops pv_time_ops; struct pv_cpu_ops pv_cpu_ops; @@ -660,32 +661,37 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) } /* These should all do BUG_ON(_err), but our headers are too tangled. 
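[Editorial sketch] On the USER_HZ comment rewrapped in the param.h hunk above: the kernel ticks internally at CONFIG_HZ, but times() and similar interfaces report in USER_HZ units (100 on x86), which is exactly what sysconf(_SC_CLK_TCK) returns. Portable code therefore converts with that value instead of assuming the kernel tick rate; a small Linux user-space illustration:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/times.h>

	int main(void)
	{
		long user_hz = sysconf(_SC_CLK_TCK);	/* USER_HZ, not CONFIG_HZ */
		struct tms t;
		clock_t elapsed = times(&t);	/* ticks since an arbitrary point */

		printf("USER_HZ=%ld, elapsed ~%.2fs\n",
		       user_hz, (double)elapsed / user_hz);
		return 0;
	}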
From 5269354231c6e960941f6e1fd0737acc003d2353 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:01 -0700
Subject: include/asm-x86/parport.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/parport.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 019cbca24a3..3c4ffeb467e 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,10 +1,10 @@
 #ifndef _ASM_X86_PARPORT_H
 #define _ASM_X86_PARPORT_H

-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
 {
-	return parport_pc_find_isa_ports (autoirq, autodma);
+	return parport_pc_find_isa_ports(autoirq, autodma);
 }

 #endif /* _ASM_X86_PARPORT_H */
-- cgit v1.2.3

From 3cb47d79e99ad7b3a02badd7f64fbb57a1b125b3 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:02 -0700
Subject: include/asm-x86/pci_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pci_64.h | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index 37469031453..df867e5d80b 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,12 +1,10 @@
 #ifndef __x8664_PCI_H
 #define __x8664_PCI_H

-
 #ifdef __KERNEL__

-
 #ifdef CONFIG_CALGARY_IOMMU
-static inline void* pci_iommu(struct pci_bus *bus)
+static inline void *pci_iommu(struct pci_bus *bus)
 {
 	struct pci_sysdata *sd = bus->sysdata;
 	return sd->iommu;
@@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val)
 }
 #endif /* CONFIG_CALGARY_IOMMU */

-
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
+			      int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
+			       int reg, int len, u32 value);

 extern void pci_iommu_alloc(void);

@@ -65,5 +62,4 @@ extern void pci_iommu_alloc(void);

 #endif /* __KERNEL__ */

-
 #endif /* __x8664_PCI_H */
-- cgit v1.2.3
From 565c6402db09b1d1b7623ebd629f3adf6b86feaa Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:03 -0700
Subject: include/asm-x86/pci-direct.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pci-direct.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 6823fa4f1af..5b21485be57 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -4,7 +4,7 @@
 #include

 /* Direct PCI access. This is used for PCI accesses in early boot before
-   the PCI subsystem works. */
+   the PCI subsystem works. */

 extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
 extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
-- cgit v1.2.3
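read_pci_config() above does type 1 configuration-space accesses through I/O ports 0xCF8/0xCFC, which is why it works in early boot before the PCI subsystem initializes. A sketch of probing the host bridge at 0:00.0 (the helper name and check are illustrative):

	static u16 example_host_bridge_vendor(void)
	{
		/* dword at offset 0: vendor ID low 16 bits, device ID high 16 */
		u32 id = read_pci_config(0, 0, 0, 0x00);

		return id & 0xffff;	/* 0xffff means no device responded */
	}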
From 69bdb7bcc6b9494b1738c6f6dd3fe553c9c6978e Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:04 -0700
Subject: include/asm-x86/pci.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pci.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index c61190cb9e1..ddd8e248fc0 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -8,14 +8,13 @@
 #include
 #include

-
 #ifdef __KERNEL__

 struct pci_sysdata {
 	int		domain;		/* PCI domain */
 	int		node;		/* NUMA node */
 #ifdef CONFIG_X86_64
-	void*		iommu;		/* IOMMU private data */
+	void		*iommu;		/* IOMMU private data */
 #endif
 };

@@ -52,7 +51,7 @@ extern unsigned long pci_mem_start;
 #define PCIBIOS_MIN_CARDBUS_IO	0x4000

 void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
+struct pci_bus *pcibios_scan_root(int bus);

 void pcibios_set_master(struct pci_dev *dev);
 void pcibios_penalize_isa_irq(int irq, int active);
@@ -62,7 +61,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);

 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			       enum pci_mmap_state mmap_state, int write_combine);
+			       enum pci_mmap_state mmap_state,
+			       int write_combine);


 #ifdef CONFIG_PCI
-- cgit v1.2.3

From 46e1abc63e736644265e7ec2897f963a4ace5993 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:05 -0700
Subject: include/asm-x86/pda.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pda.h | 80 +++++++++++++++++++++++++++------------------------
 1 file changed, 42 insertions(+), 38 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index d9dc209c24a..101fb9e1195 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -57,34 +57,36 @@ extern struct x8664_pda _proxy_pda;

 #define pda_offset(field) offsetof(struct x8664_pda, field)

-#define pda_to_op(op, field, val) do {		\
-	typedef typeof(_proxy_pda.field) T__;	\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
-	switch (sizeof(_proxy_pda.field)) {	\
-	case 2:					\
-		asm(op "w %1,%%gs:%c2" :	\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i"(pda_offset(field)));	\
-		break;				\
-	case 4:					\
-		asm(op "l %1,%%gs:%c2" :	\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i" (pda_offset(field)));	\
-		break;				\
-	case 8:					\
-		asm(op "q %1,%%gs:%c2":		\
-		    "+m" (_proxy_pda.field) :	\
-		    "ri" ((T__)val),		\
-		    "i"(pda_offset(field)));	\
-		break;				\
-	default:				\
-		__bad_pda_field();		\
-	}					\
-	} while (0)
+#define pda_to_op(op, field, val)		\
+do {						\
+	typedef typeof(_proxy_pda.field) T__;	\
+	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
+	switch (sizeof(_proxy_pda.field)) {	\
+	case 2:					\
+		asm(op "w %1,%%gs:%c2" :	\
+		    "+m" (_proxy_pda.field) :	\
+		    "ri" ((T__)val),		\
+		    "i"(pda_offset(field)));	\
+		break;				\
+	case 4:					\
+		asm(op "l %1,%%gs:%c2" :	\
+		    "+m" (_proxy_pda.field) :	\
+		    "ri" ((T__)val),		\
+		    "i" (pda_offset(field)));	\
+		break;				\
+	case 8:					\
+		asm(op "q %1,%%gs:%c2":		\
+		    "+m" (_proxy_pda.field) :	\
+		    "ri" ((T__)val),		\
+		    "i"(pda_offset(field)));	\
+		break;				\
+	default:				\
+		__bad_pda_field();		\
+	}					\
+} while (0)

-#define pda_from_op(op,field) ({		\
+#define pda_from_op(op, field)			\
+({						\
 	typeof(_proxy_pda.field) ret__;		\
 	switch (sizeof(_proxy_pda.field)) {	\
 	case 2:					\
@@ -92,23 +94,24 @@ extern struct x8664_pda _proxy_pda;
 		    "=r" (ret__) :		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		break;				\
+		break;				\
 	case 4:					\
 		asm(op "l %%gs:%c1,%0":		\
 		    "=r" (ret__):		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		break;				\
+		break;				\
 	case 8:					\
 		asm(op "q %%gs:%c1,%0":		\
 		    "=r" (ret__) :		\
 		    "i" (pda_offset(field)),	\
 		    "m" (_proxy_pda.field));	\
-		break;				\
+		break;				\
 	default:				\
 		__bad_pda_field();		\
-	}					\
-	ret__; })
+	}					\
+	ret__;					\
+})

 #define read_pda(field)		pda_from_op("mov", field)
 #define write_pda(field, val)	pda_to_op("mov", field, val)
@@ -117,12 +120,13 @@ extern struct x8664_pda _proxy_pda;
 #define or_pda(field, val)	pda_to_op("or", field, val)

 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field) ({			\
-	int old__;						\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
-		     : "=r" (old__), "+m" (_proxy_pda.field)	\
-		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");	\
-	old__;							\
+#define test_and_clear_bit_pda(bit, field)			\
+({								\
+	int old__;						\
+	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
+		     : "=r" (old__), "+m" (_proxy_pda.field)	\
+		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
+	old__;							\
 })

 #endif
-- cgit v1.2.3
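The pda_to_op()/pda_from_op() switches above pick a 2-, 4- or 8-byte %gs-relative instruction at compile time from the sizeof() of the field, so each accessor costs a single mov. A usage sketch; pcurrent and oldrsp are fields of struct x8664_pda in this era's tree, but they are not shown in the patch, so treat them as assumptions:

	struct task_struct *tsk = read_pda(pcurrent);	/* movq %gs:off,%reg */

	unsigned long sp = read_pda(oldrsp);
	write_pda(oldrsp, sp);				/* movq %reg,%gs:off */

Because the field offset is passed as an "i" (immediate) constraint, an unsupported field size falls into the default: arm and fails at link time through the deliberately undefined __bad_pda_field().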
From bc9e3be20bab447519e864c4f9e09aae82ed0376 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:06 -0700
Subject: include/asm-x86/percpu.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/percpu.h | 104 ++++++++++++++++++++++++-----------------------
 1 file changed, 54 insertions(+), 50 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 0dec00f27eb..736fc3bb8e1 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -85,58 +85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);

-#define percpu_to_op(op,var,val)			\
-	do {						\
-		typedef typeof(var) T__;		\
-		if (0) { T__ tmp__; tmp__ = (val); }	\
-		switch (sizeof(var)) {			\
-		case 1:					\
-			asm(op "b %1,"__percpu_seg"%0"	\
-			    : "+m" (var)		\
-			    :"ri" ((T__)val));		\
-			break;				\
-		case 2:					\
-			asm(op "w %1,"__percpu_seg"%0"	\
-			    : "+m" (var)		\
-			    :"ri" ((T__)val));		\
-			break;				\
-		case 4:					\
-			asm(op "l %1,"__percpu_seg"%0"	\
-			    : "+m" (var)		\
-			    :"ri" ((T__)val));		\
-			break;				\
-		default: __bad_percpu_size();		\
-		}					\
-	} while (0)
-
-#define percpu_from_op(op,var)				\
-	({						\
-		typeof(var) ret__;			\
-		switch (sizeof(var)) {			\
-		case 1:					\
-			asm(op "b "__percpu_seg"%1,%0"	\
-			    : "=r" (ret__)		\
-			    : "m" (var));		\
-			break;				\
-		case 2:					\
-			asm(op "w "__percpu_seg"%1,%0"	\
-			    : "=r" (ret__)		\
-			    : "m" (var));		\
-			break;				\
-		case 4:					\
-			asm(op "l "__percpu_seg"%1,%0"	\
-			    : "=r" (ret__)		\
-			    : "m" (var));		\
-			break;				\
-		default: __bad_percpu_size();		\
-		}					\
-		ret__; })
+#define percpu_to_op(op, var, val)			\
+do {							\
+	typedef typeof(var) T__;			\
+	if (0) {					\
+		T__ tmp__;				\
+		tmp__ = (val);				\
+	}						\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	case 2:						\
+		asm(op "w %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	case 4:						\
+		asm(op "l %1,"__percpu_seg"%0"		\
+		    : "+m" (var)			\
+		    : "ri" ((T__)val));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+} while (0)
+
+#define percpu_from_op(op, var)				\
+({							\
+	typeof(var) ret__;				\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	case 2:						\
+		asm(op "w "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	case 4:						\
+		asm(op "l "__percpu_seg"%1,%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+	ret__;						\
+})

 #define x86_read_percpu(var)       percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var,val)  percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var,val)    percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var,val)    percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var,val)     percpu_to_op("or", per_cpu__##var, val)
+#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define x86_add_percpu(var, val)   percpu_to_op("add", per_cpu__##var, val)
+#define x86_sub_percpu(var, val)   percpu_to_op("sub", per_cpu__##var, val)
+#define x86_or_percpu(var, val)    percpu_to_op("or", per_cpu__##var, val)
 #endif /* !__ASSEMBLY__ */
 #endif /* !CONFIG_X86_64 */
 #endif /* _ASM_X86_PERCPU_H_ */
-- cgit v1.2.3
From 65e05d15edfdd6ecb4426894cf6e6b5ae97602e4 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:08 -0700
Subject: include/asm-x86/pgtable-2level.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable-2level.h | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 701404fab30..46bc52c0eae 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 	native_set_pte(ptep, pte);
 }

-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+					  unsigned long addr,
 					  pte_t *ptep, pte_t pte)
 {
 	native_set_pte(ptep, pte);
@@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp)
 	native_set_pmd(pmdp, __pmd(0));
 }

-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+static inline void native_pte_clear(struct mm_struct *mm,
+				    unsigned long addr, pte_t *xp)
 {
 	*xp = native_make_pte(0);
 }
@@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
  */
 #define PTE_FILE_MAX_BITS	29

-#define pte_to_pgoff(pte) \
-	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+#define pte_to_pgoff(pte)						\
+	((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))

-#define pgoff_to_pte(off) \
-	((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+#define pgoff_to_pte(off)						\
+	((pte_t) { .pte_low = (((off) & 0x1f) << 1) +			\
+			(((off) >> 5) << 8) + _PAGE_FILE })

 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x1f)
 #define __swp_offset(x)			((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset)				\
+	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
-- cgit v1.2.3
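A worked example of the 2-level nonlinear-file encoding reflowed above, assuming _PAGE_FILE is bit 6 (0x40, aliasing the dirty bit as elsewhere in these headers): offset bits 0-4 land in pte bits 1-5 and the rest start at pte bit 8, skipping the present bit and bits 6-7.

	/* encode pgoff 0x123 with pgoff_to_pte():
	 *   (0x123 & 0x1f) << 1 = 0x006    low five offset bits
	 *   (0x123 >> 5)   << 8 = 0x900    remaining offset bits
	 *   _PAGE_FILE          = 0x040    "file pte" marker, present stays 0
	 *   pte_low             = 0x946
	 *
	 * decode with pte_to_pgoff():
	 *   ((0x946 >> 1) & 0x1f) + ((0x946 >> 8) << 5) = 0x3 + 0x120 = 0x123
	 */

That split is also why PTE_FILE_MAX_BITS is 29: of the 32 pte bits, three (present, the file marker, and the bit 7 gap) cannot carry offset.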
From cf840147d48626d5d86d617cbc5b7cddc1bcae14 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:09 -0700
Subject: include/asm-x86/pgtable_32.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable_32.h | 102 ++++++++++++++++++++++---------------------
 1 file changed, 53 insertions(+), 49 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 1e2c0d83952..c4a64367445 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -40,13 +40,13 @@ void paging_init(void);
 #ifdef CONFIG_X86_PAE
 # include
 # define PMD_SIZE	(1UL << PMD_SHIFT)
-# define PMD_MASK	(~(PMD_SIZE-1))
+# define PMD_MASK	(~(PMD_SIZE - 1))
 #else
 # include
 #endif

 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -58,21 +58,22 @@ void paging_init(void);
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_OFFSET	(8*1024*1024)
-#define VMALLOC_START	(((unsigned long) high_memory + \
-			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_OFFSET	(8 * 1024 * 1024)
+#define VMALLOC_START	(((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
+			 & ~(VMALLOC_OFFSET - 1))
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
 #define LAST_PKMAP 1024
 #endif

-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
+		    & PMD_MASK)

 #ifdef CONFIG_HIGHMEM
-# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
+# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
+# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
 #endif

 /*
@@ -88,16 +89,16 @@ extern unsigned long pg0[];
 #define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)	(!(unsigned long)pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
+#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)

 extern int pmd_bad(pmd_t pmd);

-#define pmd_bad_v1(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pmd_bad_v2(x)	((pmd_val(x) \
-			  & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \
-			 != _KERNPG_TABLE)
-
+#define pmd_bad_v1(x)							\
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
+#define	pmd_bad_v2(x)							\
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER |	\
+					    _PAGE_PSE | _PAGE_NX)))

 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

@@ -123,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 }

 /*
- * Macro to mark a page protection value as "uncacheable".  On processors which do not support
- * it, this is a no-op.
+ * Macro to mark a page protection value as "uncacheable".
+ * On processors which do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
-				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+#define pgprot_noncached(prot)					\
+	((boot_cpu_data.x86 > 3)				\
+	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))	\
+	 : (prot))

 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
 */
-
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

 /*
@@ -142,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
  * this macro returns the index of the entry in the pgd page which would
  * control the given virtual address
  */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index((addr))

 /*
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
  */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

 static inline int pud_large(pud_t pud) { return 0; }

@@ -165,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; }
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
-#define pmd_index(address) \
-		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_index(address)				\
+	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -174,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; }
  * this macro returns the index of the entry in the pte page which would
  * control the given virtual address
  */
-#define pte_index(address) \
-		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
+#define pte_index(address)					\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address)				\
+	((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))

-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

-#define pmd_page_vaddr(pmd) \
-		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_vaddr(pmd)					\
+	((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))

 #if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_offset_map(dir, address)					\
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
+	 pte_index((address)))
+#define pte_offset_map_nested(dir, address)				\
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
+	 pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
 #else
-#define pte_offset_map(dir, address) \
-	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_offset_map(dir, address)					\
+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 #endif

 /* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr)		\
-do {						\
-	pte_clear(&init_mm, vaddr, ptep);	\
-	__flush_tlb_one(vaddr);			\
+#define kpte_clear_flush(ptep, vaddr)		\
+do {						\
+	pte_clear(&init_mm, (vaddr), (ptep));	\
+	__flush_tlb_one((vaddr));		\
 } while (0)

 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)

 void native_pagetable_setup_start(pgd_t *base);
 void native_pagetable_setup_done(pgd_t *base);
@@ -239,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define kern_addr_valid(kaddr)	(0)
 #endif

-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)

 #endif /* _I386_PGTABLE_H */
-- cgit v1.2.3
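The VMALLOC_START expression reflowed in the patch above buys a guard hole after high_memory and 8 MB alignment in a single round-up. A worked example, assuming high_memory is the unaligned value 0xc8001000:

	/*  high_memory              = 0xc8001000
	 *  + 2*VMALLOC_OFFSET - 1   = 0x00ffffff  ->  0xc9000fff
	 *  & ~(VMALLOC_OFFSET - 1)  = 0xff800000  ->  0xc9000000
	 *
	 * VMALLOC_START = 0xc9000000: just under 16 MB of hole here, and
	 * exactly 8 MB when high_memory is already 8 MB aligned -- the
	 * minimum gap the "2 *" term guarantees.
	 */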
From 4b01fef89a10cedbae9857e76283616af3f177cd Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:10 -0700
Subject: include/asm-x86/pgtable-3level.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable-3level.h | 48 +++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 20 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 1d763eec740..8b4a9d44b7f 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -8,22 +8,26 @@
  * Copyright (C) 1999 Ingo Molnar
  */

-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
+#define pte_ERROR(e)						\
+	printk("%s:%d: bad pte %p(%08lx%08lx).\n",		\
+	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e)						\
+	printk("%s:%d: bad pmd %p(%016Lx).\n",			\
+	       __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e)						\
+	printk("%s:%d: bad pgd %p(%016Lx).\n",			\
+	       __FILE__, __LINE__, &(e), pgd_val(e))

 static inline int pud_none(pud_t pud)
 {
 	return pud_val(pud) == 0;
 }
+
 static inline int pud_bad(pud_t pud)
 {
 	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
 }
+
 static inline int pud_present(pud_t pud)
 {
 	return pud_val(pud) & _PAGE_PRESENT;
@@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
  * we are justified in merely clearing the PTE present bit, followed
  * by a set.  The ordering here is important.
 */
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+					  unsigned long addr,
 					  pte_t *ptep, pte_t pte)
 {
 	ptep->pte_low = 0;
@@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long ad

 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-	set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
 }
+
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
 }
+
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
+	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
 }

 /*
@@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud)
  * entry, so clear the bottom half first and enforce ordering with a compiler
  * barrier.
  */
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
 {
 	ptep->pte_low = 0;
 	smp_wmb();
@@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp)
 	 * current pgd to avoid unnecessary TLB flushes.
	 */
 	pgd = read_cr3();
-	if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+	if (__pa(pudp) >= pgd && __pa(pudp) <
+	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
 		write_cr3(pgd);
 }

-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))

-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))


 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-			pmd_index(address))
+#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) +	\
+				  pmd_index(address))

 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
@@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte)
 * put the 32 bits of offset into the high part.
 */
 #define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
+#define pgoff_to_pte(off)					\
+	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
 #define PTE_FILE_MAX_BITS       32

 /* Encode and de-code a swap entry */
-- cgit v1.2.3
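The ordering that native_set_pte_present() and native_pte_clear() enforce in the patch above can be made concrete: on PAE a pte is two 32-bit words and only the low word carries _PAGE_PRESENT, so the halves must be published in the right order. A sketch of the invariant (illustrative, condensing what the comments in the header describe):

	/* WRONG on a live mapping: low word (present bit) stored first,
	 * another CPU could walk a present entry with the OLD high word. */
	ptep->pte_low  = pte.pte_low;
	ptep->pte_high = pte.pte_high;

	/* safe: knock the entry out, order the halves, publish last */
	ptep->pte_low = 0;		/* walkers now see not-present  */
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;	/* entry becomes visible only here */

native_set_pte_atomic() sidesteps the dance entirely by storing both words in one shot via set_64bit().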
From 7f94401e439dc1137319c48dfec0285f681eb3ad Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:11 -0700
Subject: include/asm-x86/pgtable_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable_64.h | 141 +++++++++++++++++++++++--------------------
 1 file changed, 76 insertions(+), 65 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 0a5081c98ae..9fd87d0b647 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -52,14 +52,18 @@ extern void paging_init(void);

 #ifndef __ASSEMBLY__

-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define pte_ERROR(e)					\
+	printk("%s:%d: bad pte %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e)					\
+	printk("%s:%d: bad pmd %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e)					\
+	printk("%s:%d: bad pud %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e)					\
+	printk("%s:%d: bad pgd %p(%016lx).\n",		\
+	       __FILE__, __LINE__, &(e), pgd_val(e))

 #define pgd_none(x)	(!pgd_val(x))
 #define pud_none(x)	(!pud_val(x))
@@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #ifdef CONFIG_SMP
 	return native_make_pte(xchg(&xp->pte, 0));
 #else
-	/* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */
+	/* native_local_ptep_get_and_clear,
+	   but duplicated because of cyclic dependency */
 	pte_t ret = *xp;
 	native_pte_clear(NULL, 0, xp);
 	return ret;
@@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 	*pgdp = pgd;
 }

-static inline void native_pgd_clear(pgd_t * pgd)
+static inline void native_pgd_clear(pgd_t *pgd)
 {
 	native_set_pgd(pgd, native_make_pgd(0));
 }
@@ -128,15 +133,15 @@ static inline void native_pgd_clear(pgd_t *pgd)

 #endif /* !__ASSEMBLY__ */

-#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE-1))
-#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE - 1))
+#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE - 1))
+#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

-#define MAXMEM		 _AC(0x3fffffffffff, UL)
+#define MAXMEM		 _AC(0x00003fffffffffff, UL)
 #define VMALLOC_START    _AC(0xffffc20000000000, UL)
 #define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
 #define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
@@ -163,18 +168,18 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
 }

-#define pte_none(x)	(!pte_val(x))
-#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_none(x)	(!pte_val((x)))
+#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

-#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
-#define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */
+#define pte_page(x)	pfn_to_page(pte_pfn((x)))
+#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

 /*
  * Macro to mark a page protection value as "uncacheable".
  */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
+#define pgprot_noncached(prot)					\
+	(__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))

 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -184,77 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
-#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+#define pgd_page_vaddr(pgd)					\
+	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
+#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address)	((mm)->pgd + pgd_index((address)))
+#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
 #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })

 /* PUD - Level3 access */
 /* to find an entry in a page-table-directory.
 */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)		(pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+#define pud_page_vaddr(pud)					\
+	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
+#define pud_page(pud)	(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgd, address)				\
+	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
+#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)

 static inline int pud_large(pud_t pte)
 {
-	return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
-		(_PAGE_PSE|_PAGE_PRESENT);
+	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
 }

 /* PMD  - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
-			pmd_index(address))
-#define pmd_none(x)	(!pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK))
+#define pmd_page(pmd)	(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
+				  pmd_index(address))
+#define pmd_none(x)	(!pmd_val((x)))
+#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
+#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
+#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
+					    _PAGE_FILE })
 #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

 /* PTE - Level 1 access. */

 /* page, protection -> pte */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-#define pte_index(address) \
-		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
+
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-			pte_index(address))
+					 pte_index((address)))

 /* x86-64 always has all page tables mapped.
 */
-#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */

-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)

 extern int direct_gbpages;

 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)			((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
+							 ((offset) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

-extern int kern_addr_valid(unsigned long addr);
+extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);

-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)

 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -267,8 +276,10 @@ extern void cleanup_highmap(void);

 /* fs/proc/kcore.c */
 #define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define	kc_offset_to_vaddr(o) \
-   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+#define	kc_offset_to_vaddr(o)				\
+	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1)))	\
+	 ? ((o) | ~__VIRTUAL_MASK)			\
+	 : (o))

 #define __HAVE_ARCH_PTE_SAME
 #endif /* !__ASSEMBLY__ */
-- cgit v1.2.3
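A worked example of the 64-bit swap-entry layout reflowed above: the type sits in bits 1-6 and the offset starts at bit 8, so bit 0 (_PAGE_PRESENT) is always clear in a swap pte and it can never be mistaken for a live mapping.

	/* __swp_entry(type = 2, offset = 0x1000):
	 *   (2 << 1)       = 0x000004
	 *   (0x1000 << 8)  = 0x100000
	 *   val            = 0x100004
	 *
	 * __swp_type(val)   = (0x100004 >> 1) & 0x3f = 2
	 * __swp_offset(val) = 0x100004 >> 8          = 0x1000
	 */

The 6-bit type field bounds the number of swap areas at 64; everything from bit 8 upward is offset.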
From 3cbaeafeb10e38bce6c8d4764a254260d5a564bd Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:12 -0700
Subject: include/asm-x86/pgtable.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable.h | 156 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 120 insertions(+), 36 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index e814cfe96af..2ce76507046 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -48,12 +48,15 @@
 #endif

 /* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
+					 * saved PTE; unset:swap */
 #define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
 					   pte_present gives true */

-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
+			 _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
+			 _PAGE_DIRTY)

 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

@@ -64,14 +67,20 @@
 #define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
+				 _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
+					 _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED)
 #define PAGE_COPY		PAGE_COPY_NOEXEC
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+					 _PAGE_ACCESSED)

 #ifdef CONFIG_X86_32
 #define _PAGE_KERNEL_EXEC \
@@ -142,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

 extern spinlock_t pgd_lock;
@@ -152,30 +161,101 @@ extern struct list_head pgd_list;
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
 */
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
-static inline int pte_global(pte_t pte) 	{ return pte_val(pte) & _PAGE_GLOBAL; }
-static inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_NX); }
-
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
-		(_PAGE_PSE|_PAGE_PRESENT);
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PSE;
 }

-static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
-static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
-static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
-static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
-static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
-static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
-static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
-static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
-static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
-static inline pte_t pte_mkglobal(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_GLOBAL); }
-static inline pte_t pte_clrglobal(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
+static inline int pte_global(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+	return !(pte_val(pte) & _PAGE_NX);
+}
+
+static inline int pmd_large(pmd_t pte)
+{
+	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+}

 extern pteval_t __supported_pte_mask;

@@ -342,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 })

 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep)
 {
 	pte_t pte = native_ptep_get_and_clear(ptep);
 	pte_update(mm, addr, ptep);
@@ -350,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 }

 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep,
+					    int full)
 {
 	pte_t pte;
 	if (full) {
@@ -366,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 }

 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 	pte_update(mm, addr, ptep);
-- cgit v1.2.3
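The pte_mk*() helpers converted to block form above are pure value transformers over pteval_t; nothing touches the page tables until the result is installed. A composition sketch (the surrounding mm/addr/ptep context is assumed, not from the patch):

	pte_t pte = *ptep;

	/* mark writable + young + dirty in one value, then publish it */
	pte = pte_mkdirty(pte_mkyoung(pte_mkwrite(pte)));
	set_pte_at(mm, addr, ptep, pte);

The reflow is formatting only, so call sites like this are unaffected; only the one-statement-per-line style changes.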
From 2c5d516ca70641e26463e8d24344b515a2973c11 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:13 -0700
Subject: include/asm-x86/posix_types_32.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/posix_types_32.h | 47 +++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 20 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index 015e539cdef..b031efda37e 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -45,32 +45,39 @@ typedef struct {
 #if defined(__KERNEL__)

 #undef	__FD_SET
-#define __FD_SET(fd,fdsetp) \
-		__asm__ __volatile__("btsl %1,%0": \
-			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_SET(fd,fdsetp)					\
+	asm volatile("btsl %1,%0":				\
+		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
+		     : "r" ((int)(fd)))

 #undef	__FD_CLR
-#define __FD_CLR(fd,fdsetp) \
-		__asm__ __volatile__("btrl %1,%0": \
-			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_CLR(fd,fdsetp)					\
+	asm volatile("btrl %1,%0":				\
+		     "+m" (*(__kernel_fd_set *)(fdsetp))	\
+		     : "r" ((int) (fd)))

 #undef	__FD_ISSET
-#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
-		unsigned char __result; \
-		__asm__ __volatile__("btl %1,%2 ; setb %0" \
-			:"=q" (__result) :"r" ((int) (fd)), \
-			"m" (*(__kernel_fd_set *) (fdsetp))); \
-		__result; }))
+#define __FD_ISSET(fd,fdsetp)					\
+	(__extension__						\
+	 ({							\
+	 unsigned char __result;				\
+	 asm volatile("btl %1,%2 ; setb %0"			\
+		      : "=q" (__result)				\
+		      : "r" ((int)(fd)),			\
+			"m" (*(__kernel_fd_set *)(fdsetp)));	\
+	 __result;						\
+}))

 #undef	__FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
-	int __d0, __d1; \
-	__asm__ __volatile__("cld ; rep ; stosl" \
-			:"=m" (*(__kernel_fd_set *) (fdsetp)), \
-			"=&c" (__d0), "=&D" (__d1) \
-			:"a" (0), "1" (__FDSET_LONGS), \
-			"2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
+#define __FD_ZERO(fdsetp)					\
+do {								\
+	int __d0, __d1;						\
+	asm volatile("cld ; rep ; stosl"			\
+		     : "=m" (*(__kernel_fd_set *)(fdsetp)),	\
+		       "=&c" (__d0), "=&D" (__d1)		\
+		     : "a" (0), "1" (__FDSET_LONGS),		\
+		       "2" ((__kernel_fd_set *)(fdsetp))	\
+		     : "memory");				\
 } while (0)

 #endif /* defined(__KERNEL__) */
-- cgit v1.2.3
From 4943aa4ec25ccc7161f4f4fcdd0018a4c1f6d4e8 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:14 -0700
Subject: include/asm-x86/posix_types_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/posix_types_64.h | 54 ++++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index 9926aa43775..d6624c95854 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -46,7 +46,7 @@ typedef unsigned long	__kernel_old_dev_t;
 #ifdef __KERNEL__

 #undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
 }

 #undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
 }

 #undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
+static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
 {
 	unsigned long _tmp = fd / __NFDBITS;
 	unsigned long _rem = fd % __NFDBITS;
@@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
  * for 256 and 1024-bit fd_sets respectively)
  */
 #undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+static inline void __FD_ZERO(__kernel_fd_set *p)
 {
 	unsigned long *tmp = p->fds_bits;
 	int i;

 	if (__builtin_constant_p(__FDSET_LONGS)) {
 		switch (__FDSET_LONGS) {
-			case 32:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			  tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			  tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
-			  tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
-			  tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
-			  tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
-			  return;
-			case 16:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-			  tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
-			  return;
-			case 8:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-			  return;
-			case 4:
-			  tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
-			  return;
+		case 32:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+			tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
+			tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
+			tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
+			tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
+			return;
+		case 16:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+			return;
+		case 8:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			return;
+		case 4:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			return;
 		}
 	}
 	i = __FDSET_LONGS;
-- cgit v1.2.3
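A worked example of the fd_set arithmetic in the functions above, with __NFDBITS = 64 on x86-64: fd 100 lives in word 100/64 = 1 at bit 100%64 = 36, and __FDSET_LONGS = 1024/64 = 16, which is exactly the unrolled "case 16" fast path in __FD_ZERO(). Usage (semantics sketched from the helpers; the surrounding code is illustrative):

	__kernel_fd_set set;

	__FD_ZERO(&set);	/* clears all 16 longs, fully unrolled     */
	__FD_SET(100, &set);	/* sets bit 36 of set.fds_bits[1]          */

	if (__FD_ISSET(100, &set))
		;		/* fd 100 is a member                      */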
From cca2e6f87e3856953503aae2c0b8a1d5628796ef Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:15 -0700
Subject: include/asm-x86/processor.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/processor.h | 73 +++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 35 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index b0dece41dbc..6e26c7c717a 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -175,12 +175,12 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (*eax), "2" (*ecx));
+	asm("cpuid"
+	    : "=a" (*eax),
+	      "=b" (*ebx),
+	      "=c" (*ecx),
+	      "=d" (*edx)
+	    : "0" (*eax), "2" (*ecx));
 }

 static inline void load_cr3(pgd_t *pgdir)
@@ -427,17 +427,23 @@ static inline unsigned long native_get_debugreg(int regno)

 	switch (regno) {
 	case 0:
-		asm("mov %%db0, %0" :"=r" (val)); break;
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
 	case 1:
-		asm("mov %%db1, %0" :"=r" (val)); break;
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
 	case 2:
-		asm("mov %%db2, %0" :"=r" (val)); break;
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
 	case 3:
-		asm("mov %%db3, %0" :"=r" (val)); break;
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
 	case 6:
-		asm("mov %%db6, %0" :"=r" (val)); break;
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val)); break;
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
 	default:
 		BUG();
 	}
@@ -478,14 +484,14 @@ static inline void native_set_iopl_mask(unsigned mask)
 #ifdef CONFIG_X86_32
 	unsigned int reg;

-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-				: "=&r" (reg)
-				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+	asm volatile ("pushfl;"
+		      "popl %0;"
+		      "andl %1, %0;"
+		      "orl %2, %0;"
+		      "pushl %0;"
+		      "popfl"
+		      : "=&r" (reg)
+		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
 #endif
 }

@@ -523,8 +529,8 @@ static inline void native_swapgs(void)
 #define set_debugreg(value, register)				\
 	native_set_debugreg(register, value)

-static inline void
-load_sp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void load_sp0(struct tss_struct *tss,
+			    struct thread_struct *thread)
 {
 	native_load_sp0(tss, thread);
 }
@@ -680,7 +686,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep; nop" ::: "memory");
+	asm volatile("rep; nop" ::: "memory");
 }

 static inline void cpu_relax(void)
@@ -694,32 +700,29 @@ static inline void sync_core(void)
 	int tmp;
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+		     : "ebx", "ecx", "edx", "memory");
 }

-static inline void
-__monitor(const void *eax, unsigned long ecx, unsigned long edx)
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
 {
 	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(
-		".byte 0x0f, 0x01, 0xc8;"
-		:: "a" (eax), "c" (ecx), "d"(edx));
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
 }

 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
 	/* "mwait %eax, %ecx;" */
-	asm volatile(
-		".byte 0x0f, 0x01, 0xc9;"
-		:: "a" (eax), "c" (ecx));
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }

 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
 	/* "mwait %eax, %ecx;" */
-	asm volatile(
-		"sti; .byte 0x0f, 0x01, 0xc9;"
-		:: "a" (eax), "c" (ecx));
+	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }

 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-- cgit v1.2.3
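native_cpuid() above declares *eax and *ecx as both inputs and outputs (the "0" and "2" matching constraints) because CPUID consumes a leaf in EAX and, for some leaves, a subleaf in ECX. A sketch reading the vendor string with leaf 0 (buffer handling is illustrative; memcpy as in linux/string.h):

	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char vendor[13];

	native_cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* vendor string order: EBX,EDX,ECX */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';		/* e.g. "GenuineIntel"              */

On return, eax holds the highest supported standard leaf.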
From 708c5662975518eeea04de10cb11075d48636180 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:16 -0700
Subject: include/asm-x86/proto.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/proto.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 68563c0709a..9da46af3b0e 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -26,7 +26,7 @@ extern int reboot_force;

 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);

-#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
-#define round_down(x,y) ((x) & ~((y)-1))
+#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
+#define round_down(x, y) ((x) & ~((y) - 1))

 #endif
-- cgit v1.2.3
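round_up()/round_down() above are the usual mask tricks, correct only when y is a power of two so that ~((y) - 1) is a contiguous high mask. A worked example with y = 0x1000 (4 KB):

	/* round_up(0x1234, 0x1000):
	 *   0x1234 + 0x0fff  = 0x2233
	 *   0x2233 & ~0x0fff = 0x2000
	 *
	 * round_down(0x1234, 0x1000):
	 *   0x1234 & ~0x0fff = 0x1000
	 */

A non-power-of-two y silently mis-rounds (round_up(5, 6) yields 10, not 6, with these macros), which is why callers pass alignments rather than arbitrary sizes.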
2008 01:03:18 -0700 Subject: include/asm-x86/reboot.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/reboot.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index e9e3ffc22c0..6b5233b4f84 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h @@ -3,8 +3,7 @@ struct pt_regs; -struct machine_ops -{ +struct machine_ops { void (*restart)(char *cmd); void (*halt)(void); void (*power_off)(void); -- cgit v1.2.3 From c6fd5d49ec578e1078331b81ca09008fb361a8ba Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:19 -0700 Subject: include/asm-x86/resume-trace.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/resume-trace.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 46f725b0bc8..2557514d7ef 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h @@ -3,16 +3,17 @@ #include -#define TRACE_RESUME(user) do { \ +#define TRACE_RESUME(user) \ +do { \ if (pm_trace_enabled) { \ void *tracedata; \ asm volatile(_ASM_MOV_UL " $1f,%0\n" \ - ".section .tracedata,\"a\"\n" \ - "1:\t.word %c1\n\t" \ - _ASM_PTR " %c2\n" \ - ".previous" \ - :"=r" (tracedata) \ - : "i" (__LINE__), "i" (__FILE__)); \ + ".section .tracedata,\"a\"\n" \ + "1:\t.word %c1\n\t" \ + _ASM_PTR " %c2\n" \ + ".previous" \ + :"=r" (tracedata) \ + : "i" (__LINE__), "i" (__FILE__)); \ generate_resume_trace(tracedata, user); \ } \ } while (0) -- cgit v1.2.3 From 0f4fc8c1dca86b519fed50be0962c8def8d3d446 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:20 -0700 Subject: include/asm-x86/rio.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/rio.h | 76 +++++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index 97cdcc9887b..3451c576e6a 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h @@ -11,53 +11,53 @@ #define RIO_TABLE_VERSION 3 struct rio_table_hdr { - u8 version; /* Version number of this data structure */ - u8 num_scal_dev; /* # of Scalability devices */ - u8 num_rio_dev; /* # of RIO I/O devices */ + u8 version; /* Version number of this data structure */ + u8 num_scal_dev; /* # of Scalability devices */ + u8 num_rio_dev; /* # of RIO I/O devices */ } __attribute__((packed)); struct scal_detail { - u8 node_id; /* Scalability Node ID */ - u32 CBAR; /* Address of 1MB register space */ - u8 port0node; /* Node ID port connected to: 0xFF=None */ - u8 port0port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port1node; /* Node ID port connected to: 0xFF = None */ - u8 port1port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port2node; /* Node ID port connected to: 0xFF = None */ - u8 port2port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ + u8 node_id; /* Scalability Node ID */ + u32 CBAR; /* Address of 1MB register space */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 
0xFF = None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port2node; /* Node ID port connected to: 0xFF = None */ + u8 port2port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ } __attribute__((packed)); struct rio_detail { - u8 node_id; /* RIO Node ID */ - u32 BBAR; /* Address of 1MB register space */ - u8 type; /* Type of device */ - u8 owner_id; /* Node ID of Hurricane that owns this */ - /* node */ - u8 port0node; /* Node ID port connected to: 0xFF=None */ - u8 port0port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port1node; /* Node ID port connected to: 0xFF=None */ - u8 port1port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 first_slot; /* Lowest slot number below this Calgary */ - u8 status; /* Bit 0 = 1 : the XAPIC is used */ - /* = 0 : the XAPIC is not used, ie: */ - /* ints fwded to another XAPIC */ - /* Bits1:7 Reserved */ - u8 WP_index; /* instance index - lower ones have */ - /* lower slot numbers/PCI bus numbers */ - u8 chassis_num; /* 1 based Chassis number */ + u8 node_id; /* RIO Node ID */ + u32 BBAR; /* Address of 1MB register space */ + u8 type; /* Type of device */ + u8 owner_id; /* Node ID of Hurricane that owns this */ + /* node */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 0xFF=None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 first_slot; /* Lowest slot number below this Calgary */ + u8 status; /* Bit 0 = 1 : the XAPIC is used */ + /* = 0 : the XAPIC is not used, ie: */ + /* ints fwded to another XAPIC */ + /* Bits1:7 Reserved */ + u8 WP_index; /* instance index - lower ones have */ + /* lower slot numbers/PCI bus numbers */ + u8 chassis_num; /* 1 based Chassis number */ } __attribute__((packed)); enum { - HURR_SCALABILTY = 0, /* Hurricane Scalability info */ - HURR_RIOIB = 2, /* Hurricane RIOIB info */ - COMPAT_CALGARY = 4, /* Compatibility Calgary */ - ALT_CALGARY = 5, /* Second Planar Calgary */ + HURR_SCALABILTY = 0, /* Hurricane Scalability info */ + HURR_RIOIB = 2, /* Hurricane RIOIB info */ + COMPAT_CALGARY = 4, /* Compatibility Calgary */ + ALT_CALGARY = 5, /* Second Planar Calgary */ }; /* -- cgit v1.2.3 From 6e5609a97acef44440f233ad435dd0ab563608f9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:21 -0700 Subject: include/asm-x86/rwsem.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/rwsem.h | 169 ++++++++++++++++++++++++------------------------ 1 file changed, 86 insertions(+), 83 deletions(-) (limited to 'include') diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 520a379f4b8..750f2a3542b 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h @@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore * /* * the semaphore definition */ -struct rw_semaphore { - signed long count; + #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +struct rw_semaphore { + signed long count; spinlock_t wait_lock; struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -78,11 +80,13 @@ 
struct rw_semaphore { #endif -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } +#define __RWSEM_INITIALIZER(name) \ +{ \ + RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ +} -#define DECLARE_RWSEM(name) \ +#define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) extern void __init_rwsem(struct rw_semaphore *sem, const char *name, @@ -100,16 +104,16 @@ do { \ */ static inline void __down_read(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " jns 1f\n" - " call call_rwsem_down_read_failed\n" - "1:\n\t" - "# ending down_read\n\t" - : "+m" (sem->count) - : "a" (sem) - : "memory", "cc"); + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX " incl (%%eax)\n\t" + /* adds 0x00000001, returns the old value */ + " jns 1f\n" + " call call_rwsem_down_read_failed\n" + "1:\n\t" + "# ending down_read\n\t" + : "+m" (sem->count) + : "a" (sem) + : "memory", "cc"); } /* @@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value static inline int __down_read_trylock(struct rw_semaphore *sem) { __s32 result, tmp; - __asm__ __volatile__( - "# beginning __down_read_trylock\n\t" - " movl %0,%1\n\t" - "1:\n\t" - " movl %1,%2\n\t" - " addl %3,%2\n\t" - " jle 2f\n\t" -LOCK_PREFIX " cmpxchgl %2,%0\n\t" - " jnz 1b\n\t" - "2:\n\t" - "# ending __down_read_trylock\n\t" - : "+m" (sem->count), "=&a" (result), "=&r" (tmp) - : "i" (RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); - return result>=0 ? 1 : 0; + asm volatile("# beginning __down_read_trylock\n\t" + " movl %0,%1\n\t" + "1:\n\t" + " movl %1,%2\n\t" + " addl %3,%2\n\t" + " jle 2f\n\t" + LOCK_PREFIX " cmpxchgl %2,%0\n\t" + " jnz 1b\n\t" + "2:\n\t" + "# ending __down_read_trylock\n\t" + : "+m" (sem->count), "=&a" (result), "=&r" (tmp) + : "i" (RWSEM_ACTIVE_READ_BIAS) + : "memory", "cc"); + return result >= 0 ? 1 : 0; } /* @@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) int tmp; tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ - " testl %%edx,%%edx\n\t" /* was the count 0 before? */ - " jz 1f\n" - " call call_rwsem_down_write_failed\n" - "1:\n" - "# ending down_write" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" + /* subtract 0x0000ffff, returns the old value */ + " testl %%edx,%%edx\n\t" + /* was the count 0 before? 
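(xadd leaves the old count in %edx: zero means the rwsem was idle, so the writer takes it uncontended)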
*/ + " jz 1f\n" + " call call_rwsem_down_write_failed\n" + "1:\n" + "# ending down_write" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) + : "memory", "cc"); } static inline void __down_write(struct rw_semaphore *sem) @@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem) static inline int __down_write_trylock(struct rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, - RWSEM_UNLOCKED_VALUE, + RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); if (ret == RWSEM_UNLOCKED_VALUE) return 1; @@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) static inline void __up_read(struct rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" - "1:\n" - "# ending __up_read\n" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" + /* subtracts 1, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" + "1:\n" + "# ending __up_read\n" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) + : "memory", "cc"); } /* @@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu */ static inline void __up_write(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning __up_write\n\t" - " movl %2,%%edx\n\t" -LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jz 1f\n" - " call call_rwsem_wake\n" - "1:\n\t" - "# ending __up_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc", "edx"); + asm volatile("# beginning __up_write\n\t" + " movl %2,%%edx\n\t" + LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" + /* tries to transition + 0xffff0001 -> 0x00000000 */ + " jz 1f\n" + " call call_rwsem_wake\n" + "1:\n\t" + "# ending __up_write\n" + : "+m" (sem->count) + : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) + : "memory", "cc", "edx"); } /* @@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> */ static inline void __downgrade_write(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning __downgrade_write\n\t" -LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ - " jns 1f\n\t" - " call call_rwsem_downgrade_wake\n" - "1:\n\t" - "# ending __downgrade_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_WAITING_BIAS) - : "memory", "cc"); + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX " addl %2,(%%eax)\n\t" + /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ + " jns 1f\n\t" + " call call_rwsem_downgrade_wake\n" + "1:\n\t" + "# ending __downgrade_write\n" + : "+m" (sem->count) + : "a" (sem), "i" (-RWSEM_WAITING_BIAS) + : "memory", "cc"); } /* @@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) { - __asm__ __volatile__( -LOCK_PREFIX "addl %1,%0" - : "+m" (sem->count) - : "ir" (delta)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "+m" (sem->count) + : "ir" (delta)); } /* @@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) { int tmp = delta; - __asm__ __volatile__( -LOCK_PREFIX "xadd %0,%1" - : "+r" (tmp), "+m" (sem->count) - : : "memory"); + asm volatile(LOCK_PREFIX "xadd %0,%1" + 
: "+r" (tmp), "+m" (sem->count) + : : "memory"); - return tmp+delta; + return tmp + delta; } static inline int rwsem_is_locked(struct rw_semaphore *sem) -- cgit v1.2.3 From 915cd5aa0adbe0f62b7a56d6eaf6908b47f80395 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:25 -0700 Subject: include/asm-x86/setup.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/setup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index f745de21119..fa6763af8d2 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h @@ -55,8 +55,8 @@ struct e820entry; char * __init machine_specific_memory_setup(void); char *memory_setup(void); -int __init copy_e820_map(struct e820entry * biosmap, int nr_map); -int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); +int __init copy_e820_map(struct e820entry *biosmap, int nr_map); +int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map); void __init add_memory_region(unsigned long long start, unsigned long long size, int type); -- cgit v1.2.3 From 895b7643d6d2d04761344e18a2473255f2551e9e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:26 -0700 Subject: include/asm-x86/sigcontext32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sigcontext32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 6ffab4fd593..57a9686fb49 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h @@ -26,7 +26,7 @@ struct _fpstate_ia32 { __u32 cw; __u32 sw; __u32 tag; /* not compatible to 64bit twd */ - __u32 ipoff; + __u32 ipoff; __u32 cssel; __u32 dataoff; __u32 datasel; @@ -39,7 +39,7 @@ struct _fpstate_ia32 { __u32 mxcsr; __u32 reserved; struct _fpxreg _fxsr_st[8]; - struct _xmmreg _xmm[8]; /* It's actually 16 */ + struct _xmmreg _xmm[8]; /* It's actually 16 */ __u32 padding[56]; }; -- cgit v1.2.3 From af1fec15de17086864fc3917e21a31e303ec0e91 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:27 -0700 Subject: include/asm-x86/sigcontext.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sigcontext.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index d743947f4c7..2f9c884d2c0 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h @@ -79,7 +79,7 @@ struct sigcontext { unsigned long flags; unsigned long sp_at_signal; unsigned short ss, __ssh; - struct _fpstate __user * fpstate; + struct _fpstate __user *fpstate; unsigned long oldmask; unsigned long cr2; }; @@ -107,7 +107,7 @@ struct sigcontext { unsigned long eflags; unsigned long esp_at_signal; unsigned short ss, __ssh; - struct _fpstate __user * fpstate; + struct _fpstate __user *fpstate; unsigned long oldmask; unsigned long cr2; }; @@ -121,7 +121,8 @@ struct sigcontext { struct _fpstate { __u16 cwd; __u16 swd; - __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ + __u16 twd; /* Note this is not the same as the + 32bit/x87/FSAVE twd */ __u16 fop; __u64 rip; __u64 rdp; -- cgit v1.2.3 From 9551b12a51bab7058ad486ba96fd0c27fdafe922 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: 
Sun, 23 Mar 2008 01:03:28 -0700 Subject: include/asm-x86/signal.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/signal.h | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index aee7eca585a..f15186d39c6 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h @@ -185,61 +185,61 @@ typedef struct sigaltstack { #define __HAVE_ARCH_SIG_BITOPS -#define sigaddset(set,sig) \ - (__builtin_constantp(sig) ? \ - __const_sigaddset((set),(sig)) : \ - __gen_sigaddset((set),(sig))) +#define sigaddset(set,sig) \ + (__builtin_constantp(sig) \ + ? __const_sigaddset((set), (sig)) \ + : __gen_sigaddset((set), (sig))) -static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) +static inline void __gen_sigaddset(sigset_t *set, int _sig) { - __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); + asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } -static __inline__ void __const_sigaddset(sigset_t *set, int _sig) +static inline void __const_sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); } -#define sigdelset(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigdelset((set),(sig)) : \ - __gen_sigdelset((set),(sig))) +#define sigdelset(set, sig) \ + (__builtin_constant_p(sig) \ + ? __const_sigdelset((set), (sig)) \ + : __gen_sigdelset((set), (sig))) -static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) +static inline void __gen_sigdelset(sigset_t *set, int _sig) { - __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); + asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } -static __inline__ void __const_sigdelset(sigset_t *set, int _sig) +static inline void __const_sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); } -static __inline__ int __const_sigismember(sigset_t *set, int _sig) +static inline int __const_sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } -static __inline__ int __gen_sigismember(sigset_t *set, int _sig) +static inline int __gen_sigismember(sigset_t *set, int _sig) { int ret; - __asm__("btl %2,%1\n\tsbbl %0,%0" - : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); + asm("btl %2,%1\n\tsbbl %0,%0" + : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); return ret; } -#define sigismember(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigismember((set),(sig)) : \ - __gen_sigismember((set),(sig))) +#define sigismember(set, sig) \ + (__builtin_constant_p(sig) \ + ? 
__const_sigismember((set), (sig)) \ + : __gen_sigismember((set), (sig))) -static __inline__ int sigfindinword(unsigned long word) +static inline int sigfindinword(unsigned long word) { - __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); + asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); return word; } -- cgit v1.2.3 From 2fec394adf9445ae6a21c5e67d9ad3f6f5b7c8d1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:29 -0700 Subject: include/asm-x86/smp_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/smp_32.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index f861d041517..cb3ada2fedb 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -18,8 +18,8 @@ extern cpumask_t cpu_callin_map; -extern void (*mtrr_hook) (void); -extern void zap_low_mappings (void); +extern void (*mtrr_hook)(void); +extern void zap_low_mappings(void); #ifdef CONFIG_SMP /* @@ -44,7 +44,7 @@ static inline int num_booting_cpus(void) #ifdef CONFIG_X86_LOCAL_APIC -static __inline int logical_smp_processor_id(void) +static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -- cgit v1.2.3 From ceb7ce1052a9087bd4752424f253b883ec5e1cec Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:30 -0700 Subject: include/asm-x86/smp_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index fd709cbba4d..c53a011bb91 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -24,9 +24,9 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), #define raw_smp_processor_id() read_pda(cpunumber) #define stack_smp_processor_id() \ - ({ \ +({ \ struct thread_info *ti; \ - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ + asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->cpu; \ }) @@ -46,7 +46,7 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() smp_processor_id() -static __inline int logical_smp_processor_id(void) +static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -- cgit v1.2.3 From d3bf60a6e48c9a451cac345c0ad57552bb299992 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:31 -0700 Subject: include/asm-x86/spinlock.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/spinlock.h | 105 +++++++++++++++++++++------------------------ 1 file changed, 50 insertions(+), 55 deletions(-) (limited to 'include') diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 23804c1890f..47dfe2607bb 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) { short inc = 0x0100; - __asm__ __volatile__ ( + asm volatile ( LOCK_PREFIX "xaddw %w0, %1\n" "1:\t" "cmpb %h0, %b0\n\t" @@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) /* don't need lfence here, because loads are 
in-order */ "jmp 1b\n" "2:" - :"+Q" (inc), "+m" (lock->slock) + : "+Q" (inc), "+m" (lock->slock) : - :"memory", "cc"); + : "memory", "cc"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) int tmp; short new; - asm volatile( - "movw %2,%w0\n\t" - "cmpb %h0,%b0\n\t" - "jne 1f\n\t" - "movw %w0,%w1\n\t" - "incb %h1\n\t" - "lock ; cmpxchgw %w1,%2\n\t" - "1:" - "sete %b1\n\t" - "movzbl %b1,%0\n\t" - :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) - : - : "memory", "cc"); + asm volatile("movw %2,%w0\n\t" + "cmpb %h0,%b0\n\t" + "jne 1f\n\t" + "movw %w0,%w1\n\t" + "incb %h1\n\t" + "lock ; cmpxchgw %w1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + : "=&a" (tmp), "=Q" (new), "+m" (lock->slock) + : + : "memory", "cc"); return tmp; } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - UNLOCK_LOCK_PREFIX "incb %0" - :"+m" (lock->slock) - : - :"memory", "cc"); + asm volatile(UNLOCK_LOCK_PREFIX "incb %0" + : "+m" (lock->slock) + : + : "memory", "cc"); } #else static inline int __raw_spin_is_locked(raw_spinlock_t *lock) @@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) int inc = 0x00010000; int tmp; - __asm__ __volatile__ ( - "lock ; xaddl %0, %1\n" - "movzwl %w0, %2\n\t" - "shrl $16, %0\n\t" - "1:\t" - "cmpl %0, %2\n\t" - "je 2f\n\t" - "rep ; nop\n\t" - "movzwl %1, %2\n\t" - /* don't need lfence here, because loads are in-order */ - "jmp 1b\n" - "2:" - :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) - : - :"memory", "cc"); + asm volatile("lock ; xaddl %0, %1\n" + "movzwl %w0, %2\n\t" + "shrl $16, %0\n\t" + "1:\t" + "cmpl %0, %2\n\t" + "je 2f\n\t" + "rep ; nop\n\t" + "movzwl %1, %2\n\t" + /* don't need lfence here, because loads are in-order */ + "jmp 1b\n" + "2:" + : "+Q" (inc), "+m" (lock->slock), "=r" (tmp) + : + : "memory", "cc"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) int tmp; int new; - asm volatile( - "movl %2,%0\n\t" - "movl %0,%1\n\t" - "roll $16, %0\n\t" - "cmpl %0,%1\n\t" - "jne 1f\n\t" - "addl $0x00010000, %1\n\t" - "lock ; cmpxchgl %1,%2\n\t" - "1:" - "sete %b1\n\t" - "movzbl %b1,%0\n\t" - :"=&a" (tmp), "=r" (new), "+m" (lock->slock) - : - : "memory", "cc"); + asm volatile("movl %2,%0\n\t" + "movl %0,%1\n\t" + "roll $16, %0\n\t" + "cmpl %0,%1\n\t" + "jne 1f\n\t" + "addl $0x00010000, %1\n\t" + "lock ; cmpxchgl %1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + : "=&a" (tmp), "=r" (new), "+m" (lock->slock) + : + : "memory", "cc"); return tmp; } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - UNLOCK_LOCK_PREFIX "incw %0" - :"+m" (lock->slock) - : - :"memory", "cc"); + asm volatile(UNLOCK_LOCK_PREFIX "incw %0" + : "+m" (lock->slock) + : + : "memory", "cc"); } #endif -- cgit v1.2.3 From 7f3a9508b5778034091e00e696dc17ca98b8a84c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:32 -0700 Subject: include/asm-x86/srat.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/srat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h index 165ab4bdc02..f4bba131d06 100644 --- a/include/asm-x86/srat.h +++ b/include/asm-x86/srat.h @@ -1,5 +1,5 @@ /* - * Some of the code in this file has been gleaned from the 64 bit + * Some of 
the code in this file has been gleaned from the 64 bit * discontigmem support code base. * * Copyright (C) 2002, IBM Corp. -- cgit v1.2.3 From 06b0f574eafdd26d325ce5ab3c38522d0cdd7b7c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:33 -0700 Subject: include/asm-x86/string_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/string_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index c5d13a86dea..b49369ad9a6 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -3,7 +3,7 @@ #ifdef __KERNEL__ -/* Let gcc decide wether to inline or use the out of line functions */ +/* Let gcc decide whether to inline or use the out of line functions */ #define __HAVE_ARCH_STRCPY extern char *strcpy(char *dest, const char *src); -- cgit v1.2.3 From 953b2f1ed6f2642e58e7c5b8c1385d132f1b5685 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:34 -0700 Subject: include/asm-x86/string_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/string_64.h | 66 ++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index e583da7918f..52b5ab38339 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h @@ -3,26 +3,24 @@ #ifdef __KERNEL__ -/* Written 2002 by Andi Kleen */ +/* Written 2002 by Andi Kleen */ -/* Only used for special circumstances. Stolen from i386/string.h */ -static __always_inline void * -__inline_memcpy(void * to, const void * from, size_t n) +/* Only used for special circumstances. 
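It forces the rep-movs sequence instead of letting gcc pick a copy strategy.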
Stolen from i386/string.h */ +static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n) { -unsigned long d0, d1, d2; -__asm__ __volatile__( - "rep ; movsl\n\t" - "testb $2,%b4\n\t" - "je 1f\n\t" - "movsw\n" - "1:\ttestb $1,%b4\n\t" - "je 2f\n\t" - "movsb\n" - "2:" - : "=&c" (d0), "=&D" (d1), "=&S" (d2) - :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) - : "memory"); -return (to); + unsigned long d0, d1, d2; + asm volatile("rep ; movsl\n\t" + "testb $2,%b4\n\t" + "je 1f\n\t" + "movsw\n" + "1:\ttestb $1,%b4\n\t" + "je 2f\n\t" + "movsb\n" + "2:" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) + : "memory"); + return to; } /* Even with __builtin_ the compiler may decide to use the out of line @@ -32,28 +30,30 @@ return (to); #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 extern void *memcpy(void *to, const void *from, size_t len); #else -extern void *__memcpy(void *to, const void *from, size_t len); -#define memcpy(dst,src,len) \ - ({ size_t __len = (len); \ - void *__ret; \ - if (__builtin_constant_p(len) && __len >= 64) \ - __ret = __memcpy((dst),(src),__len); \ - else \ - __ret = __builtin_memcpy((dst),(src),__len); \ - __ret; }) +extern void *__memcpy(void *to, const void *from, size_t len); +#define memcpy(dst, src, len) \ +({ \ + size_t __len = (len); \ + void *__ret; \ + if (__builtin_constant_p(len) && __len >= 64) \ + __ret = __memcpy((dst), (src), __len); \ + else \ + __ret = __builtin_memcpy((dst), (src), __len); \ + __ret; \ +}) #endif #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); #define __HAVE_ARCH_MEMMOVE -void * memmove(void * dest,const void *src,size_t count); +void *memmove(void *dest, const void *src, size_t count); -int memcmp(const void * cs,const void * ct,size_t count); -size_t strlen(const char * s); -char *strcpy(char * dest,const char *src); -char *strcat(char * dest, const char * src); -int strcmp(const char * cs,const char * ct); +int memcmp(const void *cs, const void *ct, size_t count); +size_t strlen(const char *s); +char *strcpy(char *dest, const char *src); +char *strcat(char *dest, const char *src); +int strcmp(const char *cs, const char *ct); #endif /* __KERNEL__ */ -- cgit v1.2.3 From cf030ebd40e37cb11d1efa6677890f40f21e16f4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:35 -0700 Subject: include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/suspend_32.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 1bbda3ad779..24e1c080aa8 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h @@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; } /* image of the saved processor state */ struct saved_context { - u16 es, fs, gs, ss; + u16 es, fs, gs, ss; unsigned long cr0, cr2, cr3, cr4; struct desc_ptr gdt; struct desc_ptr idt; @@ -32,11 +32,11 @@ extern unsigned long saved_edi; static inline void acpi_save_register_state(unsigned long return_point) { saved_eip = return_point; - asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); - asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); - asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); + asm volatile("movl %%esp,%0" : "=m" (saved_esp)); 
+ asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); + asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); + asm volatile("movl %%edi,%0" : "=m" (saved_edi)); + asm volatile("movl %%esi,%0" : "=m" (saved_esi)); } #define acpi_restore_register_state() do {} while (0) -- cgit v1.2.3 From 1b17fce6078ab0672f6702097680b65124de5f05 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:36 -0700 Subject: include/asm-x86/suspend_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/suspend_64.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index 2eb92cb81a0..dc3262b4307 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h @@ -9,8 +9,7 @@ #include #include -static inline int -arch_prepare_suspend(void) +static inline int arch_prepare_suspend(void) { return 0; } @@ -25,7 +24,7 @@ arch_prepare_suspend(void) */ struct saved_context { struct pt_regs regs; - u16 ds, es, fs, gs, ss; + u16 ds, es, fs, gs, ss; unsigned long gs_base, gs_kernel_base, fs_base; unsigned long cr0, cr2, cr3, cr4, cr8; unsigned long efer; -- cgit v1.2.3 From a4c2d7d9285500a9b229bb7ddc7abe0212a0dab0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:37 -0700 Subject: include/asm-x86/swiotlb.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/swiotlb.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index f9c589539a8..f5d9e74b1e4 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h @@ -8,15 +8,15 @@ extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); + dma_addr_t *dma_handle, gfp_t flags); extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); + size_t size, int dir); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); + dma_addr_t dev_addr, + size_t size, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); + dma_addr_t dev_addr, + size_t size, int dir); extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, @@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev, unsigned long offset, size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); + struct scatterlist *sg, int nelems, + int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); + struct scatterlist *sg, int nelems, + int dir); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); -extern void swiotlb_free_coherent (struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); +extern void swiotlb_free_coherent(struct device 
*hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); -- cgit v1.2.3 From 26b7fcc4bde28237a906597a809b149fb06713b0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:38 -0700 Subject: include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 56 +++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index bc249f40e0e..f1078a5e4ed 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -13,7 +13,7 @@ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ -#define ADDR (*(volatile long *) addr) +#define ADDR (*(volatile long *)addr) /** * sync_set_bit - Atomically set a bit in memory @@ -26,12 +26,12 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void sync_set_bit(int nr, volatile unsigned long * addr) +static inline void sync_set_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btsl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr) * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. */ -static inline void sync_clear_bit(int nr, volatile unsigned long * addr) +static inline void sync_clear_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btrl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void sync_change_bit(int nr, volatile unsigned long * addr) +static inline void sync_change_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btcl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) +static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } @@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. 
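* On x86 the lock prefix supplies both the atomicity and the barrier.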
*/ -static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) +static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } @@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) +static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } -- cgit v1.2.3 From c5386c200f55940eeeb827df172edf2e0305f23b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:39 -0700 Subject: include/asm-x86/system.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 104 +++++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 33b0017156a..a2f04cd79b2 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -38,35 +38,33 @@ do { \ */ \ unsigned long ebx, ecx, edx, esi, edi; \ \ - asm volatile( \ - "pushfl \n\t" /* save flags */ \ - "pushl %%ebp \n\t" /* save EBP */ \ - "movl %%esp,%[prev_sp] \n\t" /* save ESP */ \ - "movl %[next_sp],%%esp \n\t" /* restore ESP */ \ - "movl $1f,%[prev_ip] \n\t" /* save EIP */ \ - "pushl %[next_ip] \n\t" /* restore EIP */ \ - "jmp __switch_to \n" /* regparm call */ \ - "1: \t" \ - "popl %%ebp \n\t" /* restore EBP */ \ - "popfl \n" /* restore flags */ \ + asm volatile("pushfl\n\t" /* save flags */ \ + "pushl %%ebp\n\t" /* save EBP */ \ + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ + "pushl %[next_ip]\n\t" /* restore EIP */ \ + "jmp __switch_to\n" /* regparm call */ \ + "1:\t" \ + "popl %%ebp\n\t" /* restore EBP */ \ + "popfl\n" /* restore flags */ \ \ - /* output parameters */ \ - : [prev_sp] "=m" (prev->thread.sp), \ - [prev_ip] "=m" (prev->thread.ip), \ - "=a" (last), \ + /* output parameters */ \ + : [prev_sp] "=m" (prev->thread.sp), \ + [prev_ip] "=m" (prev->thread.ip), \ + "=a" (last), \ \ - /* clobbered output registers: */ \ - "=b" (ebx), "=c" (ecx), "=d" (edx), \ - "=S" (esi), "=D" (edi) \ - \ - /* input parameters: */ \ - : [next_sp] "m" (next->thread.sp), \ - [next_ip] "m" (next->thread.ip), \ - \ - /* regparm parameters for __switch_to(): */ \ - [prev] "a" (prev), \ - [next] "d" (next) \ - ); \ + /* clobbered output registers: */ \ + "=b" (ebx), "=c" (ecx), "=d" (edx), \ + "=S" (esi), "=D" (edi) \ + \ + /* input parameters: */ \ + : [next_sp] "m" (next->thread.sp), \ + [next_ip] "m" (next->thread.ip), \ + \ + /* regparm parameters for __switch_to(): */ \ + [prev] "a" (prev), \ + [next] "d" (next)); \ } while (0) /* @@ -146,35 +144,34 @@ extern void load_gs_index(unsigned); */ #define loadsegment(seg, value) \ asm volatile("\n" \ - "1:\t" \ - "movl %k0,%%" #seg "\n" \ - "2:\n" 
\ - ".section .fixup,\"ax\"\n" \ - "3:\t" \ - "movl %k1, %%" #seg "\n\t" \ - "jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : :"r" (value), "r" (0)) + "1:\t" \ + "movl %k0,%%" #seg "\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\t" \ + "movl %k1, %%" #seg "\n\t" \ + "jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b,3b) \ + : :"r" (value), "r" (0)) /* * Save a segment register away */ -#define savesegment(seg, value) \ +#define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; - __asm__("lsll %1,%0" - :"=r" (__limit):"r" (segment)); - return __limit+1; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); + return __limit + 1; } static inline void native_clts(void) { - asm volatile ("clts"); + asm volatile("clts"); } /* @@ -189,43 +186,43 @@ static unsigned long __force_order; static inline unsigned long native_read_cr0(void) { unsigned long val; - asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr0(unsigned long val) { - asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr2(void) { unsigned long val; - asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr2(unsigned long val) { - asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr3(void) { unsigned long val; - asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr3(unsigned long val) { - asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr4(void) { unsigned long val; - asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } @@ -237,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void) #ifdef CONFIG_X86_32 asm volatile("1: mov %%cr4, %0\n" "2:\n" - _ASM_EXTABLE(1b,2b) + _ASM_EXTABLE(1b, 2b) : "=r" (val), "=m" (__force_order) : "0" (0)); #else val = native_read_cr4(); @@ -247,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void) static inline void native_write_cr4(unsigned long val) { - asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); } #ifdef CONFIG_X86_64 @@ -268,6 +265,7 @@ static inline void native_wbinvd(void) { asm volatile("wbinvd": : :"memory"); } + #ifdef CONFIG_PARAVIRT #include #else @@ -300,7 +298,7 @@ static inline void clflush(volatile void *__p) asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); } -#define nop() __asm__ __volatile__ ("nop") +#define nop() asm volatile ("nop") void disable_hlt(void); void enable_hlt(void); @@ -399,7 +397,7 @@ void default_idle(void); # define smp_wmb() barrier() #endif #define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) +#define 
set_mb(var, value) do { (void)xchg(&var, value); } while (0) #else #define smp_mb() barrier() #define smp_rmb() barrier() -- cgit v1.2.3 From 7c4d4784db93496791b798e0abab1d056b192ad0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:40 -0700 Subject: include/asm-x86/tce.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h index cd955d3d112..b1a4ea00df7 100644 --- a/include/asm-x86/tce.h +++ b/include/asm-x86/tce.h @@ -39,7 +39,7 @@ struct iommu_table; #define TCE_RPN_MASK 0x0000fffffffff000ULL extern void tce_build(struct iommu_table *tbl, unsigned long index, - unsigned int npages, unsigned long uaddr, int direction); + unsigned int npages, unsigned long uaddr, int direction); extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); extern void * __init alloc_tce_table(void); extern void __init free_tce_table(void *tbl); -- cgit v1.2.3 From 89917f28f3377fa7e38a51e9208e83b7b92542ee Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:42 -0700 Subject: include/asm-x86/thread_info_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/thread_info_32.h | 88 +++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 5bd508260ff..4e053fa561a 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h @@ -20,7 +20,8 @@ * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages - * - if the contents of this structure are changed, the assembly constants must also be changed + * - if the contents of this structure are changed, + * the assembly constants must also be changed */ #ifndef __ASSEMBLY__ @@ -30,18 +31,16 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - + int preempt_count; /* 0 => preemptable, + <0 => BUG */ mm_segment_t addr_limit; /* thread address space: - 0-0xBFFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread + 0-0xBFFFFFFF user-thread + 0-0xFFFFFFFF kernel-thread */ void *sysenter_return; struct restart_block restart_block; - - unsigned long previous_esp; /* ESP of the previous stack in case - of nested (IRQ) stacks + unsigned long previous_esp; /* ESP of the previous stack in + case of nested (IRQ) stacks */ __u8 supervisor_stack[0]; }; @@ -90,15 +89,16 @@ register unsigned long current_stack_pointer asm("esp") __used; /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { - return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); + return (struct thread_info *) + (current_stack_pointer & ~(THREAD_SIZE - 1)); } /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info(tsk) ((struct thread_info *) \ - __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE))) +#define alloc_thread_info(tsk) ((struct thread_info *) \ + __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE))) #else -#define 
alloc_thread_info(tsk) ((struct thread_info *) \ +#define alloc_thread_info(tsk) ((struct thread_info *) \ __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) #endif @@ -107,7 +107,7 @@ static inline struct thread_info *current_thread_info(void) #else /* !__ASSEMBLY__ */ /* how to get the thread information struct from ASM */ -#define GET_THREAD_INFO(reg) \ +#define GET_THREAD_INFO(reg) \ movl $-THREAD_SIZE, reg; \ andl %esp, reg @@ -119,14 +119,16 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags - * - these are process state flags that various assembly files may need to access + * - these are process state flags that various + * assembly files may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ +#define TIF_SINGLESTEP 3 /* restore singlestep on return to + user mode */ #define TIF_IRET 4 /* return with iret */ #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ @@ -143,36 +145,36 @@ static inline struct thread_info *current_thread_info(void) #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ -#define _TIF_SYSCALL_TRACE (1<status & TS_POLLING) -- cgit v1.2.3 From b98fff30223799c5df444fef1ebcfcddf310f740 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:43 -0700 Subject: include/asm-x86/thread_info_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/thread_info_64.h | 78 +++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index 6c9b214b8fc..1e5c6f6152c 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h @@ -29,9 +29,9 @@ struct thread_info { __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - mm_segment_t addr_limit; + int preempt_count; /* 0 => preemptable, + <0 => BUG */ + mm_segment_t addr_limit; struct restart_block restart_block; #ifdef CONFIG_IA32_EMULATION void __user *sysenter_return; @@ -61,17 +61,17 @@ struct thread_info { #define init_stack (init_thread_union.stack) static inline struct thread_info *current_thread_info(void) -{ +{ struct thread_info *ti; ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); - return ti; + return ti; } /* do not use in interrupt context */ static inline struct thread_info *stack_thread_info(void) { struct thread_info *ti; - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); + asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); return ti; } @@ -82,8 +82,8 @@ static inline struct thread_info *stack_thread_info(void) #define THREAD_FLAGS GFP_KERNEL #endif -#define alloc_thread_info(tsk) \ - ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) +#define alloc_thread_info(tsk) \ + ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) @@ -98,7 +98,8 @@ static inline struct thread_info 
*stack_thread_info(void) /* * thread information flags - * - these are process state flags that various assembly files may need to access + * - these are process state flags that various assembly files + * may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW * Warning: layout of LSW is hardcoded in entry.S @@ -114,7 +115,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ /* 16 free */ -#define TIF_IA32 17 /* 32bit process */ +#define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define TIF_ABI_PENDING 19 #define TIF_MEMDIE 20 @@ -126,39 +127,40 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ -#define _TIF_SYSCALL_TRACE (1<status & TS_POLLING) -- cgit v1.2.3 From 94cf8de0a0aff39c7b7785af4fc938ecacb79b7c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:45 -0700 Subject: include/asm-x86/tlbflush.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tlbflush.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 3998709ed63..0c0674d9425 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h @@ -32,7 +32,7 @@ static inline void __native_flush_tlb_global(void) static inline void __native_flush_tlb_single(unsigned long addr) { - __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); } static inline void __flush_tlb_all(void) @@ -134,8 +134,7 @@ void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, #define TLBSTATE_LAZY 2 #ifdef CONFIG_X86_32 -struct tlb_state -{ +struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; -- cgit v1.2.3 From 5d7d03b81af05f3c291b5c6be621a2b53d187e09 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:46 -0700 Subject: include/asm-x86/topology.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/topology.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index dada89e5b15..8d1a1f3d21b 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -71,7 +71,7 @@ static inline int cpu_to_node(int cpu) #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (x86_cpu_to_node_map_early_ptr) { printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", - (int)cpu); + (int)cpu); dump_stack(); return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; } -- cgit v1.2.3 From 2d86e637d15984e363e8c3f14c8f0470b4a10a3d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:47 -0700 Subject: include/asm-x86/tsc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tsc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 7d3e27f7d48..d2d8eb5b55f 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h @@ -42,7 +42,7 @@ static inline cycles_t vget_cycles(void) if (!cpu_has_tsc) return 0; #endif - 
return (cycles_t) __native_read_tsc(); + return (cycles_t)__native_read_tsc(); } extern void tsc_init(void); -- cgit v1.2.3 From b1fcec7f2296c4b9126e1b85b52494ac8910d528 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:48 -0700 Subject: include/asm-x86/uaccess_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/uaccess_32.h | 316 +++++++++++++++++++++++++------------------ 1 file changed, 187 insertions(+), 129 deletions(-) (limited to 'include') diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index fcc570ec4fe..8e7595c1f34 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h @@ -32,7 +32,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* * movsl can be slow when source and dest are not both 8-byte aligned @@ -43,7 +43,9 @@ extern struct movsl_mask { } ____cacheline_aligned_in_smp movsl_mask; #endif -#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) +#define __addr_ok(addr) \ + ((unsigned long __force)(addr) < \ + (current_thread_info()->addr_limit.seg)) /* * Test whether a block of memory is a valid user space address. @@ -54,13 +56,16 @@ extern struct movsl_mask { * * This needs 33-bit arithmetic. We have a carry... */ -#define __range_ok(addr,size) ({ \ - unsigned long flag,roksum; \ - __chk_user_ptr(addr); \ - asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ - :"=&r" (flag), "=r" (roksum) \ - :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ - flag; }) +#define __range_ok(addr, size) \ +({ \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ + :"=&r" (flag), "=r" (roksum) \ + :"1" (addr), "g" ((int)(size)), \ + "rm" (current_thread_info()->addr_limit.seg)); \ + flag; \ +}) /** * access_ok: - Checks if a user space pointer is valid @@ -81,7 +86,7 @@ extern struct movsl_mask { * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ -#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) +#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) /* * The exception table consists of pairs of addresses: the first is the @@ -96,8 +101,7 @@ extern struct movsl_mask { * on our cache or tlb entries. */ -struct exception_table_entry -{ +struct exception_table_entry { unsigned long insn, fixup; }; @@ -122,13 +126,15 @@ extern void __get_user_1(void); extern void __get_user_2(void); extern void __get_user_4(void); -#define __get_user_x(size,ret,x,ptr) \ - __asm__ __volatile__("call __get_user_" #size \ - :"=a" (ret),"=d" (x) \ - :"0" (ptr)) +#define __get_user_x(size, ret, x, ptr) \ + asm volatile("call __get_user_" #size \ + :"=a" (ret),"=d" (x) \ + :"0" (ptr)) + +/* Careful: we have to cast the result to the type of the pointer + * for sign reasons */ -/* Careful: we have to cast the result to the type of the pointer for sign reasons */ /** * get_user: - Get a simple variable from user space. * @x: Variable to store result. @@ -146,15 +152,24 @@ extern void __get_user_4(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. 
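 *
 * An illustrative call site (an editorial sketch, not part of the original
 * header; "uptr" stands in for a hypothetical "int __user *" argument):
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * The cast back through __typeof__(*(ptr)) below is what re-narrows the
 * unsigned long temporary, so a negative value read through a signed
 * pointer sign-extends correctly instead of surfacing as a large
 * positive number.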
*/ -#define get_user(x,ptr) \ -({ int __ret_gu; \ +#define get_user(x, ptr) \ +({ \ + int __ret_gu; \ unsigned long __val_gu; \ __chk_user_ptr(ptr); \ - switch(sizeof (*(ptr))) { \ - case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ - case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ - case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ - default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_x(1, __ret_gu, __val_gu, ptr); \ + break; \ + case 2: \ + __get_user_x(2, __ret_gu, __val_gu, ptr); \ + break; \ + case 4: \ + __get_user_x(4, __ret_gu, __val_gu, ptr); \ + break; \ + default: \ + __get_user_x(X, __ret_gu, __val_gu, ptr); \ + break; \ } \ (x) = (__typeof__(*(ptr)))__val_gu; \ __ret_gu; \ @@ -171,11 +186,25 @@ extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); -#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) +#define __put_user_1(x, ptr) \ + asm volatile("call __put_user_1" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_2(x, ptr) \ + asm volatile("call __put_user_2" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_4(x, ptr) \ + asm volatile("call __put_user_4" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_8(x, ptr) \ + asm volatile("call __put_user_8" : "=a" (__ret_pu) \ + : "A" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_X(x, ptr) \ + asm volatile("call __put_user_X" : "=a" (__ret_pu) \ + : "c" (ptr)) /** * put_user: - Write a simple value into user space. 
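 *
 * An illustrative call site (editorial sketch, not part of the original
 * header; "uptr" again stands in for a hypothetical "int __user *"):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * Note the "A" constraint in __put_user_8 above: on 32-bit x86 it binds
 * the 64-bit value to the edx:eax register pair, which is why the 8-byte
 * case cannot share the single-register helpers.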
@@ -195,32 +224,43 @@ extern void __put_user_8(void); */ #ifdef CONFIG_X86_WP_WORKS_OK -#define put_user(x,ptr) \ -({ int __ret_pu; \ +#define put_user(x, ptr) \ +({ \ + int __ret_pu; \ __typeof__(*(ptr)) __pu_val; \ __chk_user_ptr(ptr); \ __pu_val = x; \ - switch(sizeof(*(ptr))) { \ - case 1: __put_user_1(__pu_val, ptr); break; \ - case 2: __put_user_2(__pu_val, ptr); break; \ - case 4: __put_user_4(__pu_val, ptr); break; \ - case 8: __put_user_8(__pu_val, ptr); break; \ - default:__put_user_X(__pu_val, ptr); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_1(__pu_val, ptr); \ + break; \ + case 2: \ + __put_user_2(__pu_val, ptr); \ + break; \ + case 4: \ + __put_user_4(__pu_val, ptr); \ + break; \ + case 8: \ + __put_user_8(__pu_val, ptr); \ + break; \ + default: \ + __put_user_X(__pu_val, ptr); \ + break; \ } \ __ret_pu; \ }) #else -#define put_user(x,ptr) \ +#define put_user(x, ptr) \ ({ \ - int __ret_pu; \ - __typeof__(*(ptr)) __pus_tmp = x; \ - __ret_pu=0; \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ - sizeof(*(ptr))) != 0)) \ - __ret_pu=-EFAULT; \ - __ret_pu; \ - }) + int __ret_pu; \ + __typeof__(*(ptr))__pus_tmp = x; \ + __ret_pu = 0; \ + if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ + sizeof(*(ptr))) != 0)) \ + __ret_pu = -EFAULT; \ + __ret_pu; \ +}) #endif @@ -245,8 +285,8 @@ extern void __put_user_8(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) /** @@ -268,54 +308,62 @@ extern void __put_user_8(void); * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ + __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ __pu_err; \ }) -#define __put_user_u64(x, addr, err) \ - __asm__ __volatile__( \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ - "3:\n" \ - ".section .fixup,\"ax\"\n" \ - "4: movl %3,%0\n" \ - " jmp 3b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,4b) \ - _ASM_EXTABLE(2b,4b) \ - : "=r"(err) \ - : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) +#define __put_user_u64(x, addr, err) \ + asm volatile("1: movl %%eax,0(%2)\n" \ + "2: movl %%edx,4(%2)\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: movl %3,%0\n" \ + " jmp 3b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=r" (err) \ + : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) #ifdef CONFIG_X86_WP_WORKS_OK -#define __put_user_size(x,ptr,size,retval,errret) \ +#define __put_user_size(x, ptr, size, retval, errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ - case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ - case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ - default: __put_user_bad(); \ + case 1: \ + __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "l", "", "ir", 
errret); \ + break; \ + case 8: \ + __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ + break; \ + default: \ + __put_user_bad(); \ } \ } while (0) #else -#define __put_user_size(x,ptr,size,retval,errret) \ +#define __put_user_size(x, ptr, size, retval, errret) \ do { \ - __typeof__(*(ptr)) __pus_tmp = x; \ + __typeof__(*(ptr))__pus_tmp = x; \ retval = 0; \ \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ + if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ retval = errret; \ } while (0) @@ -329,65 +377,70 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) - - -#define __get_user_nocheck(x,ptr,size) \ -({ \ - long __gu_err; \ - unsigned long __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ - (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ + asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) + + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval,errret) \ +#define __get_user_size(x, ptr, size, retval, errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ - case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ - case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ - default: (x) = __get_user_bad(); \ + case 1: \ + __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \ + break; \ + default: \ + (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %2,%"rtype"1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errret), "0"(err)) - - -unsigned long __must_check __copy_to_user_ll(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nozero(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, - const void __user *from, unsigned long n); + asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl %3,%0\n" \ + " xor"itype" %"rtype"1,%"rtype"1\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), 
"0" (err)) + + +unsigned long __must_check __copy_to_user_ll + (void __user *to, const void *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nozero + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nocache + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nocache_nozero + (void *to, const void __user *from, unsigned long n); /** * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. @@ -416,13 +469,16 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) switch (n) { case 1: - __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); + __put_user_size(*(u8 *)from, (u8 __user *)to, + 1, ret, 1); return ret; case 2: - __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); + __put_user_size(*(u16 *)from, (u16 __user *)to, + 2, ret, 2); return ret; case 4: - __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); + __put_user_size(*(u32 *)from, (u32 __user *)to, + 4, ret, 4); return ret; } } @@ -545,19 +601,21 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, } static __always_inline unsigned long -__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) +__copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) { return __copy_from_user_ll_nocache_nozero(to, from, n); } unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); + const void *from, unsigned long n); unsigned long __must_check copy_from_user(void *to, - const void __user *from, unsigned long n); + const void __user *from, + unsigned long n); long __must_check strncpy_from_user(char *dst, const char __user *src, - long count); + long count); long __must_check __strncpy_from_user(char *dst, - const char __user *src, long count); + const char __user *src, long count); /** * strlen_user: - Get the size of a string in user space. -- cgit v1.2.3 From b896313e53344e79cc8bbc69f0a7d5c2b1735895 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:49 -0700 Subject: include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/uaccess_64.h | 376 ++++++++++++++++++++++++++----------------- 1 file changed, 227 insertions(+), 149 deletions(-) (limited to 'include') diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index b87eb4ba8f9..b8a2f433990 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@ -29,23 +29,27 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) -#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg))) +#define __addr_ok(addr) (!((unsigned long)(addr) & \ + (current_thread_info()->addr_limit.seg))) /* * Uhhuh, this needs 65-bit arithmetic. We have a carry.. 
*/ -#define __range_not_ok(addr,size) ({ \ - unsigned long flag,roksum; \ - __chk_user_ptr(addr); \ - asm("# range_ok\n\r" \ - "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ - :"=&r" (flag), "=r" (roksum) \ - :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \ - flag; }) +#define __range_not_ok(addr, size) \ +({ \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("# range_ok\n\r" \ + "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ + : "=&r" (flag), "=r" (roksum) \ + : "1" (addr), "g" ((long)(size)), \ + "g" (current_thread_info()->addr_limit.seg)); \ + flag; \ +}) -#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) +#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0) /* * The exception table consists of pairs of addresses: the first is the @@ -60,8 +64,7 @@ * on our cache or tlb entries. */ -struct exception_table_entry -{ +struct exception_table_entry { unsigned long insn, fixup; }; @@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs); * accesses to the same area of user memory). */ -#define __get_user_x(size,ret,x,ptr) \ - asm volatile("call __get_user_" #size \ - :"=a" (ret),"=d" (x) \ - :"c" (ptr) \ - :"r8") +#define __get_user_x(size, ret, x, ptr) \ + asm volatile("call __get_user_" #size \ + : "=a" (ret),"=d" (x) \ + : "c" (ptr) \ + : "r8") + +/* Careful: we have to cast the result to the type of the pointer + * for sign reasons */ -/* Careful: we have to cast the result to the type of the pointer for sign reasons */ -#define get_user(x,ptr) \ -({ unsigned long __val_gu; \ - int __ret_gu; \ +#define get_user(x, ptr) \ +({ \ + unsigned long __val_gu; \ + int __ret_gu; \ __chk_user_ptr(ptr); \ - switch(sizeof (*(ptr))) { \ - case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ - case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ - case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ - case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ - default: __get_user_bad(); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_x(1, __ret_gu, __val_gu, ptr); \ + break; \ + case 2: \ + __get_user_x(2, __ret_gu, __val_gu, ptr); \ + break; \ + case 4: \ + __get_user_x(4, __ret_gu, __val_gu, ptr); \ + break; \ + case 8: \ + __get_user_x(8, __ret_gu, __val_gu, ptr); \ + break; \ + default: \ + __get_user_bad(); \ + break; \ } \ (x) = (__force typeof(*(ptr)))__val_gu; \ __ret_gu; \ @@ -112,55 +128,73 @@ extern void __put_user_4(void); extern void __put_user_8(void); extern void __put_user_bad(void); -#define __put_user_x(size,ret,x,ptr) \ - asm volatile("call __put_user_" #size \ - :"=a" (ret) \ - :"c" (ptr),"d" (x) \ - :"r8") +#define __put_user_x(size, ret, x, ptr) \ + asm volatile("call __put_user_" #size \ + :"=a" (ret) \ + :"c" (ptr),"d" (x) \ + :"r8") -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ int __pu_err; \ - 
__put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - int __pu_err; \ - typeof(*(ptr)) __user *__pu_addr = (ptr); \ - switch (size) { \ - case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ - case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ - case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ - case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ - default: __put_user_bad(); \ - } \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + int __pu_err; \ + typeof(*(ptr)) __user *__pu_addr = (ptr); \ + switch (size) { \ + case 1: \ + __put_user_x(1, __pu_err, x, __pu_addr); \ + break; \ + case 2: \ + __put_user_x(2, __pu_err, x, __pu_addr); \ + break; \ + case 4: \ + __put_user_x(4, __pu_err, x, __pu_addr); \ + break; \ + case 8: \ + __put_user_x(8, __pu_err, x, __pu_addr); \ + break; \ + default: \ + __put_user_bad(); \ + } \ + __pu_err; \ }) -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\ - case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\ - case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\ - case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\ - default: __put_user_bad(); \ + case 1: \ + __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\ + break; \ + case 8: \ + __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \ + break; \ + default: \ + __put_user_bad(); \ } \ } while (0) @@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. 
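 * (Editorial sketch, not in the original header: __m() casts the user
 *  address to a pointer to a deliberately oversized struct, so the "m"
 *  operand below covers any width the instruction may touch, e.g.:
 *
 *	struct big { unsigned long buf[100]; };
 *	#define MEM(x) (*(struct big *)(x))
 *	asm volatile("movq %1,%0" : "=m" (MEM(p)) : "r" (v));
 *
 *  which keeps the compiler from caching or reordering accesses to that
 *  memory around the asm.)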
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - asm volatile( \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) - - -#define __get_user_nocheck(x,ptr,size) \ + asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + "2:\n" \ + ".section .fixup, \"ax\"\n" \ + "3: mov %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ + : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err)) + + +#define __get_user_nocheck(x, ptr, size) \ ({ \ int __gu_err; \ unsigned long __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ (x) = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -201,31 +234,39 @@ extern int __get_user_4(void); extern int __get_user_8(void); extern int __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\ - case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\ - case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\ - case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\ - default: (x) = __get_user_bad(); \ + case 1: \ + __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\ + break; \ + case 8: \ + __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \ + break; \ + default: \ + (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - asm volatile( \ - "1: mov"itype" %2,%"rtype"1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errno), "0"(err)) + asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + "2:\n" \ + ".section .fixup, \"ax\"\n" \ + "3: mov %3,%0\n" \ + " xor"itype" %"rtype"1,%"rtype"1\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i"(errno), "0"(err)) /* * Copy To/From Userspace @@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len); static __always_inline __must_check int __copy_from_user(void *dst, const void __user *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic(dst,(__force void *)src,size); - switch (size) { - case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); + return copy_user_generic(dst, (__force void *)src, size); + switch (size) { + case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, + ret, "b", "b", "=q", 1); return ret; - case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2); + case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, + ret, "w", "w", "=r", 2); return ret; - case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4); + case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, + ret, "l", "k", "=r", 4); + return ret; + case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 8); return ret; - case 8:__get_user_asm(*(u64*)dst,(u64 __user 
*)src,ret,"q","","=r",8); - return ret; case 10: - __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); - if (unlikely(ret)) return ret; - __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2); - return ret; + __get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u16 *)(8 + (char *)dst), + (u16 __user *)(8 + (char __user *)src), + ret, "w", "w", "=r", 2); + return ret; case 16: - __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); - if (unlikely(ret)) return ret; - __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8); - return ret; + __get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u64 *)(8 + (char *)dst), + (u64 __user *)(8 + (char __user *)src), + ret, "q", "", "=r", 8); + return ret; default: - return copy_user_generic(dst,(__force void *)src,size); + return copy_user_generic(dst, (__force void *)src, size); } -} +} static __always_inline __must_check int __copy_to_user(void __user *dst, const void *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst,src,size); - switch (size) { - case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); + return copy_user_generic((__force void *)dst, src, size); + switch (size) { + case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); return ret; - case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2); + case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, + ret, "w", "w", "ir", 2); return ret; - case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4); + case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, + ret, "l", "k", "ir", 4); + return ret; + case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 8); return ret; - case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8); - return ret; case 10: - __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10); - if (unlikely(ret)) return ret; + __put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 10); + if (unlikely(ret)) + return ret; asm("":::"memory"); - __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2); - return ret; + __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; case 16: - __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16); - if (unlikely(ret)) return ret; + __put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 16); + if (unlikely(ret)) + return ret; asm("":::"memory"); - __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8); - return ret; + __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, + ret, "q", "", "ir", 8); + return ret; default: - return copy_user_generic((__force void *)dst,src,size); + return copy_user_generic((__force void *)dst, src, size); } -} +} static __always_inline __must_check int __copy_in_user(void __user *dst, const void __user *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst,(__force void *)src,size); - switch (size) { - case 1: { + return copy_user_generic((__force void *)dst, + (__force void *)src, size); + switch (size) { + case 1: { u8 tmp; - __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); + __get_user_asm(tmp, (u8 __user *)src, + ret, 
"b", "b", "=q", 1); if (likely(!ret)) - __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); + __put_user_asm(tmp, (u8 __user *)dst, + ret, "b", "b", "iq", 1); return ret; } - case 2: { + case 2: { u16 tmp; - __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); + __get_user_asm(tmp, (u16 __user *)src, + ret, "w", "w", "=r", 2); if (likely(!ret)) - __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); + __put_user_asm(tmp, (u16 __user *)dst, + ret, "w", "w", "ir", 2); return ret; } - case 4: { + case 4: { u32 tmp; - __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); + __get_user_asm(tmp, (u32 __user *)src, + ret, "l", "k", "=r", 4); if (likely(!ret)) - __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); + __put_user_asm(tmp, (u32 __user *)dst, + ret, "l", "k", "ir", 4); return ret; } - case 8: { + case 8: { u64 tmp; - __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); + __get_user_asm(tmp, (u64 __user *)src, + ret, "q", "", "=r", 8); if (likely(!ret)) - __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); + __put_user_asm(tmp, (u64 __user *)dst, + ret, "q", "", "ir", 8); return ret; } default: - return copy_user_generic((__force void *)dst,(__force void *)src,size); + return copy_user_generic((__force void *)dst, + (__force void *)src, size); } -} +} -__must_check long +__must_check long strncpy_from_user(char *dst, const char __user *src, long count); -__must_check long +__must_check long __strncpy_from_user(char *dst, const char __user *src, long count); __must_check long strnlen_user(const char __user *str, long n); __must_check long __strnlen_user(const char __user *str, long n); @@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str); __must_check unsigned long clear_user(void __user *mem, unsigned long len); __must_check unsigned long __clear_user(void __user *mem, unsigned long len); -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size); +__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, + unsigned size); static __must_check __always_inline int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) @@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) } #define ARCH_HAS_NOCACHE_UACCESS 1 -extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); +extern long __copy_user_nocache(void *dst, const void __user *src, + unsigned size, int zerorest); -static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) +static inline int __copy_from_user_nocache(void *dst, const void __user *src, + unsigned size) { might_sleep(); return __copy_user_nocache(dst, src, size, 1); } -static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) +static inline int __copy_from_user_inatomic_nocache(void *dst, + const void __user *src, + unsigned size) { return __copy_user_nocache(dst, src, size, 0); } -- cgit v1.2.3 From 6e714b37978bbd2c7a7eb89b4474b6c2133b7796 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:50 -0700 Subject: include/asm-x86/unaligned.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unaligned.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index 913598d4f76..d270ffe7275 100644 --- a/include/asm-x86/unaligned.h 
+++ b/include/asm-x86/unaligned.h @@ -32,6 +32,6 @@ * * Note that unaligned accesses can be very expensive on some architectures. */ -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) +#define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) #endif /* _ASM_X86_UNALIGNED_H */ -- cgit v1.2.3 From 687fc16b65e96d72a680291670584090207cadf8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:51 -0700 Subject: include/asm-x86/unistd_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unistd_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 984123a68f7..8317d94771d 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -81,7 +81,7 @@ #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 -- cgit v1.2.3 From c489f4451965f4d355340ea1e60a5863c9ed2890 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:52 -0700 Subject: include/asm-x86/unistd_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unistd_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 3883ceb54ef..fe26e36d0f5 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -2,7 +2,7 @@ #define _ASM_X86_64_UNISTD_H_ #ifndef __SYSCALL -#define __SYSCALL(a,b) +#define __SYSCALL(a, b) #endif /* -- cgit v1.2.3 From 826700dc9b483d0d0de8ff4901043d54ed8b64f0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:53 -0700 Subject: include/asm-x86/user_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user_32.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 6157da6f882..d6e51edc259 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h @@ -100,10 +100,10 @@ struct user_regs_struct { struct user{ /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ + struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ struct user_i387_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ unsigned long int u_tsize; /* Text segment size (pages). */ @@ -118,7 +118,7 @@ struct user{ int reserved; /* No longer used */ unsigned long u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ - struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ + struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. 
*/ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; -- cgit v1.2.3 From a31216194c84cfa72515db4f9365ce2c68ed5791 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:54 -0700 Subject: include/asm-x86/user32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user32.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h index f769872debe..a3d91004787 100644 --- a/include/asm-x86/user32.h +++ b/include/asm-x86/user32.h @@ -1,7 +1,8 @@ #ifndef USER32_H #define USER32_H 1 -/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */ +/* IA32 compatible user structures for ptrace. + * These should be used for 32bit coredumps too. */ struct user_i387_ia32_struct { u32 cwd; @@ -42,9 +43,9 @@ struct user_regs_struct32 { }; struct user32 { - struct user_regs_struct32 regs; /* Where the registers are actually stored */ + struct user_regs_struct32 regs; /* Where the registers are actually stored */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ __u32 u_tsize; /* Text segment size (pages). */ -- cgit v1.2.3 From a206ea11b665cfb5360d05367eea1e9cfd3f3c8b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:55 -0700 Subject: include/asm-x86/user_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user_64.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 96361645560..6037b634c77 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h @@ -45,12 +45,13 @@ */ /* This matches the 64bit FXSAVE format as defined by AMD. It is the same - as the 32bit format defined by Intel, except that the selector:offset pairs for - data and eip are replaced with flat 64bit pointers. */ + as the 32bit format defined by Intel, except that the selector:offset pairs + for data and eip are replaced with flat 64bit pointers. */ struct user_i387_struct { unsigned short cwd; unsigned short swd; - unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ + unsigned short twd; /* Note this is not the same as + the 32bit/x87/FSAVE twd */ unsigned short fop; __u64 rip; __u64 rdp; @@ -97,13 +98,14 @@ struct user_regs_struct { /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments are within the file, and what virtual addresses to use. */ -struct user{ + +struct user { /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ + struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ int pad0; struct user_i387_struct i387; /* Math Co-processor registers. 
*/ /* The rest of this junk is to help gdb figure out what goes where */ @@ -120,7 +122,7 @@ struct user{ int pad1; unsigned long u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ - struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ + struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ unsigned long u_debugreg[8]; -- cgit v1.2.3 From ac1a7b0eaa1db04143ab6132c6ce4489afbb8a18 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:56 -0700 Subject: include/asm-x86/vdso.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vdso.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 9bb86899abf..86e085e003d 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h @@ -8,9 +8,11 @@ extern const char VDSO64_PRELINK[]; * Given a pointer to the vDSO image, find the pointer to VDSO64_name * as that symbol is defined in the vDSO sources or linker script. */ -#define VDSO64_SYMBOL(base, name) ({ \ - extern const char VDSO64_##name[]; \ - (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) +#define VDSO64_SYMBOL(base, name) \ +({ \ + extern const char VDSO64_##name[]; \ + (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \ +}) #endif #if defined CONFIG_X86_32 || defined CONFIG_COMPAT @@ -20,9 +22,11 @@ extern const char VDSO32_PRELINK[]; * Given a pointer to the vDSO image, find the pointer to VDSO32_name * as that symbol is defined in the vDSO sources or linker script. */ -#define VDSO32_SYMBOL(base, name) ({ \ - extern const char VDSO32_##name[]; \ - (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) +#define VDSO32_SYMBOL(base, name) \ +({ \ + extern const char VDSO32_##name[]; \ + (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ +}) #endif /* -- cgit v1.2.3 From 364fe5ef4725176324ec17f8dc3fd488d615b0de Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:57 -0700 Subject: include/asm-x86/vga.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vga.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h index 0ecf68ac03a..0ccf804377e 100644 --- a/include/asm-x86/vga.h +++ b/include/asm-x86/vga.h @@ -12,9 +12,9 @@ * access the videoram directly without any black magic. 
*/ -#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) +#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) #define vga_readb(x) (*(x)) -#define vga_writeb(x,y) (*(y) = (x)) +#define vga_writeb(x, y) (*(y) = (x)) #endif -- cgit v1.2.3 From 9e8a935bcff6c8154eace12277e6a9d853ef790b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:58 -0700 Subject: include/asm-x86/vm86.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vm86.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index c92fe4af52e..a2be241ed03 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -42,9 +42,11 @@ #define VM86_ARG(retval) ((retval) >> 8) #define VM86_SIGNAL 0 /* return due to signal */ -#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ +#define VM86_UNKNOWN 1 /* unhandled GP fault + - IO-instruction or similar */ #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ -#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ +#define VM86_STI 3 /* sti/popf/iret instruction enabled + virtual interrupts */ /* * Additional return values when invoking new vm86() @@ -205,7 +207,8 @@ void release_vm86_irqs(struct task_struct *); #define handle_vm86_fault(a, b) #define release_vm86_irqs(a) -static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { +static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) +{ return 0; } -- cgit v1.2.3 From 8948584eb282c4dc5c54f6f6ebbaf447a665c653 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:59 -0700 Subject: include/asm-x86/vmi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vmi.h | 88 +++++++++++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h index eb8bd892c01..b7c0dea119f 100644 --- a/include/asm-x86/vmi.h +++ b/include/asm-x86/vmi.h @@ -155,9 +155,9 @@ #ifndef __ASSEMBLY__ struct vmi_relocation_info { - unsigned char *eip; - unsigned char type; - unsigned char reserved[3]; + unsigned char *eip; + unsigned char type; + unsigned char reserved[3]; }; #endif @@ -173,53 +173,53 @@ struct vmi_relocation_info { #ifndef __ASSEMBLY__ struct vrom_header { - u16 rom_signature; // option ROM signature - u8 rom_length; // ROM length in 512 byte chunks - u8 rom_entry[4]; // 16-bit code entry point - u8 rom_pad0; // 4-byte align pad - u32 vrom_signature; // VROM identification signature - u8 api_version_min;// Minor version of API - u8 api_version_maj;// Major version of API - u8 jump_slots; // Number of jump slots - u8 reserved1; // Reserved for expansion - u32 virtual_top; // Hypervisor virtual address start - u16 reserved2; // Reserved for expansion - u16 license_offs; // Offset to License string - u16 pci_header_offs;// Offset to PCI OPROM header - u16 pnp_header_offs;// Offset to PnP OPROM header - u32 rom_pad3; // PnP reserverd / VMI reserved - u8 reserved[96]; // Reserved for headers - char vmi_init[8]; // VMI_Init jump point - char get_reloc[8]; // VMI_GetRelocationInfo jump point + u16 rom_signature; /* option ROM signature */ + u8 rom_length; /* ROM length in 512 byte chunks */ + u8 rom_entry[4]; /* 16-bit code entry point */ + u8 rom_pad0; /* 4-byte align pad */ + u32 
vrom_signature; /* VROM identification signature */ + u8 api_version_min;/* Minor version of API */ + u8 api_version_maj;/* Major version of API */ + u8 jump_slots; /* Number of jump slots */ + u8 reserved1; /* Reserved for expansion */ + u32 virtual_top; /* Hypervisor virtual address start */ + u16 reserved2; /* Reserved for expansion */ + u16 license_offs; /* Offset to License string */ + u16 pci_header_offs;/* Offset to PCI OPROM header */ + u16 pnp_header_offs;/* Offset to PnP OPROM header */ + u32 rom_pad3; /* PnP reserverd / VMI reserved */ + u8 reserved[96]; /* Reserved for headers */ + char vmi_init[8]; /* VMI_Init jump point */ + char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ } __attribute__((packed)); struct pnp_header { - char sig[4]; - char rev; - char size; - short next; - short res; - long devID; - unsigned short manufacturer_offset; - unsigned short product_offset; + char sig[4]; + char rev; + char size; + short next; + short res; + long devID; + unsigned short manufacturer_offset; + unsigned short product_offset; } __attribute__((packed)); struct pci_header { - char sig[4]; - short vendorID; - short deviceID; - short vpdData; - short size; - char rev; - char class; - char subclass; - char interface; - short chunks; - char rom_version_min; - char rom_version_maj; - char codetype; - char lastRom; - short reserved; + char sig[4]; + short vendorID; + short deviceID; + short vpdData; + short size; + char rev; + char class; + char subclass; + char interface; + short chunks; + char rom_version_min; + char rom_version_maj; + char codetype; + char lastRom; + short reserved; } __attribute__((packed)); /* Function prototypes for bootstrapping */ -- cgit v1.2.3 From d6ae390a0be73e6b777c6171e6b6f616462f555d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:00 -0700 Subject: include/asm-x86/voyager.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/voyager.h | 51 ++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h index 91a9932937a..9c811d2e6f9 100644 --- a/include/asm-x86/voyager.h +++ b/include/asm-x86/voyager.h @@ -91,8 +91,7 @@ #define VOYAGER_WRITE_CONFIG 0x2 #define VOYAGER_BYPASS 0xff -typedef struct voyager_asic -{ +typedef struct voyager_asic { __u8 asic_addr; /* ASIC address; Level 4 */ __u8 asic_type; /* ASIC type */ __u8 asic_id; /* ASIC id */ @@ -113,7 +112,7 @@ typedef struct voyager_module { __u16 largest_reg; /* Largest register in the scan path */ __u16 smallest_reg; /* Smallest register in the scan path */ voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ - struct voyager_module *submodule; /* Submodule pointer */ + struct voyager_module *submodule; /* Submodule pointer */ struct voyager_module *next; /* Next module in linked list */ } voyager_module_t; @@ -135,7 +134,7 @@ typedef struct voyager_eeprom_hdr { __u16 cct_offset; __u16 log_length; /* length of err log */ __u16 xsum_end; /* offset to end of - checksum */ + checksum */ __u8 reserved[4]; __u8 sflag; /* starting sentinal */ __u8 part_number[13]; /* prom part number */ @@ -148,7 +147,8 @@ typedef struct voyager_eeprom_hdr { -#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) +#define VOYAGER_EPROM_SIZE_OFFSET \ + ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) #define VOYAGER_XSUM_END_OFFSET 0x2a /* the following three definitions are for 
internal table layouts @@ -199,7 +199,7 @@ typedef struct voyager_asic_data_table { #define VOYAGER_WCBIC_TOM_L 0x4 #define VOYAGER_WCBIC_TOM_H 0x5 -/* register defines for Voyager Memory Contol (VMC) +/* register defines for Voyager Memory Contol (VMC) * these are present on L4 machines only */ #define VOYAGER_VMC1 0x81 #define VOYAGER_VMC2 0x91 @@ -334,7 +334,7 @@ typedef struct { struct QuadDescription { __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields - * will be zero except for slot */ + * will be zero except for slot */ __u8 StructureVersion; __u32 CPI_BaseAddress; __u32 LARC_BankSize; @@ -342,7 +342,7 @@ struct QuadDescription { __u8 Slot; /* Processor slots 1 - 4 */ } __attribute__((packed)); -struct ProcBoardInfo { +struct ProcBoardInfo { __u8 Type; __u8 StructureVersion; __u8 NumberOfBoards; @@ -382,19 +382,30 @@ struct CPU_Info { * packed in it by our friend the compiler. */ typedef struct { - __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ - __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ - __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ - __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ - __u32 OS_Flags; /* Flags set by the OS as info for SUS */ - __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ - __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ + __u8 Mailbox_SUS; /* Written to by SUS to give + commands/response to the OS */ + __u8 Mailbox_OS; /* Written to by the OS to give + commands/response to SUS */ + __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the + interface SUS supports */ + __u8 OS_MailboxVersion; /* Tells SUS which iteration of the + interface the OS supports */ + __u32 OS_Flags; /* Flags set by the OS as info for + SUS */ + __u32 SUS_Flags; /* Flags set by SUS as info + for the OS */ + __u32 WatchDogPeriod; /* Watchdog period (in seconds) which + the DP uses to see if the OS + is dead */ __u32 WatchDogCount; /* Updated by the OS on every tic. 
*/ - __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ - MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ + __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS + where to stuff the SUS error log + on a dump */ + MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; + /* Storage for MCA POS data */ /* All new SECOND_PASS_INTERFACE fields added from this point */ - struct ProcBoardInfo *BoardData; - struct CPU_Info *CPU_Data; + struct ProcBoardInfo *BoardData; + struct CPU_Info *CPU_Data; /* All new fields must be added from this point */ } Voyager_KernelSUS_Mbox_t; @@ -478,7 +489,7 @@ struct voyager_SUS { __u32 SUS_errorlog; /* lots of system configuration stuff under here */ }; - + /* Variables exported by voyager_smp */ extern __u32 voyager_extended_vic_processors; extern __u32 voyager_allowed_boot_processors; -- cgit v1.2.3 From 8fdf765527920e30d8fd57da3a83d6bf56799114 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:02 -0700 Subject: include/asm-x86/xor_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/xor_32.h | 494 ++++++++++++++++++++++++----------------------- 1 file changed, 248 insertions(+), 246 deletions(-) (limited to 'include') diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index a41ef1bdd42..067b5c1835a 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h @@ -16,12 +16,12 @@ * Copyright (C) 1998 Ingo Molnar. */ -#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" -#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" -#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" -#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" -#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" -#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" +#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n" +#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n" +#define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" +#define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" +#define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" +#define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" #include @@ -32,24 +32,24 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - ST(i,0) \ - XO1(i+1,1) \ - ST(i+1,1) \ - XO1(i+2,2) \ - ST(i+2,2) \ - XO1(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + ST(i, 0) \ + XO1(i+1, 1) \ + ST(i+1, 1) \ + XO1(i + 2, 2) \ + ST(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) @@ -76,25 +76,25 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - ST(i,0) \ - XO2(i+1,1) \ - ST(i+1,1) \ - XO2(i+2,2) \ - ST(i+2,2) \ - XO2(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + ST(i, 0) \ + XO2(i + 1, 1) \ + ST(i + 1, 1) \ + XO2(i + 2, 2) \ + ST(i + 2, 2) \ + 
XO2(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -125,29 +125,29 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - ST(i,0) \ - XO3(i+1,1) \ - ST(i+1,1) \ - XO3(i+2,2) \ - ST(i+2,2) \ - XO3(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + ST(i, 0) \ + XO3(i + 1, 1) \ + ST(i + 1, 1) \ + XO3(i + 2, 2) \ + ST(i + 2, 2) \ + XO3(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -186,35 +186,35 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - ST(i,0) \ - XO4(i+1,1) \ - ST(i+1,1) \ - XO4(i+2,2) \ - ST(i+2,2) \ - XO4(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + XO4(i, 0) \ + ST(i, 0) \ + XO4(i + 1, 1) \ + ST(i + 1, 1) \ + XO4(i + 2, 2) \ + ST(i + 2, 2) \ + XO4(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -233,13 +233,13 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) - : "r" (p4), "r" (p5) + : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. 
*/ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } @@ -259,7 +259,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -286,7 +286,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) " pxor 56(%2), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " decl %0 ;\n" @@ -307,7 +307,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -342,7 +342,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%3), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -364,7 +364,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -407,7 +407,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%4), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -436,9 +436,9 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -489,7 +489,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%5), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -505,7 +505,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. 
*/ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } @@ -531,11 +531,12 @@ static struct xor_block_template xor_block_p5_mmx = { * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ -#define XMMS_SAVE do { \ +#define XMMS_SAVE \ +do { \ preempt_disable(); \ cr0 = read_cr0(); \ clts(); \ - __asm__ __volatile__ ( \ + asm volatile( \ "movups %%xmm0,(%0) ;\n\t" \ "movups %%xmm1,0x10(%0) ;\n\t" \ "movups %%xmm2,0x20(%0) ;\n\t" \ @@ -543,10 +544,11 @@ static struct xor_block_template xor_block_p5_mmx = { : \ : "r" (xmm_save) \ : "memory"); \ -} while(0) +} while (0) -#define XMMS_RESTORE do { \ - __asm__ __volatile__ ( \ +#define XMMS_RESTORE \ +do { \ + asm volatile( \ "sfence ;\n\t" \ "movups (%0),%%xmm0 ;\n\t" \ "movups 0x10(%0),%%xmm1 ;\n\t" \ @@ -557,76 +559,76 @@ static struct xor_block_template xor_block_p5_mmx = { : "memory"); \ write_cr0(cr0); \ preempt_enable(); \ -} while(0) +} while (0) #define ALIGN16 __attribute__((aligned(16))) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" -#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" -#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" +#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" +#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" -#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" -#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" -#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" -#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" -#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" +#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" +#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" +#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" +#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" +#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ PF1(i) \ - PF1(i+2) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF1(i + 2) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : - : "memory"); + : "memory"); XMMS_RESTORE; } @@ -635,59 +637,59 @@ static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK 
#define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF2(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r"(p2), "+r"(p3) : - : "memory" ); + : "memory" ); XMMS_RESTORE; } @@ -696,66 +698,66 @@ static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ + PF2(i + 2) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF3(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " addl $256, %4 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : - : "memory" ); + : "memory" ); XMMS_RESTORE; } @@ -764,7 +766,7 @@ static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; @@ -776,65 +778,65 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ + PF2(i + 2) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ + PF3(i + 2) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ PF4(i) \ - PF4(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF4(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ XO4(i,0) \ - XO4(i+1,1) \ - XO4(i+2,2) \ - XO4(i+3,3) \ + XO4(i + 1, 1) \ + XO4(i + 2, 2) \ + XO4(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " addl $256, %5 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " addl $256, %4 ;\n" + " addl $256, %5 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) @@ -843,17 +845,17 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); XMMS_RESTORE; } static struct xor_block_template xor_block_pIII_sse = { - .name = "pIII_sse", - .do_2 = xor_sse_2, - .do_3 = xor_sse_3, - .do_4 = xor_sse_4, - .do_5 = xor_sse_5, + .name = "pIII_sse", + .do_2 = xor_sse_2, + .do_3 = xor_sse_3, + .do_4 = xor_sse_4, + .do_5 = xor_sse_5, }; /* Also try the generic routines. */ @@ -861,21 +863,21 @@ static struct xor_block_template xor_block_pIII_sse = { #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_8regs); \ - xor_speed(&xor_block_8regs_p); \ - xor_speed(&xor_block_32regs); \ - xor_speed(&xor_block_32regs_p); \ - if (cpu_has_xmm) \ - xor_speed(&xor_block_pIII_sse); \ - if (cpu_has_mmx) { \ - xor_speed(&xor_block_pII_mmx); \ - xor_speed(&xor_block_p5_mmx); \ - } \ - } while (0) +do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + if (cpu_has_xmm) \ + xor_speed(&xor_block_pIII_sse); \ + if (cpu_has_mmx) { \ + xor_speed(&xor_block_pII_mmx); \ + xor_speed(&xor_block_p5_mmx); \ + } \ +} while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ +#define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? 
&xor_block_pIII_sse : FASTEST) -- cgit v1.2.3 From 687c80540975709844699d9a96070e2cf3bad39d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:03 -0700 Subject: include/asm-x86/xor_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/xor_64.h | 294 ++++++++++++++++++++++++----------------------- 1 file changed, 149 insertions(+), 145 deletions(-) (limited to 'include') diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index 1eee7fcb242..24957e39ac8 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h @@ -24,20 +24,23 @@ */ /* - * x86-64 changes / gcc fixes from Andi Kleen. + * x86-64 changes / gcc fixes from Andi Kleen. * Copyright 2002 Andi Kleen, SuSE Labs. * * This hasn't been optimized for the hammer yet, but there are likely * no advantages to be gotten from x86-64 here anyways. */ -typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; +typedef struct { + unsigned long a, b; +} __attribute__((aligned(16))) xmm_store_t; -/* Doesn't use gcc to save the XMM registers, because there is no easy way to +/* Doesn't use gcc to save the XMM registers, because there is no easy way to tell it to do a clts before the register saving. */ -#define XMMS_SAVE do { \ +#define XMMS_SAVE \ +do { \ preempt_disable(); \ - asm volatile ( \ + asm volatile( \ "movq %%cr0,%0 ;\n\t" \ "clts ;\n\t" \ "movups %%xmm0,(%1) ;\n\t" \ @@ -47,10 +50,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; : "=&r" (cr0) \ : "r" (xmm_save) \ : "memory"); \ -} while(0) +} while (0) -#define XMMS_RESTORE do { \ - asm volatile ( \ +#define XMMS_RESTORE \ +do { \ + asm volatile( \ "sfence ;\n\t" \ "movups (%1),%%xmm0 ;\n\t" \ "movups 0x10(%1),%%xmm1 ;\n\t" \ @@ -61,72 +65,72 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; : "r" (cr0), "r" (xmm_save) \ : "memory"); \ preempt_enable(); \ -} while(0) +} while (0) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" -#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" -#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" +#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" +#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" -#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" -#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" -#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" -#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" -#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" +#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" +#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" +#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" +#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" +#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { - unsigned int lines = bytes >> 8; + unsigned int lines = bytes >> 8; unsigned long cr0; xmm_store_t xmm_save[4]; XMMS_SAVE; - asm volatile ( + asm volatile( #undef BLOCK #define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ + LD(i, 0) \ + LD(i + 
1, 1) \ PF1(i) \ - PF1(i+2) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF1(i + 2) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" " decl %[cnt] ; jnz 1b" : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) - : [inc] "r" (256UL) - : "memory"); + : [inc] "r" (256UL) + : "memory"); XMMS_RESTORE; } @@ -141,52 +145,52 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF2(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) : [inc] "r" (256UL) - : "memory"); + : "memory"); XMMS_RESTORE; } @@ -195,64 +199,64 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned int lines = bytes >> 8; - xmm_store_t xmm_save[4]; + xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + PF2(i + 2) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF3(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" - " addq %[inc], %[p4] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p4] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) : [inc] "r" (256UL) - : "memory" ); + 
: "memory" ); XMMS_RESTORE; } @@ -261,70 +265,70 @@ static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { - unsigned int lines = bytes >> 8; + unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + PF2(i + 2) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + PF3(i + 2) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ PF4(i) \ - PF4(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - XO4(i+1,1) \ - XO4(i+2,2) \ - XO4(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF4(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + XO4(i, 0) \ + XO4(i + 1, 1) \ + XO4(i + 2, 2) \ + XO4(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" - " addq %[inc], %[p4] ;\n" - " addq %[inc], %[p5] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p4] ;\n" + " addq %[inc], %[p5] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), - [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), + [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) : [inc] "r" (256UL) : "memory"); @@ -333,18 +337,18 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static struct xor_block_template xor_block_sse = { - .name = "generic_sse", - .do_2 = xor_sse_2, - .do_3 = xor_sse_3, - .do_4 = xor_sse_4, - .do_5 = xor_sse_5, + .name = "generic_sse", + .do_2 = xor_sse_2, + .do_3 = xor_sse_3, + .do_4 = xor_sse_4, + .do_5 = xor_sse_5, }; #undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_sse); \ - } while (0) +#define XOR_TRY_TEMPLATES \ +do { \ + xor_speed(&xor_block_sse); \ +} while (0) /* We force the use of the SSE xor block because it can write around L2. 
We may also be able to load into the L1 only depending on how the cpu
 deals with a load to a line that is being prefetched. */
-- cgit v1.2.3

From 7fda20f146d5d217684ffbc37c6b6c5f82c2dffd Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 29 Feb 2008 10:29:38 +0100
Subject: x86: spinlock ops are always-inlined

Signed-off-by: Ingo Molnar
---
 include/asm-x86/spinlock.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 47dfe2607bb..bc6376f1bc5 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -78,7 +78,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;

@@ -99,7 +99,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
@@ -120,7 +120,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -142,7 +142,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -165,7 +165,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -187,7 +187,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
-- cgit v1.2.3

From 43cdf5d6e0a75c1069adc8d126b97b792ff53142 Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Sat, 22 Mar 2008 18:50:22 +0100
Subject: x86: pgtable, document pde bits

Some of the pde bits weren't documented; add short descriptions to them.

Signed-off-by: Jiri Slaby
Cc: H. Peter Anvin
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 2ce76507046..f1d9f4a03f6 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -4,13 +4,13 @@
 #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS 0

-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
 #define _PAGE_BIT_FILE 6
 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
-- cgit v1.2.3

From 5524ea320d80e3ac6aeeec44216660831c76da08 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 11 Mar 2008 02:23:20 +0100
Subject: x86: don't set up early exception handlers for external interrupts

All of early setup runs with interrupts disabled, so there is no need to
set up early exception handlers for vectors >= 32. This saves some minor
text size.

Signed-off-by: Andi Kleen
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/asm-x86/segment.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index 23f0535fec6..ed5131dd7d9 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -191,13 +191,14 @@
 #define SEGMENT_TI_MASK 0x4

 #define IDT_ENTRIES 256
+#define NUM_EXCEPTION_VECTORS 32

 #define GDT_SIZE (GDT_ENTRIES * 8)
 #define GDT_ENTRY_TLS_ENTRIES 3
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-extern const char early_idt_handlers[IDT_ENTRIES][10];
+extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
 #endif
 #endif
-- cgit v1.2.3

From 7d1116a92d709c22e7db910724c9fcd2001b0499 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 12 Mar 2008 03:53:27 +0100
Subject: x86: implement true end_pfn_mapped for 32bit

Even on 32bit, 2MB pages can map more memory than is in the true
max_low_pfn if end_pfn is not highmem and not aligned to 2MB. Add an
end_pfn_map similar to x86-64 that accounts for this fact. This is
important for code that really needs to know about all mapping aliases.
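
To make the arithmetic concrete, a minimal sketch (not code from the
patch; true_mapped_end_pfn is a made-up name, and PMD_SIZE, PMD_MASK and
PAGE_SHIFT are assumed to be the usual kernel constants):

	/* illustrative sketch only: what the last 2MB pde really covers */
	static unsigned long true_mapped_end_pfn(void)
	{
		unsigned long end = max_low_pfn << PAGE_SHIFT;	 /* end of lowmem */
		unsigned long mapped = (end + PMD_SIZE - 1) & PMD_MASK; /* 2MB round-up */

		return mapped >> PAGE_SHIFT;	/* can exceed max_low_pfn */
	}

Everything between max_low_pfn and that value is mapped without being
usable memory, which is exactly the alias range such code has to care
about.
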
Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-x86/page.h | 4 +++- include/asm-x86/page_64.h | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a05b2896492..b734939916c 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -36,7 +36,7 @@ #define max_pfn_mapped end_pfn_map #else #include -#define max_pfn_mapped max_low_pfn +#define max_pfn_mapped end_pfn_map #endif /* CONFIG_X86_64 */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) @@ -50,6 +50,8 @@ extern int page_is_ram(unsigned long pagenr); +extern unsigned long end_pfn_map; + struct page; static inline void clear_user_page(void *page, unsigned long vaddr, diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index f156778f707..54d5db63485 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -59,7 +59,6 @@ void clear_page(void *page); void copy_page(void *to, void *from); extern unsigned long end_pfn; -extern unsigned long end_pfn_map; extern unsigned long phys_base; extern unsigned long __phys_addr(unsigned long); -- cgit v1.2.3 From 67794292c8615b05f46419ba8d4fd99e7c9a5db9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 21 Mar 2008 21:27:10 +0100 Subject: x86: replace the now useless max_pfn_mapped define Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-x86/page.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index b734939916c..6724a4bc6b7 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -33,10 +33,8 @@ #ifdef CONFIG_X86_64 #include -#define max_pfn_mapped end_pfn_map #else #include -#define max_pfn_mapped end_pfn_map #endif /* CONFIG_X86_64 */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) @@ -50,7 +48,7 @@ extern int page_is_ram(unsigned long pagenr); -extern unsigned long end_pfn_map; +extern unsigned long max_pfn_mapped; struct page; -- cgit v1.2.3 From cc6150321903ca4c3bc9d53b0cdafb05d77d64d0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:28 +0100 Subject: x86: account overlapped mappings in max_pfn_mapped When end_pfn is not aligned to 2MB (or 1GB) then the kernel might map more memory than end_pfn. Account this in max_pfn_mapped. 
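
The consumers then boil down to a check of the following shape
(pfn_has_direct_map_alias is a hypothetical helper named purely for
illustration): any pfn below max_pfn_mapped may have a direct-mapping
alias whose attributes must be kept coherent, even if it lies beyond
end_pfn.

	/* hypothetical sketch, not from this patch */
	static inline int pfn_has_direct_map_alias(unsigned long pfn)
	{
		return pfn < max_pfn_mapped;	/* mapped, though maybe not usable RAM */
	}
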
Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-x86/page_64.h | 3 +++ include/asm-x86/proto.h | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 54d5db63485..6ea72859c49 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -80,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t; #define vmemmap ((struct page *)VMEMMAP_START) +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); + #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 9da46af3b0e..1e17bcce450 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h @@ -7,8 +7,6 @@ extern void early_idt_handler(void); -extern void init_memory_mapping(unsigned long start, unsigned long end); - extern void system_call(void); extern void syscall_init(void); -- cgit v1.2.3 From c9caa02c529d5e113e40cbc77254558fcdfa4215 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:29 +0100 Subject: x86: add set_memory_4k to pageattr.c Add a new function to force split large pages into 4k pages. This is needed for some followup optimizations. I had to add a new field to cpa_data to pass down the information that try_preserve_large_page should not run. Right now no set_page_4k() because I didn't need it and all the specialized users I have in mind would be more comfortable with pure addresses. I also didn't export it because it's unlikely external code needs it. Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 7ab5b520b7b..cb1d6f8fd00 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -45,6 +45,7 @@ int set_memory_nx(unsigned long addr, int numpages); int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); +int set_memory_4k(unsigned long addr, int numpages); void clflush_cache_range(void *addr, unsigned int size); -- cgit v1.2.3 From 1de87bd40e119d26533b5135677901990390bfa9 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 22 Mar 2008 10:59:28 +0100 Subject: x86: re-add rdmsrl_safe RDMSR for 64bit values with exception handling. Makes it easier to deal with 64bit valued MSRs. The old 64bit code base had that too as checking_rdmsrl(), but it got dropped somehow. 
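
A sketch of a typical caller, assuming the prototype this patch adds,
int rdmsrl_safe(unsigned msr, unsigned long long *p); check_tseg is a
made-up name, and MSR_K8_TSEG_ADDR is the TSEG MSR constant added later
in this series:

	/* illustrative only: probe an MSR that may not exist on this CPU */
	static void __init check_tseg(void)
	{
		unsigned long long tseg;

		if (rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg))
			return;	/* the #GP was caught: MSR not implemented here */
		printk(KERN_DEBUG "TSEG base: %llx\n", tseg);
	}
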
Signed-off-by: Andi Kleen
Cc: andreas.herrmann3@amd.com
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/asm-x86/msr.h | 8 ++++++++
 include/asm-x86/paravirt.h | 7 +++++++
 2 files changed, 15 insertions(+)

(limited to 'include')

diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2c698a2e81f..3707650a169 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -150,6 +150,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	__err; \
 })

+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_safe(msr, &err);
+	return err;
+}
+
 #define rdtscl(low) \
 	((low) = (u32)native_read_tsc())

diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0c23f7940bc..3d419398499 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -693,6 +693,13 @@ do { \
 	_err; \
 })

+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr(msr, &err);
+	return err;
+}

 static inline u64 paravirt_read_tsc(void)
 {
-- cgit v1.2.3

From 8346ea17aa20e9864b0f7dc03d55f3cd5620b8c1 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 12 Mar 2008 03:53:32 +0100
Subject: x86: split large page mapping for AMD TSEG

On AMD SMM protected memory is part of the address map, but handled
internally like an MTRR. That leads to large pages getting split
internally, which has some performance implications. Check for the AMD
TSEG MSR and split the large page mapping on that area explicitly if it
is part of the direct mapping.

There is also SMM ASEG, but it is in the first 1MB and already covered
by the earlier split first page patch.

Idea for this came from an earlier patch by Andreas Herrmann

On a RevF dual Socket Opteron system kernbench shows a clear improvement
from this (together with the earlier patches in this series, especially
the split first 2MB patch):

[lower is better]

                no split   stddev       split     stddev     delta
 Elapsed Time    87.146   (0.727516)    84.296   (1.09098)   -3.2%
 User Time      274.537   (4.05226)    273.692   (3.34344)   -0.3%
 System Time     34.907   (0.42492)     34.508   (0.26832)   -1.1%
 Percent CPU    322.5    (38.3007)     326.5    (44.5128)    +1.2%

=> About 3.2% improvement in elapsed time for kernbench.

With GB pages on AMD Fam1h the impact of splitting is much higher of
course, since it would split two full GB pages (together with the first
1MB split patch) instead of two 2MB pages. I could not benchmark a clear
difference in kernbench on gbpages, so I kept it disabled for that case.
That was only limited benchmarking of course, so if someone was
interested in running more tests for the gbpages case that could be
revisited (contributions welcome).

I didn't bother implementing this for 32bit because it is very unlikely
the 32bit lowmem mapping overlaps into the TSEG near 4GB and the 2MB low
split is already handled for both.

[ mingo@elte.hu: do it on gbpages kernels too, there's no clear reason
why it shouldn't help there.
] Signed-off-by: Andi Kleen Acked-by: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-x86/msr-index.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index af4e07f661b..09413ad39d3 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -112,6 +112,7 @@ #define MSR_K8_SYSCFG 0xc0010010 #define MSR_K8_HWCR 0xc0010015 #define MSR_K8_ENABLE_C1E 0xc0010055 +#define MSR_K8_TSEG_ADDR 0xc0010112 #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ -- cgit v1.2.3 From 5af5573ee06c361378e22a9dd71dae0320e841f7 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 25 Mar 2008 13:28:56 -0300 Subject: x86: move ipi definitions to mach_ipi.h take them out of the x86_64-only asm/mach_apic.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_ipi.h | 10 ++++++++++ include/asm-x86/mach_apic.h | 3 --- 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h index 0dba244c86d..be323364e68 100644 --- a/include/asm-x86/mach-default/mach_ipi.h +++ b/include/asm-x86/mach-default/mach_ipi.h @@ -9,10 +9,15 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; +#ifdef CONFIG_X86_64 +#include +#define send_IPI_mask (genapic->send_IPI_mask) +#else static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_bitmask(mask, vector); } +#endif static inline void __local_send_IPI_allbutself(int vector) { @@ -33,6 +38,10 @@ static inline void __local_send_IPI_all(int vector) __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } +#ifdef CONFIG_X86_64 +#define send_IPI_allbutself (genapic->send_IPI_allbutself) +#define send_IPI_all (genapic->send_IPI_all) +#else static inline void send_IPI_allbutself(int vector) { /* @@ -50,5 +59,6 @@ static inline void send_IPI_all(int vector) { __local_send_IPI_all(vector); } +#endif #endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h index 7b7115a0c1c..1bc68c0c0cd 100644 --- a/include/asm-x86/mach_apic.h +++ b/include/asm-x86/mach_apic.h @@ -20,9 +20,6 @@ #define vector_allocation_domain (genapic->vector_allocation_domain) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) -#define send_IPI_mask (genapic->send_IPI_mask) -#define send_IPI_allbutself (genapic->send_IPI_allbutself) -#define send_IPI_all (genapic->send_IPI_all) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) #define phys_pkg_id (genapic->phys_pkg_id) -- cgit v1.2.3 From dd46e3ca73d136aa7f9f1813e4cbb6934c3611cc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 25 Mar 2008 18:10:46 -0300 Subject: x86: move apic declarations to mach_apic.h take them out of the x86_64-specific asm/mach_apic.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_apic.h | 83 ++++++++++++++++++-------------- include/asm-x86/mach_apic.h | 26 ---------- 2 files changed, 46 insertions(+), 63 deletions(-) delete mode 100644 include/asm-x86/mach_apic.h (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h 
index 13900e8cc1a..1f56e7d5bfd 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -1,6 +1,8 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H +#ifdef CONFIG_X86_LOCAL_APIC + #include #include @@ -14,24 +16,25 @@ static inline cpumask_t target_cpus(void) return cpumask_of_cpu(0); #endif } -#define TARGET_CPUS (target_cpus()) #define NO_BALANCE_IRQ (0) #define esr_disable (0) +#ifdef CONFIG_X86_64 +#include +#define INT_DELIVERY_MODE (genapic->int_delivery_mode) +#define INT_DEST_MODE (genapic->int_dest_mode) +#define TARGET_CPUS (genapic->target_cpus()) +#define apic_id_registered (genapic->apic_id_registered) +#define init_apic_ldr (genapic->init_apic_ldr) +#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) +#define phys_pkg_id (genapic->phys_pkg_id) +#define vector_allocation_domain (genapic->vector_allocation_domain) +extern void setup_apic_routing(void); +#else #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - +#define TARGET_CPUS (target_cpus()) /* * Set up the logical destination ID. * @@ -49,32 +52,52 @@ static inline void init_apic_ldr(void) apic_write_around(APIC_LDR, val); } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline int apic_id_registered(void) { - return phys_map; + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); +} + +static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +{ + return cpus_addr(cpumask)[0]; +} + +static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; } -#ifdef CONFIG_X86_64 -extern void setup_apic_routing(void); -#else static inline void setup_apic_routing(void) { printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", "Flat", nr_ioapics);
 }

-#endif
-static inline int multi_timer_check(int apic, int irq)
+static inline int apicid_to_node(int logical_apicid)
 {
 	return 0;
 }
+#endif

-#ifdef CONFIG_X86_32
-static inline int apicid_to_node(int logical_apicid)
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	return phys_map;
+}
+
+static inline int multi_timer_check(int apic, int irq)
 {
 	return 0;
 }
-#endif

 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
@@ -109,23 +132,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }

-static inline int apic_id_registered(void)
-{
-	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
-	return cpus_addr(cpumask)[0];
-}
-
 static inline void enable_apic_mode(void)
 {
 }

-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
+#endif /* CONFIG_X86_LOCAL_APIC */
 #endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h
deleted file mode 100644
index 1bc68c0c0cd..00000000000
--- a/include/asm-x86/mach_apic.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch defines.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-#include
-
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS (genapic->target_cpus())
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define phys_pkg_id (genapic->phys_pkg_id)
-
-#endif /* __ASM_MACH_APIC_H */
-- cgit v1.2.3

From 537e33136443bcd53ee13bc32a8f0fa46b1f3fdb Mon Sep 17 00:00:00 2001
From: Jesper Juhl
Date: Wed, 26 Mar 2008 02:16:15 +0100
Subject: x86 floppy: kill off the 'register' keyword from header

When compilers became generally better at optimizing code than humans,
the register keyword became mostly useless. For the floppy driver it
certainly is, since it's so slow compared to the rest of the system that
optimizing access to a single variable or two isn't going to make any
real difference. So let's just leave it to the compiler - it'll do a
better job anyway.

This patch does away with a few register keywords in the x86 floppy
driver.
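
For reference, the only hard semantic 'register' retains in C is that
the address of such a variable may not be taken, which is also why
dropping the keyword is always safe. A small illustration (register_demo
is a made-up name, not code from the patch):

	static void register_demo(void)
	{
		register int hint = 0;	/* allocation hint; modern gcc mostly ignores it */
		int plain = hint;
		int *p = &plain;	/* fine; &hint would be rejected by the compiler */

		(void)p;
	}
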
Signed-off-by: Jesper Juhl Signed-off-by: Ingo Molnar --- include/asm-x86/floppy.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index 438b3033a25..dbe82a5c5ea 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h @@ -53,7 +53,7 @@ static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id) { - register unsigned char st; + unsigned char st; #undef TRACE_FLPY_INT @@ -71,8 +71,8 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) #endif { - register int lcount; - register char *lptr; + int lcount; + char *lptr; st = 1; for (lcount = virtual_dma_count, lptr = virtual_dma_addr; -- cgit v1.2.3 From aa040b2f0693695ae393cd9b8a93055952dbf76f Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 22 Mar 2008 13:27:38 -0700 Subject: x86: simplify sync_test_bit(), improve Using a naked parameterless macro could lead to other tokens being unexpectedly replaced. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index f1078a5e4ed..b47a1d0b8a8 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -123,7 +123,7 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) return oldbit; } -#define sync_test_bit test_bit +#define sync_test_bit(nr, addr) test_bit(nr, addr) #undef ADDR -- cgit v1.2.3 From 7219bebd72726c13c1eaaa3ade0e829e998fb3b1 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 17 Apr 2008 17:41:31 +0200 Subject: x86: add comments to describe the new api's in cacheflush.h The new cacheflush.h API's didn't have any comments describing how they're to be used yet and the conventions around these functions. This patch adds comments to this effect; in order for that to be a logical series, some prototypes had to move around. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 63 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index cb1d6f8fd00..f4c0ab50d2c 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -24,15 +24,34 @@ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy((dst), (src), (len)) -int __deprecated_for_modules change_page_attr(struct page *page, int numpages, - pgprot_t prot); -int set_pages_uc(struct page *page, int numpages); -int set_pages_wb(struct page *page, int numpages); -int set_pages_x(struct page *page, int numpages); -int set_pages_nx(struct page *page, int numpages); -int set_pages_ro(struct page *page, int numpages); -int set_pages_rw(struct page *page, int numpages); +/* + * The set_memory_* API can be used to change various attributes of a virtual + * address range. The attributes include: + * Cachability : UnCached, WriteCombining, WriteBack + * Executability : eXeutable, NoteXecutable + * Read/Write : ReadOnly, ReadWrite + * Presence : NotPresent + * + * Within a catagory, the attributes are mutually exclusive. 
+ * + * The implementation of this API will take care of various aspects that + * are associated with changing such attributes, such as: + * - Flushing TLBs + * - Flushing CPU caches + * - Making sure aliases of the memory behind the mapping don't violate + * coherency rules as defined by the CPU in the system. + * + * What this API does not do: + * - Provide exclusion between various callers - including callers that + * operation on other mappings of the same physical page + * - Restore default attributes when a page is freed + * - Guarantee that mappings other than the requested one are + * in any state, other than that these do not violate rules for + * the CPU you have. Do not depend on any effects on other mappings, + * CPUs other than the one you have may have more relaxed rules. + * The caller is required to take care of these. + */ int _set_memory_uc(unsigned long addr, int numpages); int _set_memory_wc(unsigned long addr, int numpages); @@ -47,6 +66,34 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); int set_memory_4k(unsigned long addr, int numpages); +/* + * For legacy compatibility with the old APIs, a few functions + * are provided that work on a "struct page". + * These functions operate ONLY on the 1:1 kernel mapping of the + * memory that the struct page represents, and internally just + * call the set_memory_* function. See the description of the + * set_memory_* function for more details on conventions. + * + * These APIs should be considered *deprecated* and are likely going to + * be removed in the future. + * The reason for this is the implicit operation on the 1:1 mapping only, + * making this not a generally useful API. + * + * Specifically, many users of the old APIs had a virtual address, + * called virt_to_page() or vmalloc_to_page() on that address to + * get a struct page* that the old API required. + * To convert these cases, use set_memory_*() on the original + * virtual address, do not use these functions. 
+ */ + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); +int set_pages_x(struct page *page, int numpages); +int set_pages_nx(struct page *page, int numpages); +int set_pages_ro(struct page *page, int numpages); +int set_pages_rw(struct page *page, int numpages); + + void clflush_cache_range(void *addr, unsigned int size); void cpa_init(void); -- cgit v1.2.3 From 903dcb5a1bd0ef2b09d756f646e367cd12659b6f Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:22 +0300 Subject: x86: move generic_processor_info to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index eccbc581ec8..7ee54d396d6 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -43,6 +43,7 @@ extern unsigned long mp_lapic_addr; extern void find_smp_config(void); extern void get_smp_config(void); +void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI extern void mp_register_lapic(u8 id, u8 enabled); extern void mp_register_lapic_address(u64 address); -- cgit v1.2.3 From 86c9835b46605fb29a3c30c6cc344d9df49e54a3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 28 Mar 2008 11:59:57 +0100 Subject: x86: mpparse, move generic processor info to apic_32.c fix Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 7ee54d396d6..31bac12a97d 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -1,6 +1,8 @@ #ifndef _AM_X86_MPSPEC_H #define _AM_X86_MPSPEC_H +#include + #include #ifdef CONFIG_X86_32 -- cgit v1.2.3 From fe874b3edff43f9a74d9903eb3710e5e0511faf1 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:56 -0300 Subject: x86: surround hard_smp_processor_id in APIC_DEFINITION APIC_DEFINITION is not defined in x86_64, so in practice, we keep our old code here. But as a nice side effect, the code is now equal to smp_32.h. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index c53a011bb91..25206333476 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -52,12 +52,16 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } -#include +# ifdef APIC_DEFINITION +extern int hard_smp_processor_id(void); +# else +# include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); } +# endif /* APIC_DEFINITION */ #endif -- cgit v1.2.3 From 2ba95bcbe68d692f549fb10809f15681a25ff6fb Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:57 -0300 Subject: x86: provide bogus hard_smp_processor_id We provide a bogus macro for x86_64 in case CONFIG_X86_LOCAL_APIC is not set. It will always be set for x86_64, so the effect is just to make the code equal to i386. 
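
Condensed, the shape both word sizes now share is roughly the following;
my_hard_id is a stand-in name used for illustration, not the real macro:

	#ifdef CONFIG_X86_LOCAL_APIC
	/* the real hardware id, read from the local APIC's ID register */
	# define my_hard_id()	GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID))
	#elif !defined(CONFIG_SMP)
	/* no local APIC and no SMP: the boot cpu is the only possible cpu */
	# define my_hard_id()	0
	#endif
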
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 25206333476..c46585e09ea 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -46,6 +46,8 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() smp_processor_id() +#ifdef CONFIG_X86_LOCAL_APIC + static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ @@ -63,5 +65,13 @@ static inline int hard_smp_processor_id(void) } # endif /* APIC_DEFINITION */ +#else /* CONFIG_X86_LOCAL_APIC */ + +# ifndef CONFIG_SMP +# define hard_smp_processor_id() 0 +# endif + +#endif /* CONFIG_X86_LOCAL_APIC */ + #endif -- cgit v1.2.3 From 1b00084386878f25c2c591ad19cb625880d4089d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:58 -0300 Subject: x86: merge hard/logical_smp_processor_id The code is now the same between i386 and x86_64. We already know what happens when it reaches this point: They go away from the arch-specific headers, and suddenly appears in the common header. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 27 +++++++++++++++++++++++++++ include/asm-x86/smp_32.h | 27 --------------------------- include/asm-x86/smp_64.h | 27 --------------------------- 3 files changed, 27 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ef26911dc22..e5534f19c31 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -123,6 +123,33 @@ void smp_store_cpu_info(int id); # include "smp_64.h" #endif +#ifdef CONFIG_X86_LOCAL_APIC + +static inline int logical_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); +} + +# ifdef APIC_DEFINITION +extern int hard_smp_processor_id(void); +# else +# include +static inline int hard_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); +} +# endif /* APIC_DEFINITION */ + +#else /* CONFIG_X86_LOCAL_APIC */ + +# ifndef CONFIG_SMP +# define hard_smp_processor_id() 0 +# endif + +#endif /* CONFIG_X86_LOCAL_APIC */ + #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index cb3ada2fedb..53432dbd542 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -42,32 +42,5 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -#ifdef CONFIG_X86_LOCAL_APIC - -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -} - -# ifdef APIC_DEFINITION -extern int hard_smp_processor_id(void); -# else -# include -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); -} -# endif /* APIC_DEFINITION */ - -#else /* CONFIG_X86_LOCAL_APIC */ - -# ifndef CONFIG_SMP -# define hard_smp_processor_id() 0 -# endif - -#endif /* CONFIG_X86_LOCAL_APIC */ - #endif /* !ASSEMBLY */ #endif diff --git a/include/asm-x86/smp_64.h 
b/include/asm-x86/smp_64.h index c46585e09ea..015d36e29ad 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -46,32 +46,5 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() smp_processor_id() -#ifdef CONFIG_X86_LOCAL_APIC - -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -} - -# ifdef APIC_DEFINITION -extern int hard_smp_processor_id(void); -# else -# include -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); -} -# endif /* APIC_DEFINITION */ - -#else /* CONFIG_X86_LOCAL_APIC */ - -# ifndef CONFIG_SMP -# define hard_smp_processor_id() 0 -# endif - -#endif /* CONFIG_X86_LOCAL_APIC */ - #endif -- cgit v1.2.3 From c1fa6c977eb978e1d09867475ec59c9a5799127f Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:59 -0300 Subject: x86: surround apic headers in apic definitions Although those constants are always defined in x86_64, and will have the effect of just including the headers in the very way we did before, I'm doing this in a separate patch to be conservative and avoid surprises. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 015d36e29ad..b83151d7388 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -7,9 +7,13 @@ /* * We need the APIC definitions automatically as part of 'smp.h' */ -#include -#include -#include +#ifdef CONFIG_X86_LOCAL_APIC +# include +# include +# ifdef CONFIG_X86_IO_APIC +# include +# endif +#endif #include #include -- cgit v1.2.3 From b23dab08fa37b302a8980e4cf925f2cb94288538 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:00 -0300 Subject: x86: merge includes in smp.h move all include directives from smp_{32,64}.h to smp.h. 
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 13 +++++++++++++ include/asm-x86/smp_32.h | 13 ------------- include/asm-x86/smp_64.h | 16 ---------------- 3 files changed, 13 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index e5534f19c31..21472cea3d6 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -5,6 +5,19 @@ #include #include +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ +#ifdef CONFIG_X86_LOCAL_APIC +# include +# include +# ifdef CONFIG_X86_IO_APIC +# include +# endif +#endif +#include +#include + extern cpumask_t cpu_callout_map; extern int smp_num_siblings; diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 53432dbd542..694d3245a88 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -2,19 +2,6 @@ #define __ASM_SMP_H #ifndef __ASSEMBLY__ -#include -#include - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifdef CONFIG_X86_LOCAL_APIC -# include -# include -# ifdef CONFIG_X86_IO_APIC -# include -# endif -#endif extern cpumask_t cpu_callin_map; diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index b83151d7388..eead92e30b2 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -1,22 +1,6 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H -#include -#include - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifdef CONFIG_X86_LOCAL_APIC -# include -# include -# ifdef CONFIG_X86_IO_APIC -# include -# endif -#endif -#include -#include - extern cpumask_t cpu_initialized; extern cpumask_t cpu_callin_map; -- cgit v1.2.3 From 24e8ecffa84dd560e0d4d6fcaeca6950805854e7 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:01 -0300 Subject: x86: split safe_smp_processor_id This implementation in x86_64 is clean and consistent, but we sacrifice it for the sake of being equal to i386 (since the other way around would be harder). Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index eead92e30b2..8ea49529f32 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -27,12 +27,12 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } +#define safe_smp_processor_id() smp_processor_id() #else /* CONFIG_SMP */ #define stack_smp_processor_id() 0 - +#define safe_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -#define safe_smp_processor_id() smp_processor_id() #endif -- cgit v1.2.3 From a9c057c1d1b1080a01004ecac54308365e167b83 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:02 -0300 Subject: x86: merge SMP definitions of smp.h we merge everything that is inside CONFIG_SMP to smp.h. The 32-bit and 64-bit versions differ a little bit, so we use CONFIG_X86_32_SMP and CONFIG_X86_64_SMP as markers.
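A minimal usage sketch (not part of the patch; the calling function is hypothetical): both configurations now define safe_smp_processor_id() in one place. On 32-bit SMP it is an out-of-line function usable from crash-style paths where the usual per-cpu lookup may not be trustworthy, on 64-bit SMP it simply maps to smp_processor_id(), and on UP builds it is 0:

static void report_panic_cpu(void)
{
	/* safe_smp_processor_id() resolves per CONFIG_X86_{32,64}_SMP */
	printk(KERN_EMERG "panic on CPU %d\n", safe_smp_processor_id());
}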
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 33 ++++++++++++++++++++++++++++++++- include/asm-x86/smp_32.h | 21 --------------------- include/asm-x86/smp_64.h | 27 --------------------------- 3 files changed, 32 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 21472cea3d6..57b3d86dd9e 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -126,8 +126,39 @@ extern unsigned long setup_trampoline(void); void smp_store_cpu_info(int id); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -#else + +/* We don't mark CPUs online until __cpu_up(), so we need another measure */ +static inline int num_booting_cpus(void) +{ + return cpus_weight(cpu_callout_map); +} +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_X86_32_SMP +/* + * This function is needed by all SMP systems. It must _always_ be valid + * from the initial startup. We map APIC_BASE very early in page_setup(), + * so this is correct in the x86 case. + */ +DECLARE_PER_CPU(int, cpu_number); +#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) +extern int safe_smp_processor_id(void); + +#elif defined(CONFIG_X86_64_SMP) +#define raw_smp_processor_id() read_pda(cpunumber) + +#define stack_smp_processor_id() \ +({ \ + struct thread_info *ti; \ + __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ + ti->cpu; \ +}) +#define safe_smp_processor_id() smp_processor_id() + +#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid +#define safe_smp_processor_id() 0 +#define stack_smp_processor_id() 0 #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 694d3245a88..d9ae5ac93df 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -8,26 +8,5 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); -#ifdef CONFIG_SMP -/* - * This function is needed by all SMP systems. It must _always_ be valid - * from the initial startup. We map APIC_BASE very early in page_setup(), - * so this is correct in the x86 case. - */ -DECLARE_PER_CPU(int, cpu_number); -#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) - -extern int safe_smp_processor_id(void); - -/* We don't mark CPUs online until __cpu_up(), so we need another measure */ -static inline int num_booting_cpus(void) -{ - return cpus_weight(cpu_callout_map); -} - -#else /* CONFIG_SMP */ -#define safe_smp_processor_id() 0 -#endif /* !CONFIG_SMP */ - #endif /* !ASSEMBLY */ #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 8ea49529f32..058f4139979 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -7,32 +7,5 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -#ifdef CONFIG_SMP - -#define raw_smp_processor_id() read_pda(cpunumber) - -#define stack_smp_processor_id() \ -({ \ - struct thread_info *ti; \ - asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ - ti->cpu; \ -}) - -/* - * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies - * scheduling and IPI sending and compresses data structures. 
- */ -static inline int num_booting_cpus(void) -{ - return cpus_weight(cpu_callout_map); -} - -#define safe_smp_processor_id() smp_processor_id() -#else /* CONFIG_SMP */ -#define stack_smp_processor_id() 0 -#define safe_smp_processor_id() 0 -#endif /* !CONFIG_SMP */ - - #endif -- cgit v1.2.3 From 8be9ac850564a409c1238cd5f53776c340aea4dc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:04 -0300 Subject: x86: merge smp_32.h and smp_64.h into smp.h Merge what's left from smp_32.h and smp_64.h into smp.h. By now, they're basically extern definitions. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 11 +++++------ include/asm-x86/smp_32.h | 12 ------------ include/asm-x86/smp_64.h | 11 ----------- 3 files changed, 5 insertions(+), 29 deletions(-) delete mode 100644 include/asm-x86/smp_32.h delete mode 100644 include/asm-x86/smp_64.h (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 57b3d86dd9e..bcbd25cbd86 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -19,6 +19,11 @@ #include extern cpumask_t cpu_callout_map; +extern cpumask_t cpu_initialized; +extern cpumask_t cpu_callin_map; + +extern void (*mtrr_hook)(void); +extern void zap_low_mappings(void); extern int smp_num_siblings; extern unsigned int num_processors; @@ -161,12 +166,6 @@ extern int safe_smp_processor_id(void); #define stack_smp_processor_id() 0 #endif -#ifdef CONFIG_X86_32 -# include "smp_32.h" -#else -# include "smp_64.h" -#endif - #ifdef CONFIG_X86_LOCAL_APIC static inline int logical_smp_processor_id(void) diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h deleted file mode 100644 index d9ae5ac93df..00000000000 --- a/include/asm-x86/smp_32.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -#ifndef __ASSEMBLY__ - -extern cpumask_t cpu_callin_map; - -extern void (*mtrr_hook)(void); -extern void zap_low_mappings(void); - -#endif /* !ASSEMBLY */ -#endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h deleted file mode 100644 index 058f4139979..00000000000 --- a/include/asm-x86/smp_64.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -extern cpumask_t cpu_initialized; -extern cpumask_t cpu_callin_map; - -extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); - -#endif -- cgit v1.2.3 From fb8e8375394e1156a5a1e7ba53504b141d2365a8 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 27 Mar 2008 17:28:39 -0700 Subject: x86: sparsemem: reduce i386 PAE section size A 1G section size makes memory hotplug too coarse in a virtual environment. Reduce it by a factor of 2 to 512M. I would have liked to make it smaller, but it runs out of reserved flags in the page flags.
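A back-of-the-envelope sketch (not part of the patch) of what the new value implies; the constants are the PAE values visible in the diff below:

#define SECTION_SIZE_BITS	29	/* 1UL << 29 = 512 MB per section */
#define MAX_PHYSMEM_BITS	36	/* PAE can address up to 64 GB */
/*
 * A fully populated machine then needs 1 << (36 - 29) = 128 sections,
 * i.e. a 7-bit section id. Halving the section size once more would
 * require 256 sections, which is roughly the point where the section
 * id no longer fits in the reserved page-flag bits mentioned above.
 */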
Signed-off-by: Jeremy Fitzhardinge Cc: KAMEZAWA Hiroyuki Cc: Yasunori Goto Cc: Christoph Lameter Cc: Dave Hansen Signed-off-by: Ingo Molnar --- include/asm-x86/sparsemem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa684f353aa..9bd48b0a534 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h @@ -16,7 +16,7 @@ #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_PAE -# define SECTION_SIZE_BITS 30 +# define SECTION_SIZE_BITS 29 # define MAX_PHYSADDR_BITS 36 # define MAX_PHYSMEM_BITS 36 # else -- cgit v1.2.3 From 6093015db2bd9e70cf20cdd23be1a50733baafdd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 30 Mar 2008 11:45:23 +0200 Subject: x86: cleanup replace most vm86 flags with flags from processor-flags.h, fix - fix build error - fix CONFIG_HEADERS_CHECK error Signed-off-by: Ingo Molnar --- include/asm-x86/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index 3b8160a2b47..1e3554596f7 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild @@ -10,6 +10,7 @@ header-y += prctl.h header-y += ptrace-abi.h header-y += sigcontext32.h header-y += ucontext.h +header-y += processor-flags.h unifdef-y += e820.h unifdef-y += ist.h -- cgit v1.2.3 From 6b6891f9c545ccd45d6d8ddfd33ce27c22c271a7 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Fri, 28 Mar 2008 17:56:57 +0300 Subject: x86: cleanup - rename VM_MASK to X86_VM_MASK This patch renames VM_MASK to X86_VM_MASK (which in turn is defined as an alias of X86_EFLAGS_VM) to better distinguish it from virtual memory flags. We can't just use X86_EFLAGS_VM instead because it is also used for conditional compilation. Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- include/asm-x86/ptrace.h | 4 ++-- include/asm-x86/vm86.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index e779f2b26b3..24ec061566c 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -170,7 +170,7 @@ static inline int user_mode(struct pt_regs *regs) static inline int user_mode_vm(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & VM_MASK)) >= + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else return user_mode(regs); @@ -180,7 +180,7 @@ static inline int user_mode_vm(struct pt_regs *regs) static inline int v8086_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return (regs->flags & VM_MASK); + return (regs->flags & X86_VM_MASK); #else return 0; /* No V86 mode support in long mode */ #endif diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index a2be241ed03..f5f3dc479c3 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -17,9 +17,9 @@ #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #ifdef CONFIG_VM86 -#define VM_MASK 0x00020000 +#define X86_VM_MASK X86_EFLAGS_VM #else -#define VM_MASK 0 /* ignored */ +#define X86_VM_MASK 0 /* No VM86 support */ #endif #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ #define VIP_MASK 0x00100000 /* virtual interrupt pending */ -- cgit v1.2.3 From a5c15d419d4b68535222b51f9054dd08d5e67470 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Fri, 28 Mar 2008 17:56:56 +0300 Subject: x86: replace most VM86 flags with flags from processor-flags.h Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar ---
include/asm-x86/vm86.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index f5f3dc479c3..074b357146d 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -12,19 +12,13 @@ * Linus */ -#define TF_MASK 0x00000100 -#define IF_MASK 0x00000200 -#define IOPL_MASK 0x00003000 -#define NT_MASK 0x00004000 +#include + #ifdef CONFIG_VM86 #define X86_VM_MASK X86_EFLAGS_VM #else #define X86_VM_MASK 0 /* No VM86 support */ #endif -#define AC_MASK 0x00040000 -#define VIF_MASK 0x00080000 /* virtual interrupt flag */ -#define VIP_MASK 0x00100000 /* virtual interrupt pending */ -#define ID_MASK 0x00200000 #define BIOSSEG 0x0f000 -- cgit v1.2.3 From 05f2d12c3563dea8c81b301f9f3cf7919af23b13 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:02 -0500 Subject: x86: change GET_APIC_ID() from an inline function to an out-of-line function Introduce a function to read the local APIC_ID. This change is in preparation for additional changes to the APICID functions that will come in a later patch. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_apic.h | 2 +- include/asm-x86/mach-es7000/mach_apic.h | 2 +- include/asm-x86/mach-visws/mach_apic.h | 2 +- include/asm-x86/smp.h | 7 ++++++- 4 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 1f56e7d5bfd..14217a970c5 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -54,7 +54,7 @@ static inline void init_apic_ldr(void) static inline int apic_id_registered(void) { - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); + return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index 0137b6e142c..fbc8ad256f5 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -141,7 +141,7 @@ static inline void setup_portio_remap(void) extern unsigned int boot_cpu_physical_apicid; static inline int check_phys_apicid_present(int cpu_physical_apicid) { - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); return (1); } diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h index efac6f0d139..a9ef33a8a99 100644 --- a/include/asm-x86/mach-visws/mach_apic.h +++ b/include/asm-x86/mach-visws/mach_apic.h @@ -23,7 +23,7 @@ static inline int apic_id_registered(void) { - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); + return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); } /* diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index bcbd25cbd86..c0d693ca435 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,6 +174,11 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +static inline unsigned int read_apic_id(void) +{ + return *(u32 *)(APIC_BASE + APIC_ID); +} + # ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); # else @@ -181,7 +186,7 @@ extern int hard_smp_processor_id(void); static inline int hard_smp_processor_id(void) { /* we don't want to mark this access 
volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); + return GET_APIC_ID(read_apic_id()); } # endif /* APIC_DEFINITION */ -- cgit v1.2.3 From ae261868658773538ddda829c50224e5851c2342 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:06 -0500 Subject: x86: add functions to determine if platform is a UV platform Add functions that can be used to determine if an x86_64 system is an SGI "UV" system. UV systems come in 3 types and are identified by the OEM ID in the MADT. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 5 +++++ include/asm-x86/genapic_64.h | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5fa893dce72..f1b96932746 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -114,4 +114,9 @@ struct genapic { extern struct genapic *genapic; +enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; +#define get_uv_system_type() UV_NONE +#define is_uv_system() 0 + + #endif diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index d7e516ccbaa..914815c28ae 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h @@ -33,5 +33,10 @@ extern struct genapic *genapic; extern struct genapic apic_flat; extern struct genapic apic_physflat; +extern int acpi_madt_oem_check(char *, char *); + +enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; +extern enum uv_system_type get_uv_system_type(void); +extern int is_uv_system(void); #endif -- cgit v1.2.3 From a65d1d644c2b65bfb99e766e7160d764b8b2bfa4 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:08 -0500 Subject: x86: increase size of APICID Increase the number of bits in an apicid from 8 to 32. By default, MP_processor_info() gets the APICID from the mpc_config_processor structure. However, this structure limits the size of APICID to 8 bits. This patch allows the caller of MP_processor_info() to optionally pass a larger APICID that will be used instead of the one in the mpc_config_processor struct.
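A hypothetical sketch of the calling pattern described above; the real function lives in arch/x86 and its signature may differ, and only generic_processor_info() below is visible in these diffs:

static void __cpuinit mp_processor_info_wide(struct mpc_config_processor *m,
					     int apicid)
{
	if (apicid < 0)			/* caller has no wider id */
		apicid = m->mpc_apicid;	/* fall back to the 8-bit mpc field */
	generic_processor_info(apicid, m->mpc_apicver);
}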
Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/apicdef.h | 9 ++++++--- include/asm-x86/mpspec.h | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 8b244683431..6b9008c7873 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -133,7 +133,7 @@ # define MAX_IO_APICS 64 #else # define MAX_IO_APICS 128 -# define MAX_LOCAL_APIC 256 +# define MAX_LOCAL_APIC 32768 #endif /* @@ -406,6 +406,9 @@ struct local_apic { #undef u32 -#define BAD_APICID 0xFFu - +#ifdef CONFIG_X86_32 + #define BAD_APICID 0xFFu +#else + #define BAD_APICID 0xFFFFu +#endif #endif diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 31bac12a97d..1f6445b147f 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -47,9 +47,9 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic(u8 id, u8 enabled); +extern void mp_register_lapic(int id, u8 enabled); extern void mp_register_lapic_address(u64 address); -extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); +extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs(void); -- cgit v1.2.3 From 0d3e865b2644e4a2250ab25c5475a0cd0d514b7e Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:11 -0500 Subject: x86: add UV specific header for MMR definitions Definitions of UV MMRs. Note: this file is auto-generated by hardware design tools. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/uv/uv_mmrs.h | 373 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 include/asm-x86/uv/uv_mmrs.h (limited to 'include') diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h new file mode 100644 index 00000000000..3b69fe6b637 --- /dev/null +++ b/include/asm-x86/uv/uv_mmrs.h @@ -0,0 +1,373 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV MMR definitions + * + * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#ifndef __ASM_X86_UV_MMRS__ +#define __ASM_X86_UV_MMRS__ + +/* + * AUTO GENERATED - Do not edit + */ + + #define UV_MMR_ENABLE (1UL << 63) + +/* ========================================================================= */ +/* UVH_IPI_INT */ +/* ========================================================================= */ +#define UVH_IPI_INT 0x60500UL +#define UVH_IPI_INT_32 0x0360 + +#define UVH_IPI_INT_VECTOR_SHFT 0 +#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL +#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 +#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL +#define UVH_IPI_INT_DESTMODE_SHFT 11 +#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL +#define UVH_IPI_INT_APIC_ID_SHFT 16 +#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL +#define UVH_IPI_INT_SEND_SHFT 63 +#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL + +union uvh_ipi_int_u { + unsigned long v; + struct uvh_ipi_int_s { + unsigned long vector_ : 8; /* RW */ + unsigned long delivery_mode : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long rsvd_12_15 : 4; /* */ + unsigned long apic_id : 32; /* RW */ + unsigned long rsvd_48_62 : 15; /* */ + unsigned long send : 1; /* WP */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL + +union uvh_lb_bau_intd_payload_queue_first_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_first_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_48: 6; /* */ + unsigned long node_id : 14; /* RW */ + unsigned long rsvd_63 : 1; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL + +union uvh_lb_bau_intd_payload_queue_last_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_last_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_63: 21; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL + +union uvh_lb_bau_intd_payload_queue_tail_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_tail_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_63: 21; /* */ + } s; +}; + +/* 
========================================================================= */ +/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL + +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL +union uvh_lb_bau_intd_software_acknowledge_u { + unsigned long v; + struct uvh_lb_bau_intd_software_acknowledge_s { + unsigned long pending_0 : 1; /* RW, W1C */ + unsigned long pending_1 : 1; /* RW, W1C */ + unsigned long pending_2 : 1; /* RW, W1C */ + unsigned long pending_3 : 1; /* RW, W1C */ + unsigned long pending_4 : 1; /* RW, W1C */ + unsigned long pending_5 : 1; /* RW, W1C */ + unsigned long pending_6 : 1; /* RW, W1C */ + unsigned long pending_7 : 1; /* RW, W1C */ + unsigned long timeout_0 : 1; /* RW, W1C */ + unsigned long timeout_1 : 1; /* RW, W1C */ + unsigned long timeout_2 : 1; /* RW, W1C */ + unsigned long timeout_3 : 1; /* RW, W1C */ + unsigned long timeout_4 : 1; /* RW, W1C */ + unsigned long timeout_5 : 1; /* RW, W1C */ + unsigned long timeout_6 : 1; /* RW, W1C */ + unsigned long timeout_7 : 1; /* RW, W1C */ + unsigned long rsvd_16_63: 48; /* */ + } s; +}; + +/* ========================================================================= 
*/ +/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8 + +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL + +union uvh_lb_bau_sb_activation_control_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_control_s { + unsigned long index : 6; /* RW */ + unsigned long rsvd_6_61: 56; /* */ + unsigned long push : 1; /* WP */ + unsigned long init : 1; /* WP */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0 + +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL + +union uvh_lb_bau_sb_activation_status_0_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_status_0_s { + unsigned long status : 64; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8 + +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL + +union uvh_lb_bau_sb_activation_status_1_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_status_1_s { + unsigned long status : 64; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0 + +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL + +union uvh_lb_bau_sb_descriptor_base_u { + unsigned long v; + struct uvh_lb_bau_sb_descriptor_base_s { + unsigned long rsvd_0_11 : 12; /* */ + unsigned long page_address : 31; /* RW */ + unsigned long rsvd_43_48 : 6; /* */ + unsigned long node_id : 14; /* RW */ + unsigned long rsvd_63 : 1; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_NODE_ID */ +/* ========================================================================= */ +#define UVH_NODE_ID 0x0UL + +#define UVH_NODE_ID_FORCE1_SHFT 0 +#define UVH_NODE_ID_FORCE1_MASK 
0x0000000000000001UL +#define UVH_NODE_ID_MANUFACTURER_SHFT 1 +#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL +#define UVH_NODE_ID_PART_NUMBER_SHFT 12 +#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL +#define UVH_NODE_ID_REVISION_SHFT 28 +#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL +#define UVH_NODE_ID_NODE_ID_SHFT 32 +#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL +#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48 +#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL +#define UVH_NODE_ID_NI_PORT_SHFT 56 +#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL + +union uvh_node_id_u { + unsigned long v; + struct uvh_node_id_s { + unsigned long force1 : 1; /* RO */ + unsigned long manufacturer : 11; /* RO */ + unsigned long part_number : 16; /* RO */ + unsigned long revision : 4; /* RO */ + unsigned long node_id : 15; /* RW */ + unsigned long rsvd_47 : 1; /* */ + unsigned long nodes_per_bit : 7; /* RW */ + unsigned long rsvd_55 : 1; /* */ + unsigned long ni_port : 4; /* RO */ + unsigned long rsvd_60_63 : 4; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL + +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_gru_overlay_config_mmr_u { + unsigned long v; + struct uvh_rh_gam_gru_overlay_config_mmr_s { + unsigned long rsvd_0_27: 28; /* */ + unsigned long base : 18; /* RW */ + unsigned long gr4 : 1; /* RW */ + unsigned long rsvd_47_51: 5; /* */ + unsigned long n_gru : 4; /* RW */ + unsigned long rsvd_56_62: 7; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL + +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_mmr_overlay_config_mmr_u { + unsigned long v; + struct uvh_rh_gam_mmr_overlay_config_mmr_s { + unsigned long rsvd_0_25: 26; /* */ + unsigned long base : 20; /* RW */ + unsigned long dual_hub : 1; /* RW */ + unsigned long rsvd_47_62: 16; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RTC */ +/* ========================================================================= */ +#define UVH_RTC 0x28000UL + +#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 +#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL + +union uvh_rtc_u { + unsigned 
long v; + struct uvh_rtc_s { + unsigned long real_time_clock : 56; /* RW */ + unsigned long rsvd_56_63 : 8; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_SI_ADDR_MAP_CONFIG */ +/* ========================================================================= */ +#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL + +#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 +#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL +#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 +#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL + +union uvh_si_addr_map_config_u { + unsigned long v; + struct uvh_si_addr_map_config_s { + unsigned long m_skt : 6; /* RW */ + unsigned long rsvd_6_7: 2; /* */ + unsigned long n_skt : 4; /* RW */ + unsigned long rsvd_12_63: 52; /* */ + } s; +}; + + +#endif /* __ASM_X86_UV_MMRS__ */ -- cgit v1.2.3 From 952cf6d7ae52cc5423baa57e978e20e732a89ba6 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:13 -0500 Subject: x86: define the macros and tables for the basic UV infrastructure. Define the macros and tables for the basic UV infrastructure. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/uv/uv_hub.h | 210 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 include/asm-x86/uv/uv_hub.h (limited to 'include') diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h new file mode 100644 index 00000000000..b4fcf9cf895 --- /dev/null +++ b/include/asm-x86/uv/uv_hub.h @@ -0,0 +1,210 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV architectural definitions + * + * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef __ASM_X86_UV_HUB_H__ +#define __ASM_X86_UV_HUB_H__ + +#include +#include +#include +#include + + +/* + * Addressing Terminology + * + * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of + * routers always have low bit of 1, C/MBricks have low bit + * equal to 0. Most addressing macros that target UV hub chips + * right shift the NASID by 1 to exclude the always-zero bit. + * + * SNASID - NASID right shifted by 1 bit. + * + * + * Memory/UV-HUB Processor Socket Address Format: + * +--------+---------------+---------------------+ + * |00..0000| SNASID | NodeOffset | + * +--------+---------------+---------------------+ + * <--- N bits --->|<--------M bits -----> + * + * M number of node offset bits (35 .. 40) + * N number of SNASID bits (0 .. 10) + * + * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). + * The actual values are configuration dependent and are set at + * boot time + * + * APICID format + * NOTE!!!!!! This is the current format of the APICID. However, code + * should assume that this will change in the future. Use functions + * in this file for all APICID bit manipulations and conversion. + * + * 1111110000000000 + * 5432109876543210 + * nnnnnnnnnnlc0cch + * sssssssssss + * + * n = snasid bits + * l = socket number on board + * c = core + * h = hyperthread + * s = bits that are in the socket CSR + * + * Note: Processor only supports 12 bits in the APICID register. The ACPI + * tables hold all 16 bits. Software needs to be aware of this. + * + * Unless otherwise specified, all references to APICID refer to + * the FULL value contained in ACPI tables, not the subset in the + * processor APICID register. 
+ */ + + +/* + * Maximum number of bricks in all partitions and in all coherency domains. + * This is the total number of bricks accessible in the numalink fabric. It + * includes all C & M bricks. Routers are NOT included. + * + * This value is also the value of the maximum number of non-router NASIDs + * in the numalink fabric. + * + * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. + */ +#define UV_MAX_NUMALINK_BLADES 16384 + +/* + * Maximum number of C/Mbricks within a software SSI (hardware may support + * more). + */ +#define UV_MAX_SSI_BLADES 256 + +/* + * The largest possible NASID of a C or M brick (+ 2) + */ +#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2) + +/* + * The following defines attributes of the HUB chip. These attributes are + * frequently referenced and are kept in the per-cpu data areas of each cpu. + * They are kept together in a struct to minimize cache misses. + */ +struct uv_hub_info_s { + unsigned long global_mmr_base; + unsigned short local_nasid; + unsigned short gnode_upper; + unsigned short coherency_domain_number; + unsigned short numa_blade_id; + unsigned char blade_processor_id; + unsigned char m_val; + unsigned char n_val; +}; +DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); +#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) +#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) + +/* + * Local & Global MMR space macros. + * Note: macros are intended to be used ONLY by inline functions + * in this file - not by other kernel code. + */ +#define UV_SNASID(n) ((n) >> 1) +#define UV_NASID(n) ((n) << 1) + +#define UV_LOCAL_MMR_BASE 0xf4000000UL +#define UV_GLOBAL_MMR32_BASE 0xf8000000UL +#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) + +#define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff +#define UV_GLOBAL_MMR32_SNASID_SHIFT 15 +#define UV_GLOBAL_MMR64_SNASID_SHIFT 26 + +#define UV_GLOBAL_MMR32_NASID_BITS(n) \ + (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \ + (UV_GLOBAL_MMR32_SNASID_SHIFT)) + +#define UV_GLOBAL_MMR64_NASID_BITS(n) \ + ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) + +#define UV_APIC_NASID_SHIFT 6 + +/* + * Extract a NASID from an APICID (full apicid, not processor subset) + */ +static inline int uv_apicid_to_nasid(int apicid) +{ + return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); +} + +/* + * Access global MMRs using the low memory MMR32 space. This region supports + * faster MMR access but not all MMRs are accessible in this space. + */ +static inline unsigned long *uv_global_mmr32_address(int nasid, + unsigned long offset) +{ + return __va(UV_GLOBAL_MMR32_BASE | + UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); +} + +static inline void uv_write_global_mmr32(int nasid, unsigned long offset, + unsigned long val) +{ + *uv_global_mmr32_address(nasid, offset) = val; +} + +static inline unsigned long uv_read_global_mmr32(int nasid, + unsigned long offset) +{ + return *uv_global_mmr32_address(nasid, offset); +} + +/* + * Access Global MMR space using the MMR space located at the top of physical + * memory. 
+ */ +static inline unsigned long *uv_global_mmr64_address(int nasid, + unsigned long offset) +{ + return __va(UV_GLOBAL_MMR64_BASE | + UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset); +} + +static inline void uv_write_global_mmr64(int nasid, unsigned long offset, + unsigned long val) +{ + *uv_global_mmr64_address(nasid, offset) = val; +} + +static inline unsigned long uv_read_global_mmr64(int nasid, + unsigned long offset) +{ + return *uv_global_mmr64_address(nasid, offset); +} + +/* + * Access node local MMRs. Faster than using global space but only local MMRs + * are accessible. + */ +static inline unsigned long *uv_local_mmr_address(unsigned long offset) +{ + return __va(UV_LOCAL_MMR_BASE | offset); +} + +static inline unsigned long uv_read_local_mmr(unsigned long offset) +{ + return *uv_local_mmr_address(offset); +} + +static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) +{ + *uv_local_mmr_address(offset) = val; +} + +#endif /* __ASM_X86_UV_HUB__ */ + -- cgit v1.2.3 From 8400def8252f90ecd056657c0bac806afadd8511 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:14 -0500 Subject: x86: define the macros and tables for blade functions Add UV macros for converting between cpu numbers, blade numbers and node numbers. Note that these are used ONLY within x86_64 UV modules, and are not for general kernel use. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/uv/uv_hub.h | 74 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) (limited to 'include') diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h index b4fcf9cf895..26b9240d1e2 100644 --- a/include/asm-x86/uv/uv_hub.h +++ b/include/asm-x86/uv/uv_hub.h @@ -206,5 +206,79 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) *uv_local_mmr_address(offset) = val; } +/* + * Structures and definitions for converting between cpu, node, and blade + * numbers. + */ +struct uv_blade_info { + unsigned short nr_possible_cpus; + unsigned short nr_online_cpus; + unsigned short nasid; +}; +extern struct uv_blade_info *uv_blade_info; +extern short *uv_node_to_blade; +extern short *uv_cpu_to_blade; +extern short uv_possible_blades; + +/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */ +static inline int uv_blade_processor_id(void) +{ + return uv_hub_info->blade_processor_id; +} + +/* Blade number of current cpu. Numbered 0 .. <#blades - 1> */ +static inline int uv_numa_blade_id(void) +{ + return uv_hub_info->numa_blade_id; +} + +/* Convert a cpu number to the UV blade number */ +static inline int uv_cpu_to_blade_id(int cpu) +{ + return uv_cpu_to_blade[cpu]; +} + +/* Convert a Linux node number to the UV blade number */ +static inline int uv_node_to_blade_id(int nid) +{ + return uv_node_to_blade[nid]; +} + +/* Convert a blade id to the NASID of the blade */ +static inline int uv_blade_to_nasid(int bid) +{ + return uv_blade_info[bid].nasid; +} + +/* Determine the number of possible cpus on a blade */ +static inline int uv_blade_nr_possible_cpus(int bid) +{ + return uv_blade_info[bid].nr_possible_cpus; +} + +/* Determine the number of online cpus on a blade */ +static inline int uv_blade_nr_online_cpus(int bid) +{ + return uv_blade_info[bid].nr_online_cpus; +} + +/* Convert a cpu id to the NASID of the blade containing the cpu */ +static inline int uv_cpu_to_nasid(int cpu) +{ + return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid; +} + +/* Convert a node number to the NASID of the blade */ +static inline int uv_node_to_nasid(int nid) +{ + return uv_blade_info[uv_node_to_blade_id(nid)].nasid; +} + +/* Maximum possible number of blades */ +static inline int uv_num_possible_blades(void) +{ + return uv_possible_blades; +} + #endif /* __ASM_X86_UV_HUB__ */ -- cgit v1.2.3 From ac23d4ee3f84de33c16ed7e68f9adee2386e74fb Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:16 -0500 Subject: x86: support for new UV apic UV supports really big systems. So big, in fact, that the APICID register does not contain enough bits to contain an APICID that is unique across all cpus. The UV BIOS supports 3 APICID modes: - legacy mode. This mode uses the old APIC mode where APICID is in bits [31:24] of the APICID register. - x2apic mode. This mode is whitebox-compatible. APICIDs are unique across all cpus. Standard x2apic APIC operations (Intel-defined) can be used for IPIs. The node identifier fits within the Intel-defined portion of the APICID register. - x2apic-uv mode. In this mode, the cpus on each node have unique APICIDs, but IDs on different nodes are not unique. For example, if each node has 32 cpus, the APICIDs on each node might be 0 - 31. Every node has the same set of IDs. The UV hub is used to route IPIs/interrupts to the correct node. Traditional APIC operations WILL NOT WORK. In x2apic-uv mode, the ACPI tables all contain a full unique ID (note: exact bit layout still changing but the following is close): nnnnnnnnnnlc0cch n = unique node number l = socket number on board c = core h = hyperthread Only the "lc0cch" bits are written to the APICID register. The remaining bits are supplied by having the get_apic_id() function "OR" the extra bits into the value read from the APICID register. (Hmmm.. why not keep the ENTIRE APICID register in per-cpu data....)
The x2apic-uv mode is recognized by the MADT table containing: oem_id = "SGI" oem_table_id = "UV-X" Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_64.h | 5 +++++ include/asm-x86/smp.h | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index 914815c28ae..1de931b263c 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h @@ -39,4 +39,9 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); +extern struct genapic apic_x2apic_uv_x; +DECLARE_PER_CPU(int, x2apic_extra_bits); +extern void uv_cpu_init(void); +extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); + #endif diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c0d693ca435..b3556626487 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,10 +174,15 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +#ifdef CONFIG_X86_32_SMP static inline unsigned int read_apic_id(void) { return *(u32 *)(APIC_BASE + APIC_ID); } +#else +extern unsigned int read_apic_id(void); +#endif + + # ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); -- cgit v1.2.3 From a24eae88ad3767d0a4a940a10e4a9cec849b7778 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 30 Mar 2008 12:17:12 +0200 Subject: x86: uv fix Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b3556626487..654724c58f5 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,7 +174,7 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } -#ifdef CONFIG_X86_32_SMP +#ifndef CONFIG_X86_64 static inline unsigned int read_apic_id(void) { return *(u32 *)(APIC_BASE + APIC_ID); -- cgit v1.2.3 From b447a468fcd130aa8951672b6115c673c274e888 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 25 Mar 2008 15:06:51 -0700 Subject: x86: clean up non-smp usage of cpu maps Clean up references to the early cpu maps for the non-SMP configuration and remove some functions called for SMP configurations only.
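An illustrative sketch (not part of the patch) of why defining the early pointers to NULL on !CONFIG_SMP lets shared code keep a single path; the helper name is hypothetical:

static int early_node_for_cpu(int cpu)
{
	int *map = x86_cpu_to_node_map_early_ptr;	/* NULL when !CONFIG_SMP */

	if (map)
		return map[cpu];	/* early boot: per-cpu area not ready yet */
	return cpu_to_node(cpu);	/* final per-cpu mapping */
}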
Cc: Andi Kleen Cc: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 5 +++++ include/asm-x86/topology.h | 15 +++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 654724c58f5..d973c11688c 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -29,10 +29,15 @@ extern int smp_num_siblings; extern unsigned int num_processors; extern cpumask_t cpu_initialized; +#ifdef CONFIG_SMP extern u16 x86_cpu_to_apicid_init[]; extern u16 x86_bios_cpu_apicid_init[]; extern void *x86_cpu_to_apicid_early_ptr; extern void *x86_bios_cpu_apicid_early_ptr; +#else +#define x86_cpu_to_apicid_early_ptr NULL +#define x86_bios_cpu_apicid_early_ptr NULL +#endif DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8d1a1f3d21b..81a29eb08ac 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -38,8 +38,13 @@ extern int cpu_to_node_map[]; #endif DECLARE_PER_CPU(int, x86_cpu_to_node_map); + +#ifdef CONFIG_SMP extern int x86_cpu_to_node_map_init[]; extern void *x86_cpu_to_node_map_early_ptr; +#else +#define x86_cpu_to_node_map_early_ptr NULL +#endif extern cpumask_t node_to_cpumask_map[]; @@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu) } #else /* CONFIG_X86_64 */ + +#ifdef CONFIG_SMP static inline int early_cpu_to_node(int cpu) { int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; @@ -65,6 +72,9 @@ static inline int early_cpu_to_node(int cpu) else return NUMA_NO_NODE; } +#else +#define early_cpu_to_node(cpu) cpu_to_node(cpu) +#endif static inline int cpu_to_node(int cpu) { @@ -76,10 +86,7 @@ static inline int cpu_to_node(int cpu) return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; } #endif - if (per_cpu_offset(cpu)) - return per_cpu(x86_cpu_to_node_map, cpu); - else - return NUMA_NO_NODE; + return per_cpu(x86_cpu_to_node_map, cpu); } #endif /* CONFIG_X86_64 */ -- cgit v1.2.3 From 61048c6328819b0973ef662f6d46f2e2bc753ceb Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:07 +0400 Subject: x86: don't set IO APIC features if IO APIC is not enabled Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_apic.h | 2 ++ include/asm-x86/mpspec.h | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 14217a970c5..0a6634f62ab 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -69,8 +69,10 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) static inline void setup_apic_routing(void) { +#ifdef CONFIG_X86_IO_APIC printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", "Flat", nr_ioapics); +#endif } static inline int apicid_to_node(int logical_apicid) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 1f6445b147f..08cc5e02795 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -36,9 +36,6 @@ extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; extern int smp_found_config; -extern int nr_ioapics; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; extern int mpc_default_type; extern unsigned long mp_lapic_addr; -- cgit v1.2.3 From 9e5c5f1dd29c86307e6b3cfa75e85d0efccc1f6b Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:26 +0400 Subject: x86: move mp_ioapic_routing to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/io_apic.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 095e8e30a34..0c9e17c73e0 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h @@ -110,6 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS]; * MP-BIOS irq configuration table structures: */ +struct mp_ioapic_routing { + int apic_id; + int gsi_base; + int gsi_end; + u32 pin_programmed[4]; +}; + /* I/O APIC entries */ extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; -- cgit v1.2.3 From 2fe60147570231cde0d1f14711d2e34ccdf54b65 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:44 +0400 Subject: x86: move UP & SMP variables to setup.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d973c11688c..3496e1c299b 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -128,7 +128,6 @@ int native_cpu_up(unsigned int cpunum); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern unsigned disabled_cpus; extern void prefill_possible_map(void); #define SMP_TRAMPOLINE_BASE 0x6000 @@ -144,6 +143,8 @@ static inline int num_booting_cpus(void) } #endif /* CONFIG_SMP */ +extern unsigned disabled_cpus __cpuinitdata; + #ifdef CONFIG_X86_32_SMP /* * This function is needed by all SMP systems.
It must _always_ be valid -- cgit v1.2.3 From dfac2189c2e1fbb90ee83f15b5e404425754e9f4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:50 +0400 Subject: x86: move mp_register_lapic to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 08cc5e02795..c614051f4fc 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -44,7 +44,6 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic(int id, u8 enabled); extern void mp_register_lapic_address(u64 address); extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs(void); -- cgit v1.2.3 From 31d2092eb0c23636b73d2c24c0c11b66470cef58 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:57 +0400 Subject: x86: move mp_register_lapic_address to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index c614051f4fc..57a991b9c05 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -44,7 +44,6 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic_address(u64 address); extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -- cgit v1.2.3 From e44b7b7525ad9d43163ab5e60c784325419e0ea6 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Thu, 10 Apr 2008 23:28:10 +0200 Subject: x86: move suspend wakeup code to C Move wakeup code to .c, so that video mode setting code can be shared between boot and wakeup. Remove nasty assembly code in the 64-bit case by re-using trampoline code. Stack setup was fixed to clear high 16 bits of %esp, maybe that fixes some machines. .c code sharing and morse code was done by H. Peter Anvin, Sam Ravnborg reviewed kbuild-related stuff, and it seems okay to him. Rafael did some cleanups. [rjw: * Made the patch stop breaking compilation on x86-32 * Added arch/x86/kernel/acpi/sleep.h * Got rid of compiler warnings in arch/x86/kernel/acpi/sleep.c * Fixed 32-bit compilation on x86-64 systems * Added include/asm-x86/trampoline.h and fixed the non-SMP compilation on 64-bit x86 * Removed arch/x86/kernel/acpi/sleep_32.c which was not used * Fixed some breakage caused by the integration of smpboot.c done under us in the meantime] Signed-off-by: Pavel Machek Signed-off-by: H. Peter Anvin Reviewed-by: Sam Ravnborg Signed-off-by: Rafael J. Wysocki Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 13 ------------- include/asm-x86/trampoline.h | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 13 deletions(-) create mode 100644 include/asm-x86/trampoline.h (limited to 'include') diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 3496e1c299b..62ebdec394b 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -45,22 +45,12 @@ DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(u16, x86_cpu_to_apicid); DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); -/* - * Trampoline 80x86 program as an array.
- */ -extern const unsigned char trampoline_data []; -extern const unsigned char trampoline_end []; -extern unsigned char *trampoline_base; - /* Static state in head.S used to set up a CPU */ extern struct { void *sp; unsigned short ss; } stack_start; -extern unsigned long init_rsp; -extern unsigned long initial_code; - struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); @@ -130,9 +120,6 @@ extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); -#define SMP_TRAMPOLINE_BASE 0x6000 -extern unsigned long setup_trampoline(void); - void smp_store_cpu_info(int id); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h new file mode 100644 index 00000000000..b156b08d013 --- /dev/null +++ b/include/asm-x86/trampoline.h @@ -0,0 +1,21 @@ +#ifndef __TRAMPOLINE_HEADER +#define __TRAMPOLINE_HEADER + +#ifndef __ASSEMBLY__ + +/* + * Trampoline 80x86 program as an array. + */ +extern const unsigned char trampoline_data []; +extern const unsigned char trampoline_end []; +extern unsigned char *trampoline_base; + +extern unsigned long init_rsp; +extern unsigned long initial_code; + +#define TRAMPOLINE_BASE 0x6000 +extern unsigned long setup_trampoline(void); + +#endif /* __ASSEMBLY__ */ + +#endif /* __TRAMPOLINE_HEADER */ -- cgit v1.2.3
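A usage sketch (not part of the patch) of how boot or wakeup code is expected to consume the new header; the caller shown is hypothetical:

#include <asm/trampoline.h>

static unsigned long __init prepare_real_mode_entry(void)
{
	/*
	 * setup_trampoline() copies trampoline_data..trampoline_end to
	 * TRAMPOLINE_BASE and returns the address that the secondary CPU
	 * (or the ACPI wakeup vector) should start executing at.
	 */
	return setup_trampoline();
}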