From 08cd93f9e11ac8c3e42c72debe7dc55a761149c2 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 14 Feb 2008 23:23:37 +0200 Subject: remove mca-pentium This patch removes the mca-pentium boot option that was a noop. besides the source code cleanup factor, this saves some text as well: arch/x86/kernel/cpu/bugs.o: text data bss dec hex filename 651 77 4 732 2dc bugs.o.before 631 53 4 688 2b0 bugs.o.after Signed-off-by: Adrian Bunk Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/bugs.c | 8 -------- arch/x86/kernel/setup_32.c | 1 - 2 files changed, 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 9b95edcfc6a..027e5c003b1 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -25,14 +25,6 @@ static int __init no_halt(char *s) __setup("no-hlt", no_halt); -static int __init mca_pentium(char *s) -{ - mca_pentium_flag = 1; - return 1; -} - -__setup("mca-pentium", mca_pentium); - static int __init no_387(char *s) { boot_cpu_data.hard_math = 0; diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index 691ab4cb167..a1d7071a51c 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -164,7 +164,6 @@ unsigned long mmu_cr4_features = X86_CR4_PAE; unsigned int machine_id; unsigned int machine_submodel_id; unsigned int BIOS_revision; -unsigned int mca_pentium_flag; /* Boot loader ID as an integer, for the benefit of proc_dointvec */ int bootloader_type; -- cgit v1.2.3 From f1452d424dc0e079fb97af8cb8d3a0f7b5fddd46 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Thu, 14 Feb 2008 15:23:53 -0800 Subject: x86, kprobes: remove sparse warnings from x86 arch/x86/kernel/kprobes.c:584:16: warning: symbol 'kretprobe_trampoline_holder' was not declared. Should it be static? arch/x86/kernel/kprobes.c:676:6: warning: symbol 'trampoline_handler' was not declared. Should it be static? Make them static and add the __used attribute, approach taken from the arm kprobes implementation. kretprobe_trampoline_holder uses inline assemly to define the global symbol kretprobe_trampoline, but nothing ever calls the holder explicitly. trampoline handler is only called from inline assembly in the same file, mark it used and static. Signed-off-by: Harvey Harrison Acked-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/kprobes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index a99e764fd66..34a591283f5 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -581,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) * When a retprobed function returns, this code saves registers and * calls trampoline_handler() runs, which calls the kretprobe's handler. 
*/ -void __kprobes kretprobe_trampoline_holder(void) +static void __used __kprobes kretprobe_trampoline_holder(void) { asm volatile ( ".global kretprobe_trampoline\n" @@ -673,7 +673,7 @@ void __kprobes kretprobe_trampoline_holder(void) /* * Called from kretprobe_trampoline */ -void * __kprobes trampoline_handler(struct pt_regs *regs) +static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; -- cgit v1.2.3 From cc7e73f35dccba7503bdaf40bb5189225697664d Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 13 Feb 2008 13:26:39 -0800 Subject: x86: sparse error in efi_32.c arch/x86/kernel/efi_32.c:42:6: warning: symbol 'efi_call_phys_prelog' was not declared. Should it be static? arch/x86/kernel/efi_32.c:84:6: warning: symbol 'efi_call_phys_epilog' was not declared. Should it be static? Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/efi_32.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c index cb91f985b4a..5d23d85624d 100644 --- a/arch/x86/kernel/efi_32.c +++ b/arch/x86/kernel/efi_32.c @@ -28,6 +28,7 @@ #include #include #include +#include /* * To make EFI call EFI runtime service in physical addressing mode we need -- cgit v1.2.3 From ecaea42eb8507735a97a496cc5068de06542e8b2 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 13 Feb 2008 13:26:13 -0800 Subject: x86: sparse warning in efi.c Yes, it should. Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/efi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 0c0eeb163d9..759e02bec07 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c @@ -54,7 +54,7 @@ EXPORT_SYMBOL(efi); struct efi_memory_map memmap; -struct efi efi_phys __initdata; +static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; static int __init setup_noefi(char *arg) -- cgit v1.2.3 From 148a142495f40d3da729d62cb7127ccb26223106 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 13 Feb 2008 23:29:33 +0200 Subject: x86: make mxcsr_feature_mask static again Signed-off-by: Adrian Bunk Cc: Roland McGrath Cc: hpa@zytor.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/i387.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 26719bd2c77..763dfc40723 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -39,7 +39,7 @@ #define HAVE_HWFP 1 #endif -unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; +static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; void mxcsr_feature_mask_init(void) { -- cgit v1.2.3 From d3cfeb4fbe2a5e88fd5f98892f4dc49dcab8a9e7 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 13 Feb 2008 23:29:42 +0200 Subject: x86: unexport io_delay_type Signed-off-by: Adrian Bunk Cc: hpa@zytor.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/io_delay.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c index bd49321034d..c706a306155 100644 --- a/arch/x86/kernel/io_delay.c +++ b/arch/x86/kernel/io_delay.c @@ -13,7 +13,6 @@ #include int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE; 
-EXPORT_SYMBOL_GPL(io_delay_type); static int __initdata io_delay_override; -- cgit v1.2.3 From f7f3d791e61d7baf8b0aee0384fdd469c0d2ac9b Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 13 Feb 2008 23:29:53 +0200 Subject: x86: don't make irq_return global Signed-off-by: Adrian Bunk Cc: hpa@zytor.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/entry_32.S | 2 +- arch/x86/kernel/entry_64.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 824e21b80aa..4b87c32b639 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -409,7 +409,7 @@ restore_nocheck_notrace: RESTORE_REGS addl $4, %esp # skip orig_eax/error_code CFI_ADJUST_CFA_OFFSET -4 -ENTRY(irq_return) +irq_return: INTERRUPT_RETURN .section .fixup,"ax" iret_exc: diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 6be39a387c5..2ad9a1bc6a7 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -583,7 +583,7 @@ retint_restore_args: /* return to kernel space */ restore_args: RESTORE_ARGS 0,8,0 -ENTRY(irq_return) +irq_return: INTERRUPT_RETURN .section __ex_table, "a" -- cgit v1.2.3 From aa65af3f92da39a686c8a3479426d15854fa27ab Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 13 Feb 2008 23:29:55 +0200 Subject: x86: don't make swapper_pg_fixmap global Signed-off-by: Adrian Bunk Cc: Ian Campbell Cc: hpa@zytor.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/head_32.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 74ef4a41f22..25eb98540a4 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -612,7 +612,7 @@ ENTRY(swapper_pg_pmd) ENTRY(swapper_pg_dir) .fill 1024,4,0 #endif -ENTRY(swapper_pg_fixmap) +swapper_pg_fixmap: .fill 1024,4,0 ENTRY(empty_zero_page) .fill 4096,1,0 -- cgit v1.2.3 From 8e31c2ac1101b09ad6c6d3539b032d312cc3302b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 11 Feb 2008 10:50:21 +0100 Subject: x86: CPA: remove BUG_ON for LRU/Compound pages New implementation does not use lru for anything so there is no need to reject pages that are in the LRU. 
Similar for compound pages (which were checked because they also use page->lru) [ tglx@linutronix.de: removed unused variable ] Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/pageattr.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 3ee14996c82..e2a74ea11a5 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -513,7 +513,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) unsigned long address = cpa->vaddr; int do_split, err; unsigned int level; - struct page *kpte_page; pte_t *kpte, old_pte; repeat: @@ -532,10 +531,6 @@ repeat: return -EINVAL; } - kpte_page = virt_to_page(kpte); - BUG_ON(PageLRU(kpte_page)); - BUG_ON(PageCompound(kpte_page)); - if (level == PG_LEVEL_4K) { pte_t new_pte; pgprot_t new_prot = pte_pgprot(old_pte); -- cgit v1.2.3 From a062bae9c443a8c0ab17f231eb83690dfb897524 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sun, 3 Feb 2008 15:40:30 +0800 Subject: x86: fix compile warning building without CONFIG_SYSCTL arch/x86/kernel/nmi_64.c:50: warning: 'unknown_nmi_panic_callback' declared 'static' but never defined This patch also fixes nmi_32.c Signed-off-by: Li Zefan Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/nmi_32.c | 21 +++++++++------------ arch/x86/kernel/nmi_64.c | 21 +++++++++------------ 2 files changed, 18 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index edd413650b3..6a0aa703868 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -46,9 +46,6 @@ static unsigned int nmi_hz = HZ; static DEFINE_PER_CPU(short, wd_enabled); -/* local prototypes */ -static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); - static int endflag __initdata = 0; #ifdef CONFIG_SMP @@ -391,15 +388,6 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) return rc; } -int do_nmi_callback(struct pt_regs * regs, int cpu) -{ -#ifdef CONFIG_SYSCTL - if (unknown_nmi_panic) - return unknown_nmi_panic_callback(regs, cpu); -#endif - return 0; -} - #ifdef CONFIG_SYSCTL static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) @@ -453,6 +441,15 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, #endif +int do_nmi_callback(struct pt_regs *regs, int cpu) +{ +#ifdef CONFIG_SYSCTL + if (unknown_nmi_panic) + return unknown_nmi_panic_callback(regs, cpu); +#endif + return 0; +} + void __trigger_all_cpu_backtrace(void) { int i; diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index fb99484d21c..9a4fde74bee 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c @@ -46,9 +46,6 @@ static unsigned int nmi_hz = HZ; static DEFINE_PER_CPU(short, wd_enabled); -/* local prototypes */ -static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); - /* Run after command line and cpu_init init, but before all other checks */ void nmi_watchdog_default(void) { @@ -394,15 +391,6 @@ asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code) nmi_exit(); } -int do_nmi_callback(struct pt_regs * regs, int cpu) -{ -#ifdef CONFIG_SYSCTL - if (unknown_nmi_panic) - return unknown_nmi_panic_callback(regs, cpu); -#endif - return 0; -} - void stop_nmi(void) { acpi_nmi_disable(); @@ -464,6 +452,15 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, #endif +int do_nmi_callback(struct pt_regs *regs, int 
cpu) +{ +#ifdef CONFIG_SYSCTL + if (unknown_nmi_panic) + return unknown_nmi_panic_callback(regs, cpu); +#endif + return 0; +} + void __trigger_all_cpu_backtrace(void) { int i; -- cgit v1.2.3 From 08acb672624ece2d9234817570a0b3332cc8dae3 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:22:46 +0100 Subject: x86: fix section mismatch warning in setup_64.c:nearby_node nearby_node() were only used by __cpuinit amd_detect_cmp() So annotating nearby_node() __cpuinit was the trivial fix. Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/setup_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index c0d8208af12..ff9029d3413 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -518,7 +518,7 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) } #ifdef CONFIG_NUMA -static int nearby_node(int apicid) +static int __cpuinit nearby_node(int apicid) { int i, node; -- cgit v1.2.3 From 04d733bd3588fda8934591fdb0a3d719c5ec8fa0 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:22:47 +0100 Subject: x86: fix section mismatch in setup_64.c:srat_detect_node srat_detect_node() is only used by __cpuinit init_intel(). So the trivial fix is to annotate srat_detect_node() with __cpuinit. Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/setup_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index ff9029d3413..e1866172deb 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -791,7 +791,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) return 1; } -static void srat_detect_node(void) +static void __cpuinit srat_detect_node(void) { #ifdef CONFIG_NUMA unsigned node; -- cgit v1.2.3 From 177c7715cd94a66d951fcafbacedd278a2d6fcab Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:22:49 +0100 Subject: x86: fix section mismatch warning in topology.c:arch_register_cpu arch_register_cpu() is only defined for HOTPLUG_CPU code so simple fix is to ignore references by annotating the function __ref. Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index a40051b71d9..0fcc95a354f 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -34,7 +34,7 @@ static DEFINE_PER_CPU(struct x86_cpu, cpu_devices); #ifdef CONFIG_HOTPLUG_CPU -int arch_register_cpu(int num) +int __ref arch_register_cpu(int num) { /* * CPU0 cannot be offlined due to several -- cgit v1.2.3 From d01b9ad56e2cc7b6204b89ef10a53e78d70b5877 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:22:58 +0100 Subject: x86: fix section mismatch in srat_64.c:reserve_hotadd reserve_hotadd() are only used by __init acpi_numa_memory_affinity_init(). Annotate reserve_hotadd() with __init is the trivial fix. Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. 
Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/srat_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index ecd91ea8a8a..845001c617c 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -166,7 +166,8 @@ static inline int save_add_info(void) {return 0;} * Both SPARSE and RESERVE need nodes_add information. * This code supports one contiguous hot add area per node. */ -static int reserve_hotadd(int node, unsigned long start, unsigned long end) +static int __init +reserve_hotadd(int node, unsigned long start, unsigned long end) { unsigned long s_pfn = start >> PAGE_SHIFT; unsigned long e_pfn = end >> PAGE_SHIFT; -- cgit v1.2.3 From da5968ae305ab5209ebc2502ef6a8fbf2cce536c Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:22:59 +0100 Subject: x86: fix section mismatch in head_64.S:initial_code initial_code are initially used to hold a function pointer from __init and later from __cpuinit. This confuses modpost and changing initial_code to REFDATA silence the warning. (But now we do not discard the variable anymore). Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/head_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 53e5820d605..eb415043a92 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -255,7 +255,7 @@ ENTRY(secondary_startup_64) lretq /* SMP bootup changes these two */ - __CPUINITDATA + __REFDATA .align 8 ENTRY(initial_code) .quad x86_64_start_kernel -- cgit v1.2.3 From 6871b76fb5197a4db28de7116baf834fa36b7bfe Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 17 Feb 2008 13:23:00 +0100 Subject: x86: annotate pci/common.s:pci_scan_bus_with_sysdata with __devinit Signed-off-by: Sam Ravnborg Cc: Sam Ravnborg Cc: Andrew Morton Cc: H. 
Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/pci/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index b7c67a187b6..7b6e3bb9b28 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -541,7 +541,7 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } -struct pci_bus *pci_scan_bus_with_sysdata(int busno) +struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno) { struct pci_bus *bus = NULL; struct pci_sysdata *sd; -- cgit v1.2.3 From e43eb7bab6e82e1aa93ce4d39546c54347a68077 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 08:38:49 +0100 Subject: x86: exclude vsyscall files from stackprotect Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/Makefile | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 76ec0f8f138..4eb5ce84110 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -6,7 +6,15 @@ extra-y := head_$(BITS).o init_task.o vmlinux.lds extra-$(CONFIG_X86_64) += head64.o CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) -CFLAGS_vsyscall_64.o := $(PROFILING) -g0 + +# +# vsyscalls (which work on the user stack) should have +# no stack-protector checks: +# +nostackp := $(call cc-option, -fno-stack-protector) +CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) +CFLAGS_hpet.o := $(nostackp) +CFLAGS_tsc_64.o := $(nostackp) obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o obj-y += traps_$(BITS).o irq_$(BITS).o -- cgit v1.2.3 From 4b44f810166fb83ad1a817ee599006a7157ee54c Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Sun, 17 Feb 2008 14:41:16 +0100 Subject: x86: coding style fixes in arch/x86/lib/io_64.c This simple patch makes the file error free (according to checkpatch.pl) no code changed: arch/x86/lib/io_64.o: text data bss dec hex filename 308 0 0 308 134 io_64.o.before 308 0 0 308 134 io_64.o.after md5: 3c64f9ed83d091678e849b36ca27bee3 io_64.o.before.asm 3c64f9ed83d091678e849b36ca27bee3 io_64.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/lib/io_64.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/lib/io_64.c b/arch/x86/lib/io_64.c index 87b4a4e1803..3f1eb59b5f0 100644 --- a/arch/x86/lib/io_64.c +++ b/arch/x86/lib/io_64.c @@ -1,23 +1,25 @@ #include -#include #include +#include -void __memcpy_toio(unsigned long dst,const void*src,unsigned len) +void __memcpy_toio(unsigned long dst, const void *src, unsigned len) { - __inline_memcpy((void *) dst,src,len); + __inline_memcpy((void *)dst, src, len); } EXPORT_SYMBOL(__memcpy_toio); -void __memcpy_fromio(void *dst,unsigned long src,unsigned len) +void __memcpy_fromio(void *dst, unsigned long src, unsigned len) { - __inline_memcpy(dst,(const void *) src,len); + __inline_memcpy(dst, (const void *)src, len); } EXPORT_SYMBOL(__memcpy_fromio); void memset_io(volatile void __iomem *a, int b, size_t c) { - /* XXX: memset can mangle the IO patterns quite a bit. - perhaps it would be better to use a dumb one */ - memset((void *)a,b,c); + /* + * TODO: memset can mangle the IO patterns quite a bit. 
+ * perhaps it would be better to use a dumb one: + */ + memset((void *)a, b, c); } EXPORT_SYMBOL(memset_io); -- cgit v1.2.3 From 0df025b709ae09081e21545761a249ec2d969689 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Sun, 17 Feb 2008 14:56:50 +0100 Subject: x86: coding style fixes in arch/x86/lib/csum-wrappers_64.c no code changed: arch/x86/lib/csum-wrappers_64.o: text data bss dec hex filename 839 0 0 839 347 csum-wrappers_64.o.before 839 0 0 839 347 csum-wrappers_64.o.after md5: b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.before.asm b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/lib/csum-wrappers_64.c | 80 ++++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index fd42a4a095f..95e45dcc5a2 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -1,117 +1,117 @@ /* Copyright 2002,2003 Andi Kleen, SuSE Labs. * Subject to the GNU Public License v.2 - * + * * Wrappers of assembly checksum functions for x86-64. */ #include #include -/** - * csum_partial_copy_from_user - Copy and checksum from user space. - * @src: source address (user space) +/** + * csum_partial_copy_from_user - Copy and checksum from user space. + * @src: source address (user space) * @dst: destination address * @len: number of bytes to be copied. * @isum: initial sum that is added into the result (32bit unfolded) * @errp: set to -EFAULT for an bad source address. - * + * * Returns an 32bit unfolded checksum of the buffer. - * src and dst are best aligned to 64bits. - */ + * src and dst are best aligned to 64bits. + */ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum isum, int *errp) -{ +{ might_sleep(); *errp = 0; - if (likely(access_ok(VERIFY_READ,src, len))) { + if (likely(access_ok(VERIFY_READ, src, len))) { /* Why 6, not 7? To handle odd addresses aligned we would need to do considerable complications to fix the checksum which is defined as an 16bit accumulator. The fix alignment code is primarily for performance compatibility with 32bit and that will handle odd addresses slowly too. */ - if (unlikely((unsigned long)src & 6)) { - while (((unsigned long)src & 6) && len >= 2) { - __u16 val16; + if (unlikely((unsigned long)src & 6)) { + while (((unsigned long)src & 6) && len >= 2) { + __u16 val16; *errp = __get_user(val16, (const __u16 __user *)src); if (*errp) return isum; *(__u16 *)dst = val16; isum = (__force __wsum)add32_with_carry( (__force unsigned)isum, val16); - src += 2; - dst += 2; + src += 2; + dst += 2; len -= 2; } } isum = csum_partial_copy_generic((__force const void *)src, dst, len, isum, errp, NULL); - if (likely(*errp == 0)) + if (likely(*errp == 0)) return isum; - } + } *errp = -EFAULT; - memset(dst,0,len); - return isum; -} + memset(dst, 0, len); + return isum; +} EXPORT_SYMBOL(csum_partial_copy_from_user); -/** - * csum_partial_copy_to_user - Copy and checksum to user space. +/** + * csum_partial_copy_to_user - Copy and checksum to user space. * @src: source address * @dst: destination address (user space) * @len: number of bytes to be copied. * @isum: initial sum that is added into the result (32bit unfolded) * @errp: set to -EFAULT for an bad destination address. - * + * * Returns an 32bit unfolded checksum of the buffer. * src and dst are best aligned to 64bits. 
- */ + */ __wsum csum_partial_copy_to_user(const void *src, void __user *dst, int len, __wsum isum, int *errp) -{ +{ might_sleep(); if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { *errp = -EFAULT; - return 0; + return 0; } if (unlikely((unsigned long)dst & 6)) { - while (((unsigned long)dst & 6) && len >= 2) { + while (((unsigned long)dst & 6) && len >= 2) { __u16 val16 = *(__u16 *)src; isum = (__force __wsum)add32_with_carry( (__force unsigned)isum, val16); *errp = __put_user(val16, (__u16 __user *)dst); if (*errp) return isum; - src += 2; - dst += 2; + src += 2; + dst += 2; len -= 2; } } *errp = 0; - return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp); -} + return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp); +} EXPORT_SYMBOL(csum_partial_copy_to_user); -/** +/** * csum_partial_copy_nocheck - Copy and checksum. * @src: source address * @dst: destination address * @len: number of bytes to be copied. * @isum: initial sum that is added into the result (32bit unfolded) - * + * * Returns an 32bit unfolded checksum of the buffer. - */ + */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) -{ - return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); -} +{ + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); +} EXPORT_SYMBOL(csum_partial_copy_nocheck); __sum16 csum_ipv6_magic(const struct in6_addr *saddr, @@ -119,16 +119,16 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, __u32 len, unsigned short proto, __wsum sum) { __u64 rest, sum64; - + rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) + (__force __u64)sum; asm(" addq (%[saddr]),%[sum]\n" " adcq 8(%[saddr]),%[sum]\n" - " adcq (%[daddr]),%[sum]\n" + " adcq (%[daddr]),%[sum]\n" " adcq 8(%[daddr]),%[sum]\n" " adcq $0,%[sum]\n" - : [sum] "=r" (sum64) - : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr)); + : [sum] "=r" (sum64) + : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr)); return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); } -- cgit v1.2.3 From d76c1ae4d1f4f322d47e7c6e47a277384ba9d9cb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 Feb 2008 16:48:25 +0100 Subject: x86: clean up csum-wrappers_64.c some more no code changed: arch/x86/lib/csum-wrappers_64.o: text data bss dec hex filename 839 0 0 839 347 csum-wrappers_64.o.before 839 0 0 839 347 csum-wrappers_64.o.after md5: b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.before.asm b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.after.asm Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/lib/csum-wrappers_64.c | 87 ++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 95e45dcc5a2..459b58a8a15 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -1,9 +1,9 @@ -/* Copyright 2002,2003 Andi Kleen, SuSE Labs. +/* + * Copyright 2002, 2003 Andi Kleen, SuSE Labs. * Subject to the GNU Public License v.2 * * Wrappers of assembly checksum functions for x86-64. */ - #include #include @@ -24,37 +24,47 @@ csum_partial_copy_from_user(const void __user *src, void *dst, { might_sleep(); *errp = 0; - if (likely(access_ok(VERIFY_READ, src, len))) { - /* Why 6, not 7? To handle odd addresses aligned we - would need to do considerable complications to fix the - checksum which is defined as an 16bit accumulator. 
The - fix alignment code is primarily for performance - compatibility with 32bit and that will handle odd - addresses slowly too. */ - if (unlikely((unsigned long)src & 6)) { - while (((unsigned long)src & 6) && len >= 2) { - __u16 val16; - *errp = __get_user(val16, (const __u16 __user *)src); - if (*errp) - return isum; - *(__u16 *)dst = val16; - isum = (__force __wsum)add32_with_carry( - (__force unsigned)isum, val16); - src += 2; - dst += 2; - len -= 2; - } + + if (!likely(access_ok(VERIFY_READ, src, len))) + goto out_err; + + /* + * Why 6, not 7? To handle odd addresses aligned we + * would need to do considerable complications to fix the + * checksum which is defined as an 16bit accumulator. The + * fix alignment code is primarily for performance + * compatibility with 32bit and that will handle odd + * addresses slowly too. + */ + if (unlikely((unsigned long)src & 6)) { + while (((unsigned long)src & 6) && len >= 2) { + __u16 val16; + + *errp = __get_user(val16, (const __u16 __user *)src); + if (*errp) + return isum; + + *(__u16 *)dst = val16; + isum = (__force __wsum)add32_with_carry( + (__force unsigned)isum, val16); + src += 2; + dst += 2; + len -= 2; } - isum = csum_partial_copy_generic((__force const void *)src, - dst, len, isum, errp, NULL); - if (likely(*errp == 0)) - return isum; } + isum = csum_partial_copy_generic((__force const void *)src, + dst, len, isum, errp, NULL); + if (unlikely(*errp)) + goto out_err; + + return isum; + +out_err: *errp = -EFAULT; memset(dst, 0, len); + return isum; } - EXPORT_SYMBOL(csum_partial_copy_from_user); /** @@ -73,6 +83,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst, int len, __wsum isum, int *errp) { might_sleep(); + if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { *errp = -EFAULT; return 0; @@ -81,6 +92,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst, if (unlikely((unsigned long)dst & 6)) { while (((unsigned long)dst & 6) && len >= 2) { __u16 val16 = *(__u16 *)src; + isum = (__force __wsum)add32_with_carry( (__force unsigned)isum, val16); *errp = __put_user(val16, (__u16 __user *)dst); @@ -93,9 +105,9 @@ csum_partial_copy_to_user(const void *src, void __user *dst, } *errp = 0; - return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp); + return csum_partial_copy_generic(src, (void __force *)dst, + len, isum, NULL, errp); } - EXPORT_SYMBOL(csum_partial_copy_to_user); /** @@ -122,14 +134,17 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) + (__force __u64)sum; - asm(" addq (%[saddr]),%[sum]\n" - " adcq 8(%[saddr]),%[sum]\n" - " adcq (%[daddr]),%[sum]\n" - " adcq 8(%[daddr]),%[sum]\n" - " adcq $0,%[sum]\n" + + asm(" addq (%[saddr]),%[sum]\n" + " adcq 8(%[saddr]),%[sum]\n" + " adcq (%[daddr]),%[sum]\n" + " adcq 8(%[daddr]),%[sum]\n" + " adcq $0,%[sum]\n" + : [sum] "=r" (sum64) : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr)); - return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); -} + return csum_fold( + (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); +} EXPORT_SYMBOL(csum_ipv6_magic); -- cgit v1.2.3 From d8ff0bbf564f7ebf6c33ef6662d8f00c7d43ba80 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 1 Feb 2008 21:31:51 +0100 Subject: x86: fix printout ugliness in cpu info printk fix print_cpu_info, because it produced on boot: CPU: <6>AMD Athlon(tm) 64 Processor 3200+ stepping 00 instead of: CPU: AMD Athlon(tm) 64 Processor 3200+ stepping 00 (broken 
since 04e1ba852132c9ad006affcd5b8c8606295170b0 - x86: cleanup kernel/setup_64.c) Signed-off-by: Marcin Slusarz Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/setup_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index e1866172deb..6fd804f0782 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -1046,7 +1046,7 @@ __setup("noclflush", setup_noclflush); void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) { if (c->x86_model_id[0]) - printk(KERN_INFO "%s", c->x86_model_id); + printk(KERN_CONT "%s", c->x86_model_id); if (c->x86_mask || c->cpuid_level >= 0) printk(KERN_CONT " stepping %02x\n", c->x86_mask); -- cgit v1.2.3 From bbb1e57a1c0b732cfeb727bed7c61e80a79c6479 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 9 Feb 2008 00:49:13 +0300 Subject: x86 cleanup: suspend_asm_64.S - use X86_CR4_PGE instead of numeric value By including we're allowed to use X86_CR4_PGE instead of numeric constant. md5 sums of compiled files are differ due to this inclusion but .text section remains the same. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/power/hibernate_asm_64.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 1deb3244b99..000415947d9 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -20,6 +20,7 @@ #include #include #include +#include ENTRY(swsusp_arch_suspend) movq $saved_context, %rax @@ -60,7 +61,7 @@ ENTRY(restore_image) /* Flush TLB */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx - andq $~(1<<7), %rdx # PGE + andq $~(X86_CR4_PGE), %rdx movq %rdx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3; @@ -112,7 +113,7 @@ ENTRY(restore_registers) /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx - andq $~(1<<7), %rdx; # PGE + andq $~(X86_CR4_PGE), %rdx movq %rdx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3 -- cgit v1.2.3 From 3cdac41f2090ad9013dfefab7399b1debfb9275a Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sun, 17 Feb 2008 18:17:17 +0300 Subject: x86: lds - Use PAGE_SIZE instead of numeric constant It's much better to use PAGE_SIZE then magic 4096 (though it's almost synonym in most cases on x86 but not for *all* cases ;) Signed-off-by: Cyrill Gorcunov Acked-by: Sam Ravnborg Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/vmlinux_32.lds.S | 26 +++++++++++++------------- arch/x86/kernel/vmlinux_64.lds.S | 28 ++++++++++++++-------------- 2 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index f1148ac8abe..2ffa9656fe7 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -38,7 +38,7 @@ SECTIONS /* read-only */ .text : AT(ADDR(.text) - LOAD_OFFSET) { - . = ALIGN(4096); /* not really needed, already page aligned */ + . = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */ *(.text.page_aligned) TEXT_TEXT SCHED_TEXT @@ -70,21 +70,21 @@ SECTIONS RODATA /* writeable */ - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ DATA_DATA CONSTRUCTORS } :data - . = ALIGN(4096); + . 
= ALIGN(PAGE_SIZE); .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { __nosave_begin = .; *(.data.nosave) - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __nosave_end = .; } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { *(.data.page_aligned) *(.data.idt) @@ -108,7 +108,7 @@ SECTIONS } /* might get freed after init */ - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) @@ -120,10 +120,10 @@ SECTIONS * after boot. Always make sure that ALIGN() directive is present after * the section which contains __smp_alt_end. */ - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); /* will be freed after init */ - . = ALIGN(4096); /* Init code and data */ + . = ALIGN(PAGE_SIZE); /* Init code and data */ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { __init_begin = .; _sinittext = .; @@ -174,23 +174,23 @@ SECTIONS EXIT_DATA } #if defined(CONFIG_BLK_DEV_INITRD) - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { __initramfs_start = .; *(.init.ramfs) __initramfs_end = .; } #endif - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { __per_cpu_start = .; *(.data.percpu) *(.data.percpu.shared_aligned) __per_cpu_end = .; } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); /* freed after init ends here */ - + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __init_end = .; __bss_start = .; /* BSS */ @@ -200,7 +200,7 @@ SECTIONS __bss_stop = .; _end = . ; /* This is where the kernel creates the early boot page tables */ - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); pg0 = . ; } diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 0992b9946c6..5e9ec1b33c1 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -37,7 +37,7 @@ SECTIONS KPROBES_TEXT *(.fixup) *(.gnu.warning) - _etext = .; /* End of text section */ + _etext = .; /* End of text section */ } :text = 0x9090 . = ALIGN(16); /* Exception table */ @@ -60,7 +60,7 @@ SECTIONS __tracedata_end = .; } - . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ + . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA @@ -119,7 +119,7 @@ SECTIONS .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } - . = VSYSCALL_VIRT_ADDR + 4096; + . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; #undef VSYSCALL_ADDR #undef VSYSCALL_PHYS_ADDR @@ -134,23 +134,23 @@ SECTIONS *(.data.init_task) }:data.init - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { *(.data.page_aligned) } /* might get freed after init */ - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __smp_alt_begin = .; __smp_locks = .; .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { *(.smp_locks) } __smp_locks_end = .; - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __smp_alt_end = .; - . = ALIGN(4096); /* Init code and data */ + . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; @@ -191,7 +191,7 @@ SECTIONS .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { *(.altinstructions) } - __alt_instructions_end = .; + __alt_instructions_end = .; .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { *(.altinstr_replacement) } @@ -207,25 +207,25 @@ SECTIONS /* vdso blob that is mapped into user space */ vdso_start = . 
; .vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); vdso_end = .; #ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __initramfs_start = .; .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } __initramfs_end = .; #endif - PERCPU(4096) + PERCPU(PAGE_SIZE) - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __init_end = .; - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __nosave_begin = .; .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __nosave_end = .; __bss_start = .; /* BSS */ -- cgit v1.2.3 From 85c42d0dbb129b6bff560dacabec753677d64081 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sun, 17 Feb 2008 18:17:18 +0300 Subject: x86: lds - Use THREAD_SIZE instead of numeric constant Though we use the PDA for regular task stacks, that is not acceptable for init_task, which is a special one. We still have to allocate init_task's stack in that manner. Signed-off-by: Cyrill Gorcunov Acked-by: Sam Ravnborg Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/vmlinux_64.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 5e9ec1b33c1..fab13229973 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -129,7 +129,7 @@ SECTIONS #undef VVIRT_OFFSET #undef VVIRT - . = ALIGN(8192); /* init_task */ + . = ALIGN(THREAD_SIZE); /* init_task */ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { *(.data.init_task) }:data.init -- cgit v1.2.3 From fd59e9e9c8e35cd2a1834c0d1f67aedf0c5c68c2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 Feb 2008 20:20:24 +0100 Subject: x86: change IO delay back to 0x80 Change back the IO delay to 0x80. Alan says that 0xed is known to break some older boxes, and given that the get-rid-of-outb-APIs efforts are well underway we should just let them be finished. Signed-off-by: Ingo Molnar Acked-by: Alan Cox Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 864affc9a7b..702eb39901c 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -156,7 +156,7 @@ config IO_DELAY_TYPE_NONE choice prompt "IO delay type" - default IO_DELAY_0XED + default IO_DELAY_0X80 config IO_DELAY_0X80 bool "port 0x80 based port-IO delay [recommended]" -- cgit v1.2.3 From 3f85d63ea4ff922f6abdb509f4aaf6993b3273a3 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sat, 16 Feb 2008 12:36:10 +0100 Subject: x86: fix vdso_install breaks user "make install" I suggest making the vdso_install step independent, as in the following patch. This solves the issue at hand and still gives us the possibility to install the files should they be needed. 
Signed-off-by: Sam Ravnborg Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/powerpc/Makefile | 2 +- arch/x86/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 6845482f009..1c6ce3536e4 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -176,7 +176,7 @@ define archhelp @echo ' *_defconfig - Select default config from arch/$(ARCH)/configs' endef -install: vdso_install +install: $(Q)$(MAKE) $(build)=$(boot) install vdso_install: diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 204af43535c..f1e739a43d4 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -229,7 +229,7 @@ zdisk bzdisk: vmlinux fdimage fdimage144 fdimage288 isoimage: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ -install: vdso_install +install: $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install PHONY += vdso_install -- cgit v1.2.3 From 7c6357da1185d286adaa4452d829ac9b27c4d12f Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 18 Feb 2008 00:59:54 +0200 Subject: x86: i8259A: remove redundant irq_desc initialization Remove redundant irq_desc[NR_IRQS] element initialization in init_ISA_irqs(). irq_desc[NR_IRQS] is already statically initialized with the same values in kernel/irq/handle.c. Besides the clean-up value, this also saves some space: text data bss dec hex filename 1389 356 14 1759 6df i8259_32.o.before 1325 356 14 1695 69f i8259_32.o.after Signed-off-by: Ahmed S. Darwish Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/i8259_32.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c index 2d25b77102f..fe631967d62 100644 --- a/arch/x86/kernel/i8259_32.c +++ b/arch/x86/kernel/i8259_32.c @@ -26,8 +26,6 @@ * present in the majority of PC/AT boxes. * plus some generic x86 specific things if generic specifics makes * any sense at all. - * this file should become arch/i386/kernel/irq.c when the old irq.c - * moves to arch independent land */ static int i8259A_auto_eoi; @@ -362,23 +360,12 @@ void __init init_ISA_irqs (void) #endif init_8259A(0); - for (i = 0; i < NR_IRQS; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - - if (i < 16) { - /* - * 16 old-style INTA-cycle interrupts: - */ - set_irq_chip_and_handler_name(i, &i8259A_chip, - handle_level_irq, "XT"); - } else { - /* - * 'high' PCI IRQs filled in on demand - */ - irq_desc[i].chip = &no_irq_chip; - } + /* + * 16 old-style INTA-cycle interrupts: + */ + for (i = 0; i < 16; i++) { + set_irq_chip_and_handler_name(i, &i8259A_chip, + handle_level_irq, "XT"); } } -- cgit v1.2.3 From d8a9e6a51ec58486f850e3606e3fcb86b5b7da41 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 18 Feb 2008 09:54:33 -0800 Subject: x86: fix WARN_ON() message: teach page_is_ram() about the special 4Kb bios data page This patch teaches page_is_ram() about the fact that the first 4Kb of memory are special on x86, even though the E820 table normally doesn't exclude it. This fixes the WARN_ON() reported by Laurent Riffard who was also very helpful in diagnosing the issue. [ mingo@elte.hu: we are working on doing this properly in the e820 space, but for 2.6.25 this is the better fix. 
] Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/ioremap.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 9f42d7e9c15..7fb6eff644b 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -42,6 +42,15 @@ int page_is_ram(unsigned long pagenr) unsigned long addr, end; int i; + /* + * A special case is the first 4Kb of memory; + * This is a BIOS owned area, not kernel ram, but generally + * not listed as such in the E820 table. + */ + if (pagenr == 0) + return 0; + + for (i = 0; i < e820.nr_map; i++) { /* * Not usable memory: -- cgit v1.2.3 From 156fbc3fbe4ab640297b1ae2092821363840aeb6 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 18 Feb 2008 09:58:45 -0800 Subject: x86: fix page_is_ram() thinko page_is_ram() has a special case for the 640k-1M bios area, however due to a thinko the special case checks the e820 table entry and not the memory the user has asked for. This patch fixes the bug. [ mingo@elte.hu: this too is better solved in the e820 space, but those fixes are too intrusive for v2.6.25. ] Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/ioremap.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 7fb6eff644b..f4c95aec5ac 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -50,6 +50,13 @@ int page_is_ram(unsigned long pagenr) if (pagenr == 0) return 0; + /* + * Second special case: Some BIOSen report the PC BIOS + * area (640->1Mb) as ram even though it is not. + */ + if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) && + pagenr < (BIOS_END >> PAGE_SHIFT)) + return 0; for (i = 0; i < e820.nr_map; i++) { /* @@ -60,14 +67,6 @@ int page_is_ram(unsigned long pagenr) addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT; end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT; - /* - * Sanity check: Some BIOSen report areas as RAM that - * are not. Notably the 640->1Mb area, which is the - * PCI BIOS area. - */ - if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) && - end < (BIOS_END >> PAGE_SHIFT)) - continue; if ((pagenr >= addr) && (pagenr < end)) return 1; -- cgit v1.2.3
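
For reference, the two ioremap.c fixes above combine into roughly the following page_is_ram() logic. This is only a consolidated sketch assembled from the hunks shown; the e820 map layout and the E820_RAM type check inside the loop are assumed from surrounding context rather than visible in the diffs, so treat it as an illustration of the two special cases, not as the verbatim post-patch kernel function:

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * First special case: the first 4Kb of memory is a BIOS owned
	 * area, not kernel RAM, but generally not listed as such in
	 * the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSen report the PC BIOS area
	 * (640Kb->1Mb) as RAM even though it is not. Note that the
	 * comparison is now against the page being asked about, not
	 * against the e820 entry being walked.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Skip entries that are not usable memory (E820_RAM
		 * type check assumed; not shown in the hunks above).
		 */
		if (e820.map[i].type != E820_RAM)
			continue;

		addr = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

The essential point of the second fix is that both BIOS-area bounds are compared against the queried pagenr, where the earlier code mistakenly tested the e820 entry itself.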