From 4c7f8900f1d8a0e464e7092f132a7e93f7c20f2f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 23 Feb 2008 07:06:55 +0100 Subject: x86: stackprotector & PARAVIRT fix on paravirt enabled 64-bit kernels the paravirt ops do function calls themselves - which is bad with the stackprotector - for example pda_init() loads 0 into %gs and then does MSR_GS_BASE write (which modifies gs.base) - but that MSR write is a function call on paravirt, which with stackprotector tries to read the stack canary from the PDA ... crashing the bootup. the solution was suggested by Arjan van de Ven: to exclude paravirt.c from stackprotector, too many lowlevel functionality is in it. It's not like we'll have paravirt functions with character arrays on their stack anyway... Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5e618c3b472..8161e5a91ca 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -14,6 +14,7 @@ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) CFLAGS_hpet.o := $(nostackp) CFLAGS_tsc_64.o := $(nostackp) +CFLAGS_paravirt.o := $(nostackp) obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o obj-y += traps_$(BITS).o irq_$(BITS).o -- cgit v1.2.3 From e00320875d0cc5f8099a7227b2f25fbb3231268d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 08:48:23 +0100 Subject: x86: fix stackprotector canary updates during context switches fix a bug noticed and fixed by pageexec@freemail.hu. if built with -fstack-protector-all then we'll have canary checks built into the __switch_to() function. That does not work well with the canary-switching code there: while we already use the %rsp of the new task, we still call __switch_to() whith the previous task's canary value in the PDA, hence the __switch_to() ssp prologue instructions will store the previous canary. Then we update the PDA and upon return from __switch_to() the canary check triggers and we panic. so update the canary after we have called __switch_to(), where we are at the same stackframe level as the last stackframe of the next (and now freshly current) task. Note: this means that we call __switch_to() [and its sub-functions] still with the old canary, but that is not a problem, both the previous and the next task has a high-quality canary. The only (mostly academic) disadvantage is that the canary of one task may leak onto the stack of another task, increasing the risk of information leaks, were an attacker able to read the stack of specific tasks (but not that of others). To solve this we'll have to reorganize the way we switch tasks, and move the PDA setting into the switch_to() assembly code. That will happen in another patch. 
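To make the ordering problem concrete: with -fstack-protector(-all), gcc's prologue snapshots the per-CPU canary (read via %gs:40, i.e. the PDA field) on function entry and the epilogue re-checks it before returning, so a function that rewrites the PDA canary in the middle of its own body fails its own exit check. A minimal stand-alone model of that failure (plain userspace C, hypothetical names, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* stands in for the per-CPU PDA field that gcc reads via %gs:40 */
static unsigned long pda_stack_canary = 0x1111;

static void switch_to_model(unsigned long next_canary, int update_inside)
{
	unsigned long entry_canary = pda_stack_canary;	/* ssp prologue */

	if (update_inside)			/* old, buggy ordering */
		pda_stack_canary = next_canary;

	if (pda_stack_canary != entry_canary) {	/* ssp epilogue check */
		fprintf(stderr, "canary mismatch -> would panic\n");
		exit(1);
	}
}

int main(void)
{
	/* fixed ordering: update the canary only after the call returns */
	switch_to_model(0x2222, 0);
	pda_stack_canary = 0x2222;

	/* buggy ordering: update inside the protected function -> mismatch */
	switch_to_model(0x3333, 1);
	return 0;
}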
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index e2319f39988..d8640388039 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -640,7 +640,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) write_pda(kernelstack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); #ifdef CONFIG_CC_STACKPROTECTOR - write_pda(stack_canary, next_p->stack_canary); /* * Build time only check to make sure the stack_canary is at * offset 40 in the pda; this is a gcc ABI requirement -- cgit v1.2.3 From ce22bd92cba0958e052cb1ce0f89f1d3a02b60a7 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 12 May 2008 15:44:31 +0200 Subject: x86: setup stack canary for the idle threads The idle threads for non-boot CPUs are a bit special in how they are created; the result is that these don't have the stack canary set up properly in their PDA. Easiest fix is to just always set the PDA up correctly when entering the idle thread; this is a NOP for the boot cpu. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index d8640388039..9e69e223023 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -146,6 +146,15 @@ static inline void play_dead(void) void cpu_idle(void) { current_thread_info()->status |= TS_POLLING; + +#ifdef CONFIG_CC_STACKPROTECTOR + /* + * If we're the non-boot CPU, nothing set the PDA stack + * canary up for us. This is as good a place as any for + * doing that. + */ + write_pda(stack_canary, current->stack_canary); +#endif /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(); -- cgit v1.2.3 From 7e09b2a02dae4616a5a26000169964b32f86cd35 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 09:22:34 +0100 Subject: x86: fix canary of the boot CPU's idle task the boot CPU's idle task has a zero stackprotector canary value. this is a special task that is never forked, so the fork code does not randomize its canary. Do it when we hit cpu_idle(). Academic sidenote: this means that the early init code runs with a zero canary and hence the canary becomes predictable for this short, boot-only amount of time. Although attack vectors against early init code are very rare, it might make sense to move this initialization to an earlier point. (to one of the early init functions that never return - such as start_kernel()) Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9e69e223023..d4c7ac7aa43 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -150,9 +150,13 @@ void cpu_idle(void) #ifdef CONFIG_CC_STACKPROTECTOR /* * If we're the non-boot CPU, nothing set the PDA stack - * canary up for us. This is as good a place as any for - * doing that. + * canary up for us - and if we are the boot CPU we have + * a 0 stack canary. 
This is a good place for updating + * it, as we wont ever return from this function (so the + * invalid canaries already on the stack wont ever + * trigger): */ + current->stack_canary = get_random_int(); write_pda(stack_canary, current->stack_canary); #endif /* endless idle loop with no priority at all */ -- cgit v1.2.3 From 72370f2a5b227bd3817593a6b15ea3f53f51dfcb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 13 Feb 2008 16:15:34 +0100 Subject: x86: if stackprotector is enabled, thn use stack-protector-all by default also enable the rodata and nx tests. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig | 3 ++- arch/x86/Kconfig.debug | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dcbec34154c..83d8392c133 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1142,7 +1142,7 @@ config SECCOMP config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - depends on X86_64 && EXPERIMENTAL && BROKEN + depends on X86_64 help This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of critical functions, a canary @@ -1159,6 +1159,7 @@ config CC_STACKPROTECTOR config CC_STACKPROTECTOR_ALL bool "Use stack-protector for all functions" depends on CC_STACKPROTECTOR + default y help Normally, GCC only inserts the canary value protection for functions that use large-ish on-stack buffers. By enabling diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index ac1e31ba479..b5c5b55d043 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -102,6 +102,7 @@ config DIRECT_GBPAGES config DEBUG_RODATA_TEST bool "Testcase for the DEBUG_RODATA feature" depends on DEBUG_RODATA + default y help This option enables a testcase for the DEBUG_RODATA feature as well as for the change_page_attr() infrastructure. -- cgit v1.2.3 From 18aa8bb12dcb10adc3d7c9d69714d53667c0ab7f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 09:42:02 +0100 Subject: stackprotector: add boot_init_stack_canary() add the boot_init_stack_canary() and make the secondary idle threads use it. 
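The helper itself is not part of this hunk; reconstructed from the open-coded lines it replaces in cpu_idle() (see the previous patch), a sketch could look like the following. The BUILD_BUG_ON mirrors the offset-40 ABI check already present in __switch_to(); the in-tree version may gather its randomness differently (e.g. mixing in the TSC), so treat this as an approximation rather than the actual implementation:

/*
 * Sketch of boot_init_stack_canary(), pieced together from the
 * open-coded sequence removed below; not necessarily the exact
 * in-tree implementation.
 */
static __always_inline void boot_init_stack_canary(void)
{
	u64 canary = get_random_int();

	/*
	 * gcc hardcodes the stack canary at %gs:40 on x86-64, so the
	 * PDA field must stay at offset 40 (same ABI check as in
	 * __switch_to()).
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);

	current->stack_canary = canary;
	write_pda(stack_canary, canary);
}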
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index d4c7ac7aa43..5107cb214c7 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -147,7 +147,6 @@ void cpu_idle(void) { current_thread_info()->status |= TS_POLLING; -#ifdef CONFIG_CC_STACKPROTECTOR /* * If we're the non-boot CPU, nothing set the PDA stack * canary up for us - and if we are the boot CPU we have @@ -156,9 +155,8 @@ void cpu_idle(void) * invalid canaries already on the stack wont ever * trigger): */ - current->stack_canary = get_random_int(); - write_pda(stack_canary, current->stack_canary); -#endif + boot_init_stack_canary(); + /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(); -- cgit v1.2.3 From 420594296838fdc9a674470d710cda7d1487f9f4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 09:44:08 +0100 Subject: x86: fix the stackprotector canary of the boot CPU Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 5107cb214c7..cce47f7fbf2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -16,6 +16,7 @@ #include +#include #include #include #include -- cgit v1.2.3 From 113c5413cf9051cc50b88befdc42e3402bb92115 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 14 Feb 2008 10:36:03 +0100 Subject: x86: unify stackprotector features streamline the stackprotector features under a single option and make the stronger feature the one accessible. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 83d8392c133..0cd1695c24f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1140,13 +1140,17 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. +config CC_STACKPROTECTOR_ALL + bool + config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" depends on X86_64 + select CC_STACKPROTECTOR_ALL help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of critical functions, a canary - value on the stack just before the return address, and validates + This option turns on the -fstack-protector GCC feature. This + feature puts, at the beginning of functions, a canary value on + the stack just before the return address, and validates the value just before actually returning. Stack based buffer overflows (that need to overwrite this return address) now also overwrite the canary, which gets detected and the attack is then @@ -1154,16 +1158,8 @@ config CC_STACKPROTECTOR This feature requires gcc version 4.2 or above, or a distribution gcc with the feature backported. Older versions are automatically - detected and for those versions, this configuration option is ignored. - -config CC_STACKPROTECTOR_ALL - bool "Use stack-protector for all functions" - depends on CC_STACKPROTECTOR - default y - help - Normally, GCC only inserts the canary value protection for - functions that use large-ish on-stack buffers. By enabling - this option, GCC will be asked to do this for ALL functions. 
+ detected and for those versions, this configuration option is + ignored. (and a warning is printed during bootup) source kernel/Kconfig.hz -- cgit v1.2.3 From b40a4392a3c262e0d1b5379b4e142a8eefa63439 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Fri, 18 Apr 2008 06:16:45 -0700 Subject: stackprotector: turn not having the right gcc into a #warning If the user selects the stack-protector config option, but does not have a gcc that has the right bits enabled (for example because it isn't build with a glibc that supports TLS, as is common for cross-compilers, but also because it may be too old), then the runtime test fails right now. This patch adds a warning message for this scenario. This warning accomplishes two goals 1) the user is informed that the security option he selective isn't available 2) the user is suggested to turn of the CONFIG option that won't work for him, and would make the runtime test fail anyway. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 3cff3c894cf..c3e0eeeb1dd 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -73,7 +73,7 @@ else stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \ - "$(CC)" -fstack-protector ) + "$(CC)" "-fstack-protector -DGCC_HAS_SP" ) stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \ "$(CC)" -fstack-protector-all ) -- cgit v1.2.3 From 7c9f8861e6c9c839f913e49b98c3854daca18f27 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 22 Apr 2008 16:38:23 -0500 Subject: stackprotector: use canary at end of stack to indicate overruns at oops time (Updated with a common max-stack-used checker that knows about the canary, as suggested by Joe Perches) Use a canary at the end of the stack to clearly indicate at oops time whether the stack has ever overflowed. This is a very simple implementation with a couple of drawbacks: 1) a thread may legitimately use exactly up to the last word on the stack -- but the chances of doing this and then oopsing later seem slim 2) it's possible that the stack usage isn't dense enough that the canary location could get skipped over -- but the worst that happens is that we don't flag the overrun -- though this happens fairly often in my testing :( With the code in place, an intentionally-bloated stack oops might do: BUG: unable to handle kernel paging request at ffff8103f84cc680 IP: [] update_curr+0x9a/0xa8 PGD 8063 PUD 0 Thread overran stack or stack corrupted Oops: 0000 [1] SMP CPU 0 ... ... unless the stack overrun is so bad that it corrupts some other thread. 
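The fault.c hunk below only reports the condition; the "common max-stack-used checker" mentioned above boils down to walking up from the end-of-stack canary and counting words that are still zero. A simplified sketch (the helper name and exact form are illustrative -- only end_of_stack()/STACK_END_MAGIC appear in the diff):

/*
 * Simplified sketch of a max-stack-usage checker that knows about the
 * end-of-stack canary; name and details are illustrative.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	n++;			/* skip the STACK_END_MAGIC word itself */
	while (!*n)		/* the stack grows down towards the canary, */
		n++;		/* so untouched words just above it stay 0  */

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}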
Signed-off-by: Eric Sandeen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/fault.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index fd7e1798c75..1f524df68b9 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -581,6 +582,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) unsigned long address; int write, si_code; int fault; + unsigned long *stackend; + #ifdef CONFIG_X86_64 unsigned long flags; #endif @@ -850,6 +853,10 @@ no_context: show_fault_oops(regs, error_code, address); + stackend = end_of_stack(tsk); + if (*stackend != STACK_END_MAGIC) + printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); + tsk->thread.cr2 = address; tsk->thread.trap_no = 14; tsk->thread.error_code = error_code; -- cgit v1.2.3 From ae03c49964af5033534e518eebe439c3b90f43a7 Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Sun, 4 Jan 2009 22:46:17 +0100 Subject: bzip2/lzma: x86 kernel compression support Impact: Replaces x86 kernel decompressor with new code This is the third part of the bzip2/lzma patch The bzip patch is based on an idea by Christian Ludwig, includes support for compressing the kernel with bzip2 or lzma rather than gzip. Both compressors give smaller sizes than gzip. Lzma's decompresses faster than bzip2. It also supports ramdisks and initramfs' compressed using these two compressors. The functionality has been successfully used for a couple of years by the udpcast project This version applies to "tip" kernel 2.6.28 This part contains: - support for new bzip2 and lzma kernel compression for x86 Signed-off-by: Alain Knaff Signed-off-by: H. 
Peter Anvin --- arch/x86/boot/compressed/Makefile | 21 ++++++- arch/x86/boot/compressed/misc.c | 118 +++++--------------------------------- arch/x86/include/asm/boot.h | 12 +++- 3 files changed, 44 insertions(+), 107 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 1771c804e02..3ca4c194b8e 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y) ifdef CONFIG_RELOCATABLE $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE + $(call if_changed,lzma) else $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) endif LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T else + $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T endif -$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE +suffix_$(CONFIG_KERNEL_GZIP) = gz +suffix_$(CONFIG_KERNEL_BZIP2) = bz2 +suffix_$(CONFIG_KERNEL_LZMA) = lzma + +$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index da062216948..e45be73684f 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -116,71 +116,13 @@ /* * gzip declarations */ - -#define OF(args) args #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset((s), 0, (n)) -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; - -/* - * Window size must be at least 32k, and a power of two. - * We don't actually have a window just a huge output buffer, - * so we report a 2G window size, as that should always be - * larger than our output buffer: - */ -#define WSIZE 0x80000000 - -/* Input buffer: */ -static unsigned char *inbuf; - -/* Sliding window buffer (and final output buffer): */ -static unsigned char *window; - -/* Valid bytes in inbuf: */ -static unsigned insize; - -/* Index of next byte to be processed in inbuf: */ -static unsigned inptr; - -/* Bytes in output buffer: */ -static unsigned outcnt; - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAM 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6, 7: reserved */ - -#define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) - -/* Diagnostic functions */ -#ifdef DEBUG -# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0) -# define Trace(x) do { fprintf x; } while (0) -# define Tracev(x) do { if (verbose) fprintf x ; } while (0) -# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0) -# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0) -# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0) -#else -# define Assert(cond, msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c, x) -# define Tracecv(c, x) -#endif -static int fill_inbuf(void); -static void flush_window(void); static void error(char *m); /* @@ -189,13 +131,8 @@ static void error(char *m); static struct boot_params *real_mode; /* Pointer to real-mode data */ static int quiet; -extern unsigned char input_data[]; -extern int input_len; - -static long bytes_out; - static void *memset(void *s, int c, unsigned n); -static void *memcpy(void *dest, const void *src, unsigned n); +void *memcpy(void *dest, const void *src, unsigned n); static void __putstr(int, const char *); #define putstr(__x) __putstr(0, __x) @@ -213,7 +150,17 @@ static char *vidmem; static int vidport; static int lines, cols; -#include "../../../../lib/inflate.c" +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif static void scroll(void) { @@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n) return s; } -static void *memcpy(void *dest, const void *src, unsigned n) +void *memcpy(void *dest, const void *src, unsigned n) { int i; const char *s = src; @@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n) return dest; } -/* =========================================================================== - * Fill the input buffer. This is called only when the buffer is empty - * and at least one byte is really needed. - */ -static int fill_inbuf(void) -{ - error("ran out of input data"); - return 0; -} - -/* =========================================================================== - * Write the output window window[0..outcnt-1] and update crc and bytes_out. - * (Used for the decompressed data only.) - */ -static void flush_window(void) -{ - /* With my window equal to my output buffer - * I only need to compute the crc here. - */ - unsigned long c = crc; /* temporary variable */ - unsigned n; - unsigned char *in, ch; - - in = window; - for (n = 0; n < outcnt; n++) { - ch = *in++; - c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (unsigned long)outcnt; - outcnt = 0; -} static void error(char *x) { @@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, lines = real_mode->screen_info.orig_video_lines; cols = real_mode->screen_info.orig_video_cols; - window = output; /* Output buffer (Normally at 1M) */ free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; - inbuf = input_data; /* Input buffer */ - insize = input_len; - inptr = 0; #ifdef CONFIG_X86_64 if ((unsigned long)output & (__KERNEL_ALIGN - 1)) @@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, #endif #endif - makecrc(); if (!quiet) putstr("\nDecompressing Linux... 
"); - gunzip(); + decompress(input_data, input_len, NULL, NULL, output, NULL, error); parse_elf(output); if (!quiet) putstr("done.\nBooting the kernel.\n"); diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index dd61616cb73..c0e8e68a31f 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -15,11 +15,21 @@ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) +#if (defined CONFIG_KERNEL_BZIP2) +#define BOOT_HEAP_SIZE 0x400000 +#else + #ifdef CONFIG_X86_64 #define BOOT_HEAP_SIZE 0x7000 -#define BOOT_STACK_SIZE 0x4000 #else #define BOOT_HEAP_SIZE 0x4000 +#endif + +#endif + +#ifdef CONFIG_X86_64 +#define BOOT_STACK_SIZE 0x4000 +#else #define BOOT_STACK_SIZE 0x1000 #endif -- cgit v1.2.3 From 2e9f3bddcbc711bb14d86c6f068a779bf3710247 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Sun, 4 Jan 2009 15:41:25 -0800 Subject: bzip2/lzma: make config machinery an arch configurable Impact: Bug fix (we should not show this menu on irrelevant architectures) Make the config machinery to drive the gzip/bzip2/lzma selection dependent on the architecture advertising HAVE_KERNEL_* so that we don't display this for architectures where it doesn't matter. Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 862adb9bf0d..7b66c34d0aa 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -39,6 +39,9 @@ config X86 select HAVE_GENERIC_DMA_COHERENT if X86_32 select HAVE_EFFICIENT_UNALIGNED_ACCESS select USER_STACKTRACE_SUPPORT + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_LZMA config ARCH_DEFCONFIG string -- cgit v1.2.3 From 7760ec77ab2a9e48bdd0d13341446a8a51f0b9f1 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 18:10:13 +0530 Subject: x86: smp.h remove obsolete function declaration Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/smp.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 830b9fcb642..83a4cc07431 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -22,7 +22,6 @@ extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; extern cpumask_t cpu_callin_map; -extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); extern int __cpuinit get_local_pda(int cpu); -- cgit v1.2.3 From dacf7333571d770366bff74d10b56aa545434605 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 17:26:35 +0530 Subject: x86: smp.h move zap_low_mappings declartion to tlbflush.h Impact: cleanup, moving NON-SMP stuff from smp.h Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/smp.h | 2 -- arch/x86/include/asm/tlbflush.h | 2 ++ arch/x86/mm/init_32.c | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 83a4cc07431..64c9e848f13 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -22,8 +22,6 @@ extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; extern cpumask_t cpu_callin_map; -extern void zap_low_mappings(void); - extern int __cpuinit get_local_pda(int cpu); extern int smp_num_siblings; diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 0e7bbb54911..aed0b700b83 100644 --- 
a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -175,4 +175,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, flush_tlb_all(); } +extern void zap_low_mappings(void); + #endif /* _ASM_X86_TLBFLUSH_H */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f99a6c6c432..a9dd0b7ad61 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -49,7 +49,6 @@ #include #include #include -#include unsigned int __VMALLOC_RESERVE = 128 << 20; -- cgit v1.2.3 From 6e5385d44b2df05e50a8d07ba0e14d3e32685237 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 18:11:35 +0530 Subject: x86: smp.h move prefill_possible_map declartion to cpu.h Impact: cleanup, moving NON-SMP stuff from smp.h Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpu.h | 10 ++++++++++ arch/x86/include/asm/smp.h | 6 ------ arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smpboot.c | 1 - 4 files changed, 11 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index bae482df603..29aa6d0752b 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -7,6 +7,16 @@ #include #include +#ifdef CONFIG_SMP + +extern void prefill_possible_map(void); + +#else /* CONFIG_SMP */ + +static inline void prefill_possible_map(void) {} + +#endif /* CONFIG_SMP */ + struct x86_cpu { struct cpu cpu; }; diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 64c9e848f13..62bd3f68269 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -138,8 +138,6 @@ void play_dead_common(void); void native_send_call_func_ipi(const struct cpumask *mask); void native_send_call_func_single_ipi(int cpu); -extern void prefill_possible_map(void); - void smp_store_cpu_info(int id); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) @@ -148,10 +146,6 @@ static inline int num_booting_cpus(void) { return cpus_weight(cpu_callout_map); } -#else -static inline void prefill_possible_map(void) -{ -} #endif /* CONFIG_SMP */ extern unsigned disabled_cpus __cpuinitdata; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ae0d8042cf6..f41c4486c27 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -89,7 +89,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 07576bee03e..f8c885bed18 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -53,7 +53,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From f472cdba849cc3d838f3788469316e8572463a8c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 21:34:25 +0530 Subject: x86: smp.h move stack_processor_id declartion to cpu.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpu.h | 2 ++ arch/x86/include/asm/smp.h | 1 - arch/x86/kernel/cpu/common.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 29aa6d0752b..f958e7e49c0 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -15,6 +15,8 @@ extern void prefill_possible_map(void); static inline void prefill_possible_map(void) {} +#define stack_smp_processor_id() 0 + #endif /* CONFIG_SMP */ struct x86_cpu { diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h 
index 62bd3f68269..ed4af9a89cf 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -173,7 +173,6 @@ extern int safe_smp_processor_id(void); #else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define safe_smp_processor_id() 0 -#define stack_smp_processor_id() 0 #endif #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3f95a40f718..f7619a2eaff 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -21,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_X86_LOCAL_APIC #include #include -- cgit v1.2.3 From 96b89dc6598a50e3aac8e2c6d826ae3795b7d030 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 21:35:48 +0530 Subject: x86: smp.h move safe_smp_processor_id declartion to cpu.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpu.h | 1 + arch/x86/include/asm/smp.h | 1 - arch/x86/kernel/crash.c | 2 +- arch/x86/kernel/reboot.c | 1 + arch/x86/mach-voyager/setup.c | 1 + 5 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index f958e7e49c0..4c16888ffa3 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -15,6 +15,7 @@ extern void prefill_possible_map(void); static inline void prefill_possible_map(void) {} +#define safe_smp_processor_id() 0 #define stack_smp_processor_id() 0 #endif /* CONFIG_SMP */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index ed4af9a89cf..c92b93594ab 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -172,7 +172,6 @@ extern int safe_smp_processor_id(void); #else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid -#define safe_smp_processor_id() 0 #endif #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index c689d19e35a..11b93cabdf7 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 2b46eb41643..f8536fee5c1 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 # include diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c index a580b9562e7..0ade62555ff 100644 --- a/arch/x86/mach-voyager/setup.c +++ b/arch/x86/mach-voyager/setup.c @@ -9,6 +9,7 @@ #include #include #include +#include void __init pre_intr_init_hook(void) { -- cgit v1.2.3 From af8968abf09fe5984bdd206e54e0eeb1dc1fa29c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 21:37:33 +0530 Subject: x86: smp.h move cpu_physical_id declartion to cpu.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpu.h | 1 + arch/x86/include/asm/smp.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 4c16888ffa3..89edafb9339 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -15,6 +15,7 @@ extern void prefill_possible_map(void); static inline void prefill_possible_map(void) {} +#define cpu_physical_id(cpu) boot_cpu_physical_apicid #define safe_smp_processor_id() 
0 #define stack_smp_processor_id() 0 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c92b93594ab..c975b6f83c6 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -170,8 +170,6 @@ extern int safe_smp_processor_id(void); }) #define safe_smp_processor_id() smp_processor_id() -#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ -#define cpu_physical_id(cpu) boot_cpu_physical_apicid #endif #ifdef CONFIG_X86_LOCAL_APIC -- cgit v1.2.3 From 6d652ea1d056390a0c33db92b44ed219284b71af Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 7 Jan 2009 21:38:59 +0530 Subject: x86: smp.h move boot_cpu_id declartion to cpu.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpu.h | 7 +++++++ arch/x86/include/asm/smp.h | 6 ------ arch/x86/kernel/io_apic.c | 1 + 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 89edafb9339..f03b23e3286 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -31,4 +31,11 @@ extern void arch_unregister_cpu(int); #endif DECLARE_PER_CPU(int, cpu_state); + +#ifdef CONFIG_X86_HAS_BOOT_CPU_ID +extern unsigned char boot_cpu_id; +#else +#define boot_cpu_id 0 +#endif + #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c975b6f83c6..74ad9ef6ae0 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -212,11 +212,5 @@ static inline int hard_smp_processor_id(void) #endif /* CONFIG_X86_LOCAL_APIC */ -#ifdef CONFIG_X86_HAS_BOOT_CPU_ID -extern unsigned char boot_cpu_id; -#else -#define boot_cpu_id 0 -#endif - #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_SMP_H */ diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536..109c91db202 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 41401db698cbb5d1869776bf336881db267e7d19 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Thu, 8 Jan 2009 15:42:46 +0530 Subject: x86: rename intel_mp_floating to mpf_intel Impact: cleanup, solve 80 columns wrap problems intel_mp_floating should be renamed to mpf_intel. The reason: the 'f' in MPF already means 'floating' which means MP Floating pointer structure - no need to repeat that in the type name. 
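For context on what is being renamed: the MP Floating Pointer Structure is a 16-byte block (its length field is counted in 16-byte paragraphs) that the kernel finds by scanning low memory for the "_MP_" signature and verifying that all 16 bytes sum to zero; smp_scan_config() in the hunks below also accepts only specification revisions 1 and 4. A compact illustration of that validation, using the type and field names as they stand after this patch and the following field-rename patch (simplified -- the real scan loop additionally reserves bootmem, etc.):

/* Simplified validity check for a candidate MP Floating Pointer
 * Structure, using the renamed struct mpf_intel; illustration only.
 */
static int mpf_valid(const struct mpf_intel *mpf)
{
	const unsigned char *p = (const unsigned char *)mpf;
	unsigned char sum = 0;
	int i;

	if (memcmp(mpf->signature, "_MP_", 4))
		return 0;
	if (mpf->length != 1)		/* one 16-byte paragraph */
		return 0;
	for (i = 0; i < 16; i++)	/* checksum byte makes the sum 0 */
		sum += p[i];
	if (sum)
		return 0;

	return mpf->specification == 1 || mpf->specification == 4;
}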
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 3 ++- arch/x86/kernel/mpparse.c | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 59568bc4767..187dc920193 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -24,7 +24,8 @@ # endif #endif -struct intel_mp_floating { +/* Intel MP Floating Pointer Structure */ +struct mpf_intel { char mpf_signature[4]; /* "_MP_" */ unsigned int mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c0601c2848a..6cea941c4db 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -569,14 +569,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) } } -static struct intel_mp_floating *mpf_found; +static struct mpf_intel *mpf_found; /* * Scan the memory blocks for an SMP configuration block. */ static void __init __get_smp_config(unsigned int early) { - struct intel_mp_floating *mpf = mpf_found; + struct mpf_intel *mpf = mpf_found; if (!mpf) return; @@ -687,14 +687,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned reserve) { unsigned int *bp = phys_to_virt(base); - struct intel_mp_floating *mpf; + struct mpf_intel *mpf; apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", bp, length); BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { - mpf = (struct intel_mp_floating *)bp; + mpf = (struct mpf_intel *)bp; if ((*bp == SMP_MAGIC_IDENT) && (mpf->mpf_length == 1) && !mpf_checksum((unsigned char *)bp, 16) && @@ -1000,7 +1000,7 @@ static int __init update_mp_table(void) { char str[16]; char oem[10]; - struct intel_mp_floating *mpf; + struct mpf_intel *mpf; struct mpc_table *mpc, *mpc_new; if (!enable_update_mptable) @@ -1052,7 +1052,7 @@ static int __init update_mp_table(void) mpc = mpc_new; /* check if we can modify that */ if (mpc_new_phys - mpf->mpf_physptr) { - struct intel_mp_floating *mpf_new; + struct mpf_intel *mpf_new; /* steal 16 bytes from [0, 1k) */ printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); mpf_new = phys_to_virt(0x400 - 16); -- cgit v1.2.3 From 1eb1b3b65dc3e3ffcc6a60e115c085c0c11c1077 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Thu, 8 Jan 2009 15:43:26 +0530 Subject: x86: rename all fields of mpf_intel mpf_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpf->mpf_X fields to mpf->X - that alone would give 4 characters per usage site. 
(we already know that it's an 'mpf' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 20 ++++++++-------- arch/x86/kernel/mpparse.c | 50 +++++++++++++++++++-------------------- 2 files changed, 35 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 187dc920193..4a7f96d7c18 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -26,16 +26,16 @@ /* Intel MP Floating Pointer Structure */ struct mpf_intel { - char mpf_signature[4]; /* "_MP_" */ - unsigned int mpf_physptr; /* Configuration table address */ - unsigned char mpf_length; /* Our length (paragraphs) */ - unsigned char mpf_specification;/* Specification version */ - unsigned char mpf_checksum; /* Checksum (makes sum 0) */ - unsigned char mpf_feature1; /* Standard or configuration ? */ - unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ - unsigned char mpf_feature3; /* Unused (0) */ - unsigned char mpf_feature4; /* Unused (0) */ - unsigned char mpf_feature5; /* Unused (0) */ + char signature[4]; /* "_MP_" */ + unsigned int physptr; /* Configuration table address */ + unsigned char length; /* Our length (paragraphs) */ + unsigned char specification; /* Specification version */ + unsigned char checksum; /* Checksum (makes sum 0) */ + unsigned char feature1; /* Standard or configuration ? */ + unsigned char feature2; /* Bit7 set for IMCR|PIC */ + unsigned char feature3; /* Unused (0) */ + unsigned char feature4; /* Unused (0) */ + unsigned char feature5; /* Unused (0) */ }; #define MPC_SIGNATURE "PCMP" diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 6cea941c4db..8385d4e7e15 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -597,9 +597,9 @@ static void __init __get_smp_config(unsigned int early) } printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", - mpf->mpf_specification); + mpf->specification); #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) - if (mpf->mpf_feature2 & (1 << 7)) { + if (mpf->feature2 & (1 << 7)) { printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); pic_mode = 1; } else { @@ -610,7 +610,7 @@ static void __init __get_smp_config(unsigned int early) /* * Now see if we need to read further. */ - if (mpf->mpf_feature1 != 0) { + if (mpf->feature1 != 0) { if (early) { /* * local APIC has default address @@ -620,16 +620,16 @@ static void __init __get_smp_config(unsigned int early) } printk(KERN_INFO "Default MP configuration #%d\n", - mpf->mpf_feature1); - construct_default_ISA_mptable(mpf->mpf_feature1); + mpf->feature1); + construct_default_ISA_mptable(mpf->feature1); - } else if (mpf->mpf_physptr) { + } else if (mpf->physptr) { /* * Read the physical hardware table. Anything here will * override the defaults. 
*/ - if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { + if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) { #ifdef CONFIG_X86_LOCAL_APIC smp_found_config = 0; #endif @@ -696,10 +696,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, while (length > 0) { mpf = (struct mpf_intel *)bp; if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && + (mpf->length == 1) && !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4))) { + ((mpf->specification == 1) + || (mpf->specification == 4))) { #ifdef CONFIG_X86_LOCAL_APIC smp_found_config = 1; #endif @@ -712,7 +712,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, return 1; reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, BOOTMEM_DEFAULT); - if (mpf->mpf_physptr) { + if (mpf->physptr) { unsigned long size = PAGE_SIZE; #ifdef CONFIG_X86_32 /* @@ -721,14 +721,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, * the bottom is mapped now. * PC-9800's MPC table places on the very last * of physical memory; so that simply reserving - * PAGE_SIZE from mpg->mpf_physptr yields BUG() + * PAGE_SIZE from mpf->physptr yields BUG() * in reserve_bootmem. */ unsigned long end = max_low_pfn * PAGE_SIZE; - if (mpf->mpf_physptr + size > end) - size = end - mpf->mpf_physptr; + if (mpf->physptr + size > end) + size = end - mpf->physptr; #endif - reserve_bootmem_generic(mpf->mpf_physptr, size, + reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT); } @@ -1013,19 +1013,19 @@ static int __init update_mp_table(void) /* * Now see if we need to go further. */ - if (mpf->mpf_feature1 != 0) + if (mpf->feature1 != 0) return 0; - if (!mpf->mpf_physptr) + if (!mpf->physptr) return 0; - mpc = phys_to_virt(mpf->mpf_physptr); + mpc = phys_to_virt(mpf->physptr); if (!smp_check_mpc(mpc, oem, str)) return 0; printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); - printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); + printk(KERN_INFO "physptr: %x\n", mpf->physptr); if (mpc_new_phys && mpc->length > mpc_new_length) { mpc_new_phys = 0; @@ -1046,23 +1046,23 @@ static int __init update_mp_table(void) } printk(KERN_INFO "use in-positon replacing\n"); } else { - mpf->mpf_physptr = mpc_new_phys; + mpf->physptr = mpc_new_phys; mpc_new = phys_to_virt(mpc_new_phys); memcpy(mpc_new, mpc, mpc->length); mpc = mpc_new; /* check if we can modify that */ - if (mpc_new_phys - mpf->mpf_physptr) { + if (mpc_new_phys - mpf->physptr) { struct mpf_intel *mpf_new; /* steal 16 bytes from [0, 1k) */ printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); mpf_new = phys_to_virt(0x400 - 16); memcpy(mpf_new, mpf, 16); mpf = mpf_new; - mpf->mpf_physptr = mpc_new_phys; + mpf->physptr = mpc_new_phys; } - mpf->mpf_checksum = 0; - mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16); - printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr); + mpf->checksum = 0; + mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16); + printk(KERN_INFO "physptr new: %x\n", mpf->physptr); } /* -- cgit v1.2.3 From 068790334cececc3d2d945617ccc585477da2e38 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 10 Jan 2009 12:17:37 +0530 Subject: x86: smp.h move cpu_callin_mask and cpu_callin_map declartion to cpumask.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpumask.h | 19 +++++++++++++++++++ arch/x86/include/asm/smp.h | 3 --- arch/x86/kernel/cpu/common.c | 1 + 
arch/x86/kernel/setup_percpu.c | 1 + arch/x86/kernel/smpboot.c | 1 + 5 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 arch/x86/include/asm/cpumask.h (limited to 'arch') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h new file mode 100644 index 00000000000..308dddd5632 --- /dev/null +++ b/arch/x86/include/asm/cpumask.h @@ -0,0 +1,19 @@ +#ifndef _ASM_X86_CPUMASK_H +#define _ASM_X86_CPUMASK_H +#ifndef __ASSEMBLY__ +#include + +#ifdef CONFIG_X86_64 + +extern cpumask_var_t cpu_callin_mask; + +#else /* CONFIG_X86_32 */ + +extern cpumask_t cpu_callin_map; + +#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) + +#endif /* CONFIG_X86_32 */ + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_CPUMASK_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 1963e27673c..c35aa5c0dd1 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -20,19 +20,16 @@ #ifdef CONFIG_X86_64 -extern cpumask_var_t cpu_callin_mask; extern cpumask_var_t cpu_callout_mask; extern cpumask_var_t cpu_initialized_mask; extern cpumask_var_t cpu_sibling_setup_mask; #else /* CONFIG_X86_32 */ -extern cpumask_t cpu_callin_map; extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; extern cpumask_t cpu_sibling_setup_map; -#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) #define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 14e543b6fd4..f0025846244 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -22,6 +22,7 @@ #include #include #include +#include #ifdef CONFIG_X86_LOCAL_APIC #include #include diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 55c46074eba..bf63de72b64 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -13,6 +13,7 @@ #include #include #include +#include #ifdef CONFIG_X86_LOCAL_APIC unsigned int num_processors; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6c2b8444b83..84ac1cf46d8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From fb8fd077fbf0de6662acfd240e8e6b25cf3202ca Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 10 Jan 2009 12:20:24 +0530 Subject: x86: smp.h move cpu_callout_mask and cpu_callout_map declartion to cpumask.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpumask.h | 3 +++ arch/x86/include/asm/smp.h | 4 +--- arch/x86/kernel/smpboot.c | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index 308dddd5632..9933fcad3c8 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -6,12 +6,15 @@ #ifdef CONFIG_X86_64 extern cpumask_var_t cpu_callin_mask; +extern cpumask_var_t cpu_callout_mask; #else /* CONFIG_X86_32 */ extern cpumask_t cpu_callin_map; +extern cpumask_t cpu_callout_map; #define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) +#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 
c35aa5c0dd1..a3afec5cad0 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -17,20 +17,18 @@ #endif #include #include +#include #ifdef CONFIG_X86_64 -extern cpumask_var_t cpu_callout_mask; extern cpumask_var_t cpu_initialized_mask; extern cpumask_var_t cpu_sibling_setup_mask; #else /* CONFIG_X86_32 */ -extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; extern cpumask_t cpu_sibling_setup_map; -#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 84ac1cf46d8..6c2b8444b83 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -55,7 +55,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 493f6ca54e1ea59732dd334e35c5fe2d8e440b06 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 10 Jan 2009 12:48:22 +0530 Subject: x86: smp.h move cpu_initialized_mask and cpu_initialized declartion to cpumask.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpumask.h | 3 +++ arch/x86/include/asm/smp.h | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index 9933fcad3c8..d4cfd120b84 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -7,14 +7,17 @@ extern cpumask_var_t cpu_callin_mask; extern cpumask_var_t cpu_callout_mask; +extern cpumask_var_t cpu_initialized_mask; #else /* CONFIG_X86_32 */ extern cpumask_t cpu_callin_map; extern cpumask_t cpu_callout_map; +extern cpumask_t cpu_initialized; #define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) #define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) +#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a3afec5cad0..7d2a80319e8 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -21,15 +21,12 @@ #ifdef CONFIG_X86_64 -extern cpumask_var_t cpu_initialized_mask; extern cpumask_var_t cpu_sibling_setup_mask; #else /* CONFIG_X86_32 */ -extern cpumask_t cpu_initialized; extern cpumask_t cpu_sibling_setup_map; -#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) #endif /* CONFIG_X86_32 */ -- cgit v1.2.3 From 52811d8c9beb67da6bc4b770de3c4134376788a1 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 10 Jan 2009 12:58:50 +0530 Subject: x86: smp.h move cpu_sibling_setup_mask and cpu_sibling_setup_map declartion to cpumask.h Impact: cleanup Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpumask.h | 3 +++ arch/x86/include/asm/smp.h | 12 ------------ 2 files changed, 3 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index d4cfd120b84..26c6dad9047 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -8,16 +8,19 @@ extern cpumask_var_t cpu_callin_mask; extern cpumask_var_t cpu_callout_mask; extern cpumask_var_t cpu_initialized_mask; +extern cpumask_var_t cpu_sibling_setup_mask; #else /* CONFIG_X86_32 */ extern cpumask_t cpu_callin_map; extern cpumask_t 
cpu_callout_map; extern cpumask_t cpu_initialized; +extern cpumask_t cpu_sibling_setup_map; #define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) #define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) +#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 7d2a80319e8..a8cea7b0943 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -19,18 +19,6 @@ #include #include -#ifdef CONFIG_X86_64 - -extern cpumask_var_t cpu_sibling_setup_mask; - -#else /* CONFIG_X86_32 */ - -extern cpumask_t cpu_sibling_setup_map; - -#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) - -#endif /* CONFIG_X86_32 */ - extern int __cpuinit get_local_pda(int cpu); extern int smp_num_siblings; -- cgit v1.2.3 From 7106a5ab89c50c6b5aadea0850b40323804a922d Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Sat, 10 Jan 2009 23:00:22 -0500 Subject: x86-64: remove locked instruction from switch_to() Impact: micro-optimization The patch below removes an unnecessary locked instruction from switch_to(). TIF_FORK is only ever set in copy_thread() on initial process creation, and gets cleared during the first scheduling of the process. As such, it is safe to use an unlocked test for the flag within switch_to(). Signed-off-by: Benjamin LaHaise Signed-off-by: Ingo Molnar --- arch/x86/include/asm/system.h | 6 +++--- arch/x86/kernel/entry_64.S | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 8e626ea33a1..fa47b1e6a86 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -96,15 +96,15 @@ do { \ "thread_return:\n\t" \ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ - LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ - "jc ret_from_fork\n\t" \ + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ + "jnz ret_from_fork\n\t" \ RESTORE_CONTEXT \ : "=a" (last) \ : [next] "S" (next), [prev] "D" (prev), \ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ - [tif_fork] "i" (TIF_FORK), \ + [_tif_fork] "i" (_TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ : "memory", "cc" __EXTRA_CLOBBER) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e28c7a98779..38dd37458e4 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -408,6 +408,8 @@ END(save_paranoid) ENTRY(ret_from_fork) DEFAULT_FRAME + LOCK ; btr $TIF_FORK,TI_flags(%r8) + push kernel_eflags(%rip) CFI_ADJUST_CFA_OFFSET 8 popf # reset kernel eflags -- cgit v1.2.3 From dee4102a9a5882b4f7d5cc165ba29e8cc63cf92e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 11 Jan 2009 00:29:15 -0800 Subject: sparseirq: use kstat_irqs_cpu instead Impact: build fix Ingo Molnar wrote: > tip/arch/blackfin/kernel/irqchip.c: In function 'show_interrupts': > tip/arch/blackfin/kernel/irqchip.c:85: error: 'struct kernel_stat' has no member named 'irqs' > make[2]: *** [arch/blackfin/kernel/irqchip.o] Error 1 > make[2]: *** Waiting for unfinished jobs.... > So could move kstat_irqs array to irq_desc struct. 
(s390, m68k, sparc) are not touched yet, because they don't support genirq Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/blackfin/kernel/irqchip.c | 2 +- arch/frv/kernel/irq.c | 2 +- arch/h8300/kernel/irq.c | 4 ++-- arch/ia64/kernel/irq.c | 2 +- arch/m32r/kernel/irq.c | 2 +- arch/mips/kernel/irq.c | 2 +- arch/mn10300/kernel/irq.c | 2 +- arch/parisc/kernel/irq.c | 2 +- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/platforms/cell/interrupt.c | 2 +- arch/sh/kernel/irq.c | 2 +- arch/um/kernel/irq.c | 2 +- arch/xtensa/kernel/irq.c | 2 +- 13 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index ab8209cbbad..a4e12d1b379 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -82,7 +82,7 @@ int show_interrupts(struct seq_file *p, void *v) goto skip; seq_printf(p, "%3d: ", i); for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, " %8s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 73abae767fd..af3e824b91b 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -74,7 +74,7 @@ int show_interrupts(struct seq_file *p, void *v) if (action) { seq_printf(p, "%3d: ", i); for_each_present_cpu(cpu) - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); seq_printf(p, " %s", action->name); for (action = action->next; diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index ef4f0047067..74f8dd7b34d 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -183,7 +183,7 @@ asmlinkage void do_IRQ(int irq) #if defined(CONFIG_PROC_FS) int show_interrupts(struct seq_file *p, void *v) { - int i = *(loff_t *) v, j; + int i = *(loff_t *) v; struct irqaction * action; unsigned long flags; @@ -196,7 +196,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!action) goto unlock; seq_printf(p, "%3d: ",i); - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs(i)); seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, "-%-8s", irq_desc[i].name); seq_printf(p, " %s", action->name); diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index a58f64ca9f0..4f596613bff 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) { - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); } #endif seq_printf(p, " %14s", irq_desc[i].chip->name); diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 2aeae467009..8dfd31e87c4 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -49,7 +49,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 4b4007b3083..98f336dc3cd 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -108,7 +108,7 @@ int show_interrupts(struct 
seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 56c64ccc9c2..50fdb5c16e0 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -221,7 +221,7 @@ int show_interrupts(struct seq_file *p, void *v) if (action) { seq_printf(p, "%3d: ", i); for_each_present_cpu(cpu) - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); seq_printf(p, " %14s.%u", irq_desc[i].chip->name, (GxICR(i) & GxICR_LEVEL) >> GxICR_LEVEL_SHIFT); diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index ac2c822928c..704341b0b09 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -183,7 +183,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%3d: ", i); #ifdef CONFIG_SMP for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #else seq_printf(p, "%10u ", kstat_irqs(i)); #endif diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 23b8b5e36f9..17efb7118db 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -190,7 +190,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%3d: ", i); #ifdef CONFIG_SMP for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #else seq_printf(p, "%10u ", kstat_irqs(i)); #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 28c04dab263..1f0d774ad92 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -254,7 +254,7 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) goto out_eoi; } - kstat_cpu(cpu).irqs[irq]++; + kstat_incr_irqs_this_cpu(irq, desc); /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 64b7690c664..0080a1607aa 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) goto unlock; seq_printf(p, "%3d: ",i); for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, "-%-8s", irq_desc[i].name); seq_printf(p, " %s", action->name); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 3d7aad09b17..336b6156907 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 5fbcde59a92..f3b66fba5b8 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif 
seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); -- cgit v1.2.3 From 7f7ace0cda64c99599c23785f8979a072e118058 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 21:58:08 -0800 Subject: cpumask: update irq_desc to use cpumask_var_t Impact: reduce memory usage, use new cpumask API. Replace the affinity and pending_masks with cpumask_var_t's. This adds to the significant size reduction done with the SPARSE_IRQS changes. The added functions (init_alloc_desc_masks & init_copy_desc_masks) are in the include file so they can be inlined (and optimized out for the !CONFIG_CPUMASKS_OFFSTACK case.) [Naming chosen to be consistent with the other init*irq functions, as well as the backwards arg declaration of "from, to" instead of the more common "to, from" standard.] Includes a slight change to the declaration of struct irq_desc to embed the pending_mask within ifdef(CONFIG_SMP) to be consistent with other references, and some small changes to Xen. Tested: sparse/non-sparse/cpumask_offstack/non-cpumask_offstack/nonuma/nosmp on x86_64 Signed-off-by: Mike Travis Cc: Chris Wright Cc: Jeremy Fitzhardinge Cc: KOSAKI Motohiro Cc: Venkatesh Pallipadi Cc: virtualization@lists.osdl.org Cc: xen-devel@lists.xensource.com Cc: Yinghai Lu --- arch/x86/kernel/io_apic.c | 20 ++++++++++---------- arch/x86/kernel/irq_32.c | 2 +- arch/x86/kernel/irq_64.c | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536..1337eab60ec 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -356,7 +356,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) if (!cfg->move_in_progress) { /* it means that domain is not changed */ - if (!cpumask_intersects(&desc->affinity, mask)) + if (!cpumask_intersects(desc->affinity, mask)) cfg->move_desc_pending = 1; } } @@ -579,9 +579,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) if (assign_irq_vector(irq, cfg, mask)) return BAD_APICID; - cpumask_and(&desc->affinity, cfg->domain, mask); + cpumask_and(desc->affinity, cfg->domain, mask); set_extra_move_desc(desc, mask); - return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); + return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask); } static void @@ -2383,7 +2383,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) if (cfg->move_in_progress) send_cleanup_vector(cfg); - cpumask_copy(&desc->affinity, mask); + cpumask_copy(desc->affinity, mask); } static int migrate_irq_remapped_level_desc(struct irq_desc *desc) @@ -2405,11 +2405,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) } /* everthing is clear. 
we have right of way */ - migrate_ioapic_irq_desc(desc, &desc->pending_mask); + migrate_ioapic_irq_desc(desc, desc->pending_mask); ret = 0; desc->status &= ~IRQ_MOVE_PENDING; - cpumask_clear(&desc->pending_mask); + cpumask_clear(desc->pending_mask); unmask: unmask_IO_APIC_irq_desc(desc); @@ -2434,7 +2434,7 @@ static void ir_irq_migration(struct work_struct *work) continue; } - desc->chip->set_affinity(irq, &desc->pending_mask); + desc->chip->set_affinity(irq, desc->pending_mask); spin_unlock_irqrestore(&desc->lock, flags); } } @@ -2448,7 +2448,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, { if (desc->status & IRQ_LEVEL) { desc->status |= IRQ_MOVE_PENDING; - cpumask_copy(&desc->pending_mask, mask); + cpumask_copy(desc->pending_mask, mask); migrate_irq_remapped_level_desc(desc); return; } @@ -2516,7 +2516,7 @@ static void irq_complete_move(struct irq_desc **descp) /* domain has not changed, but affinity did */ me = smp_processor_id(); - if (cpu_isset(me, desc->affinity)) { + if (cpumask_test_cpu(me, desc->affinity)) { *descp = desc = move_irq_desc(desc, me); /* get the new one */ cfg = desc->chip_data; @@ -4039,7 +4039,7 @@ void __init setup_ioapic_dest(void) */ if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = &desc->affinity; + mask = desc->affinity; else mask = TARGET_CPUS; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 74b9ff7341e..e0f29be8ab0 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -248,7 +248,7 @@ void fixup_irqs(void) if (irq == 2) continue; - affinity = &desc->affinity; + affinity = desc->affinity; if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { printk("Breaking affinity for irq %i\n", irq); affinity = cpu_all_mask; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 63c88e6ec02..0b21cb1ea11 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -100,7 +100,7 @@ void fixup_irqs(void) /* interrupt's are disabled at this point */ spin_lock(&desc->lock); - affinity = &desc->affinity; + affinity = desc->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { spin_unlock(&desc->lock); -- cgit v1.2.3 From 4595f9620cda8a1e973588e743cf5f8436dd20c6 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 10 Jan 2009 21:58:09 -0800 Subject: x86: change flush_tlb_others to take a const struct cpumask Impact: reduce stack usage, use new cpumask API. This is made a little more tricky by uv_flush_tlb_others which actually alters its argument, for an IPI to be sent to the remaining cpus in the mask. I solve this by allocating a cpumask_var_t for this case and falling back to IPI should this fail. To eliminate temporaries in the caller, all flush_tlb_others implementations now do the this-cpu-elimination step themselves. Note also the curious "cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask)" which has been there since pre-git and yet f->flush_cpumask is always zero at this point. 
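For illustration, a condensed before/after of a typical caller (flush_tlb_current_task); this is only a sketch distilled from the tlb_32.c/tlb_64.c hunks below, showing the this-cpu-elimination step moving out of the caller and into the flush_tlb_others() implementations:

	/* before: an on-stack cpumask_t copy (512 bytes when NR_CPUS=4096) */
	cpumask_t cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	/* after: pass the mm's mask by pointer; the implementation itself
	 * filters out the calling CPU (and offline CPUs where needed) */
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
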
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/include/asm/paravirt.h | 8 +++-- arch/x86/include/asm/tlbflush.h | 8 ++--- arch/x86/include/asm/uv/uv_bau.h | 3 +- arch/x86/kernel/tlb_32.c | 67 +++++++++++++++++----------------------- arch/x86/kernel/tlb_64.c | 61 +++++++++++++++++++----------------- arch/x86/kernel/tlb_uv.c | 16 +++++----- arch/x86/xen/enlighten.c | 31 +++++++------------ 7 files changed, 93 insertions(+), 101 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ba3e2ff6aed..c26c6bf4da0 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -244,7 +244,8 @@ struct pv_mmu_ops { void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); void (*flush_tlb_single)(unsigned long addr); - void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, + void (*flush_tlb_others)(const struct cpumask *cpus, + struct mm_struct *mm, unsigned long va); /* Hooks for allocating and freeing a pagetable top-level */ @@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr) PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); } -static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, +static inline void flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) { - PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); + PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va); } static inline int paravirt_pgd_alloc(struct mm_struct *mm) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 0e7bbb54911..f4e1b550ce6 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, __flush_tlb(); } -static inline void native_flush_tlb_others(const cpumask_t *cpumask, +static inline void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { @@ -142,8 +142,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_mm(vma->vm_mm); } -void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, - unsigned long va); +void native_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va); #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 @@ -166,7 +166,7 @@ static inline void reset_lazy_tlbstate(void) #endif /* SMP */ #ifndef CONFIG_PARAVIRT -#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va) +#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va) #endif static inline void flush_tlb_kernel_range(unsigned long start, diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 50423c7b56b..74e6393bfdd 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -325,7 +325,8 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) #define cpubit_isset(cpu, bau_local_cpumask) \ test_bit((cpu), (bau_local_cpumask).bits) -extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long); +extern int uv_flush_tlb_others(struct cpumask *, + struct mm_struct *, unsigned long); extern void uv_bau_message_intr1(void); extern void uv_bau_timeout_intr1(void); diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index ce505464224..ec53818f4e3 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -20,7 +20,7 @@ 
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) * Optimizations Manfred Spraul */ -static cpumask_t flush_cpumask; +static cpumask_var_t flush_cpumask; static struct mm_struct *flush_mm; static unsigned long flush_va; static DEFINE_SPINLOCK(tlbstate_lock); @@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs) cpu = get_cpu(); - if (!cpu_isset(cpu, flush_cpumask)) + if (!cpumask_test_cpu(cpu, flush_cpumask)) goto out; /* * This was a BUG() but until someone can quote me the @@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs) } ack_APIC_irq(); smp_mb__before_clear_bit(); - cpu_clear(cpu, flush_cpumask); + cpumask_clear_cpu(cpu, flush_cpumask); smp_mb__after_clear_bit(); out: put_cpu_no_resched(); inc_irq_stat(irq_tlb_count); } -void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, - unsigned long va) +void native_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) { - cpumask_t cpumask = *cpumaskp; - /* - * A couple of (to be removed) sanity checks: - * - * - current CPU must not be in mask * - mask must exist :) */ - BUG_ON(cpus_empty(cpumask)); - BUG_ON(cpu_isset(smp_processor_id(), cpumask)); + BUG_ON(cpumask_empty(cpumask)); BUG_ON(!mm); -#ifdef CONFIG_HOTPLUG_CPU - /* If a CPU which we ran on has gone down, OK. */ - cpus_and(cpumask, cpumask, cpu_online_map); - if (unlikely(cpus_empty(cpumask))) - return; -#endif - /* * i'm not happy about this global shared spinlock in the * MM hot path, but we'll see how contended it is. @@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, */ spin_lock(&tlbstate_lock); + cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id())); +#ifdef CONFIG_HOTPLUG_CPU + /* If a CPU which we ran on has gone down, OK. */ + cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask); + if (unlikely(cpumask_empty(flush_cpumask))) { + spin_unlock(&tlbstate_lock); + return; + } +#endif flush_mm = mm; flush_va = va; - cpus_or(flush_cpumask, cpumask, flush_cpumask); /* * Make the above memory operations globally visible before @@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, * We have to send the IPI only to * CPUs affected. */ - send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); + send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR); - while (!cpus_empty(flush_cpumask)) + while (!cpumask_empty(flush_cpumask)) /* nothing. 
lockup detection does not belong here */ cpu_relax(); @@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; - cpumask_t cpu_mask; preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); local_flush_tlb(); - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); preempt_enable(); } void flush_tlb_mm(struct mm_struct *mm) { - cpumask_t cpu_mask; preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); if (current->active_mm == mm) { if (current->mm) @@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm) else leave_mm(smp_processor_id()); } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); preempt_enable(); } @@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; - cpumask_t cpu_mask; preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); if (current->active_mm == mm) { if (current->mm) @@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) leave_mm(smp_processor_id()); } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, va); - + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, va); preempt_enable(); } EXPORT_SYMBOL(flush_tlb_page); @@ -254,3 +239,9 @@ void reset_lazy_tlbstate(void) per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; } +static int init_flush_cpumask(void) +{ + alloc_cpumask_var(&flush_cpumask, GFP_KERNEL); + return 0; +} +early_initcall(init_flush_cpumask); diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index f8be6f1d2e4..38836aef51b 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -43,10 +43,10 @@ union smp_flush_state { struct { - cpumask_t flush_cpumask; struct mm_struct *flush_mm; unsigned long flush_va; spinlock_t tlbstate_lock; + DECLARE_BITMAP(flush_cpumask, NR_CPUS); }; char pad[SMP_CACHE_BYTES]; } ____cacheline_aligned; @@ -131,7 +131,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; f = &per_cpu(flush_state, sender); - if (!cpu_isset(cpu, f->flush_cpumask)) + if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) goto out; /* * This was a BUG() but until someone can quote me the @@ -153,19 +153,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) } out: ack_APIC_irq(); - cpu_clear(cpu, f->flush_cpumask); + cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); inc_irq_stat(irq_tlb_count); } -void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, - unsigned long va) +static void flush_tlb_others_ipi(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) { int sender; union smp_flush_state *f; - cpumask_t cpumask = *cpumaskp; - - if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va)) - return; /* Caller has disabled preemption */ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; @@ -180,7 +176,8 @@ void native_flush_tlb_others(const cpumask_t 
*cpumaskp, struct mm_struct *mm, f->flush_mm = mm; f->flush_va = va; - cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); + cpumask_andnot(to_cpumask(f->flush_cpumask), + cpumask, cpumask_of(smp_processor_id())); /* * Make the above memory operations globally visible before @@ -191,9 +188,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, * We have to send the IPI only to * CPUs affected. */ - send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); + send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); - while (!cpus_empty(f->flush_cpumask)) + while (!cpumask_empty(to_cpumask(f->flush_cpumask))) cpu_relax(); f->flush_mm = NULL; @@ -201,6 +198,24 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, spin_unlock(&f->tlbstate_lock); } +void native_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) +{ + if (is_uv_system()) { + cpumask_var_t after_uv_flush; + + if (alloc_cpumask_var(&after_uv_flush, GFP_ATOMIC)) { + cpumask_andnot(after_uv_flush, + cpumask, cpumask_of(smp_processor_id())); + if (!uv_flush_tlb_others(after_uv_flush, mm, va)) + flush_tlb_others_ipi(after_uv_flush, mm, va); + free_cpumask_var(after_uv_flush); + return; + } + } + flush_tlb_others_ipi(cpumask, mm, va); +} + static int __cpuinit init_smp_flush(void) { int i; @@ -215,25 +230,18 @@ core_initcall(init_smp_flush); void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; - cpumask_t cpu_mask; preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); local_flush_tlb(); - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); preempt_enable(); } void flush_tlb_mm(struct mm_struct *mm) { - cpumask_t cpu_mask; - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); if (current->active_mm == mm) { if (current->mm) @@ -241,8 +249,8 @@ void flush_tlb_mm(struct mm_struct *mm) else leave_mm(smp_processor_id()); } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); preempt_enable(); } @@ -250,11 +258,8 @@ void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; - cpumask_t cpu_mask; preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); if (current->active_mm == mm) { if (current->mm) @@ -263,8 +268,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) leave_mm(smp_processor_id()); } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, va); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, va); preempt_enable(); } diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index f885023167e..690dcf1a27d 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc, * The cpumaskp mask contains the cpus the broadcast was sent to. * * Returns 1 if all remote flushing was done. The mask is zeroed. - * Returns 0 if some remote flushing remains to be done. The mask is left - * unchanged. 
+ * Returns 0 if some remote flushing remains to be done. The mask will have + * some bits still set. */ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, - cpumask_t *cpumaskp) + struct cpumask *cpumaskp) { int completion_status = 0; int right_shift; @@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, * Success, so clear the remote cpu's from the mask so we don't * use the IPI method of shootdown on them. */ - for_each_cpu_mask(bit, *cpumaskp) { + for_each_cpu(bit, cpumaskp) { blade = uv_cpu_to_blade_id(bit); if (blade == this_blade) continue; - cpu_clear(bit, *cpumaskp); + cpumask_clear_cpu(bit, cpumaskp); } - if (!cpus_empty(*cpumaskp)) + if (!cpumask_empty(cpumaskp)) return 0; return 1; } @@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, * Returns 1 if all remote flushing was done. * Returns 0 if some remote flushing remains to be done. */ -int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, +int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm, unsigned long va) { int i; @@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); i = 0; - for_each_cpu_mask(bit, *cpumaskp) { + for_each_cpu(bit, cpumaskp) { blade = uv_cpu_to_blade_id(bit); BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); if (blade == this_blade) { diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bea215230b2..965539ec425 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr) preempt_enable(); } -static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm, - unsigned long va) +static void xen_flush_tlb_others(const struct cpumask *cpus, + struct mm_struct *mm, unsigned long va) { struct { struct mmuext_op op; - cpumask_t mask; + DECLARE_BITMAP(mask, NR_CPUS); } *args; - cpumask_t cpumask = *cpus; struct multicall_space mcs; - /* - * A couple of (to be removed) sanity checks: - * - * - current CPU must not be in mask - * - mask must exist :) - */ - BUG_ON(cpus_empty(cpumask)); - BUG_ON(cpu_isset(smp_processor_id(), cpumask)); + BUG_ON(cpumask_empty(cpus)); BUG_ON(!mm); - /* If a CPU which we ran on has gone down, OK. */ - cpus_and(cpumask, cpumask, cpu_online_map); - if (cpus_empty(cpumask)) - return; - mcs = xen_mc_entry(sizeof(*args)); args = mcs.args; - args->mask = cpumask; - args->op.arg2.vcpumask = &args->mask; + args->op.arg2.vcpumask = to_cpumask(args->mask); + + /* Remove us, and any offline CPUS. */ + cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); + if (unlikely(cpumask_empty(to_cpumask(args->mask)))) + goto issue; if (va == TLB_FLUSH_ALL) { args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; @@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm, MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); +issue: xen_mc_issue(PARAVIRT_LAZY_MMU); } -- cgit v1.2.3 From 0e21990ae7ee11af94f44f240b06e520cf1505d4 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 21:58:10 -0800 Subject: SGI UV cpumask: use static temp cpumask in flush_tlb Impact: Improve tlb flush performance for UV Calling alloc_cpumask_var a zillion times a second does affect performance. Replace with static cpumask. 
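In code, the replacement looks roughly like this (a sketch of the tlb_64.c hunk below; get_cpu_var() disables preemption, so each CPU has exclusive use of its own temporary mask until the matching put_cpu_var()):

	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);	/* one NR_CPUS-bit mask per CPU */

	struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask);

	cpumask_andnot(after_uv_flush, cpumask, cpumask_of(smp_processor_id()));
	/* ... hand the mask to uv_flush_tlb_others(), falling back to
	 *     flush_tlb_others_ipi() for whatever is left over ... */
	put_cpu_var(flush_tlb_mask);
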
Note: when CONFIG_X86_UV is defined, this extra PER_CPU memory will be optimized out for non-UV configs as is_uv_system() will then return a constant 0. Signed-off-by: Mike Travis --- arch/x86/kernel/tlb_64.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 38836aef51b..7a3f9891302 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -202,16 +202,17 @@ void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { if (is_uv_system()) { - cpumask_var_t after_uv_flush; - - if (alloc_cpumask_var(&after_uv_flush, GFP_ATOMIC)) { - cpumask_andnot(after_uv_flush, - cpumask, cpumask_of(smp_processor_id())); - if (!uv_flush_tlb_others(after_uv_flush, mm, va)) - flush_tlb_others_ipi(after_uv_flush, mm, va); - free_cpumask_var(after_uv_flush); - return; - } + /* FIXME: could be an percpu_alloc'd thing */ + static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask); + + cpumask_andnot(after_uv_flush, cpumask, + cpumask_of(smp_processor_id())); + if (!uv_flush_tlb_others(after_uv_flush, mm, va)) + flush_tlb_others_ipi(after_uv_flush, mm, va); + + put_cpu_var(flush_tlb_uv_cpumask); + return; } flush_tlb_others_ipi(cpumask, mm, va); } -- cgit v1.2.3 From a1c33bbeb7061f3ed39103c385844474eaa8f921 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 21:58:10 -0800 Subject: x86: cleanup remaining cpumask_t code in mce_amd_64.c Impact: Reduce memory usage, use new cpumask API. Use cpumask_var_t for 'cpus' cpumask in struct threshold_bank and update remaining old cpumask_t functions to new cpumask API. Signed-off-by: Mike Travis --- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 8ae8c4ff094..4772e91e824 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = { struct threshold_bank { struct kobject *kobj; struct threshold_block *blocks; - cpumask_t cpus; + cpumask_var_t cpus; }; static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); @@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) #ifdef CONFIG_SMP if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ - i = first_cpu(per_cpu(cpu_core_map, cpu)); + i = cpumask_first(&per_cpu(cpu_core_map, cpu)); /* first core not up yet */ if (cpu_data(i).cpu_core_id) @@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out; - b->cpus = per_cpu(cpu_core_map, cpu); + cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); per_cpu(threshold_banks, cpu)[bank] = b; goto out; } @@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) err = -ENOMEM; goto out; } + if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { + kfree(b); + err = -ENOMEM; + goto out; + } b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); if (!b->kobj) goto out_free; #ifndef CONFIG_SMP - b->cpus = CPU_MASK_ALL; + cpumask_setall(b->cpus); #else - b->cpus = per_cpu(cpu_core_map, cpu); + cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); #endif per_cpu(threshold_banks, cpu)[bank] 
= b; @@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out_free; - for_each_cpu_mask_nr(i, b->cpus) { + for_each_cpu(i, b->cpus) { if (i == cpu) continue; @@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) out_free: per_cpu(threshold_banks, cpu)[bank] = NULL; + free_cpumask_var(b->cpus); kfree(b); out: return err; @@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) #endif /* remove all sibling symlinks before unregistering */ - for_each_cpu_mask_nr(i, b->cpus) { + for_each_cpu(i, b->cpus) { if (i == cpu) continue; @@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) free_out: kobject_del(b->kobj); kobject_put(b->kobj); + free_cpumask_var(b->cpus); kfree(b); per_cpu(threshold_banks, cpu)[bank] = NULL; } -- cgit v1.2.3 From f9b90566cd46e19f670a1e60a717ff243f060a8a Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 21:58:10 -0800 Subject: x86: reduce stack usage in init_intel_cacheinfo Impact: reduce stack usage. init_intel_cacheinfo() does not use the cpumask so define a subset of struct _cpuid4_info (_cpuid4_info_regs) that can be used instead. Signed-off-by: Mike Travis --- arch/x86/kernel/cpu/intel_cacheinfo.c | 63 ++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 48533d77be7..58527a9fc40 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -132,7 +132,16 @@ struct _cpuid4_info { union _cpuid4_leaf_ecx ecx; unsigned long size; unsigned long can_disable; - cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ + DECLARE_BITMAP(shared_cpu_map, NR_CPUS); +}; + +/* subset of above _cpuid4_info w/o shared_cpu_map */ +struct _cpuid4_info_regs { + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned long size; + unsigned long can_disable; }; #ifdef CONFIG_PCI @@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, } static void __cpuinit -amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) +amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) { if (index < 3) return; @@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) } static int -__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) +__cpuinit cpuid4_cache_lookup_regs(int index, + struct _cpuid4_info_regs *this_leaf) { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; @@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) return 0; } +static int +__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) +{ + struct _cpuid4_info_regs *leaf_regs = + (struct _cpuid4_info_regs *)this_leaf; + + return cpuid4_cache_lookup_regs(index, leaf_regs); +} + static int __cpuinit find_num_cache_leaves(void) { unsigned int eax, ebx, ecx, edx; @@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) * parameters cpuid leaf to find the cache details */ for (i = 0; i < num_cache_leaves; i++) { - struct _cpuid4_info this_leaf; - + struct _cpuid4_info_regs this_leaf; int retval; - retval = cpuid4_cache_lookup(i, &this_leaf); + retval = cpuid4_cache_lookup_regs(i, &this_leaf); if (retval >= 0) { switch(this_leaf.eax.split.level) { case 1: @@ -491,17 
+509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; if (num_threads_sharing == 1) - cpu_set(cpu, this_leaf->shared_cpu_map); + cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); else { index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) { if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { - cpu_set(i, this_leaf->shared_cpu_map); + cpumask_set_cpu(i, + to_cpumask(this_leaf->shared_cpu_map)); if (i != cpu && per_cpu(cpuid4_info, i)) { - sibling_leaf = CPUID4_INFO_IDX(i, index); - cpu_set(cpu, sibling_leaf->shared_cpu_map); + sibling_leaf = + CPUID4_INFO_IDX(i, index); + cpumask_set_cpu(cpu, to_cpumask( + sibling_leaf->shared_cpu_map)); } } } @@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); - for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { + for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); - cpu_clear(cpu, sibling_leaf->shared_cpu_map); + cpumask_clear_cpu(cpu, + to_cpumask(sibling_leaf->shared_cpu_map)); } } #else @@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, int n = 0; if (len > 1) { - cpumask_t *mask = &this_leaf->shared_cpu_map; + const struct cpumask *mask; + mask = to_cpumask(this_leaf->shared_cpu_map); n = type? cpulist_scnprintf(buf, len-2, mask) : cpumask_scnprintf(buf, len-2, mask); @@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node) static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) { - int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); + const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); + int node = cpu_to_node(cpumask_first(mask)); struct pci_dev *dev = NULL; ssize_t ret = 0; int i; @@ -718,7 +742,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count) { - int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); + const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); + int node = cpu_to_node(cpumask_first(mask)); struct pci_dev *dev = NULL; unsigned int ret, index, val; @@ -863,7 +888,7 @@ err_out: return -ENOMEM; } -static cpumask_t cache_dev_map = CPU_MASK_NONE; +static DECLARE_BITMAP(cache_dev_map, NR_CPUS); /* Add/Remove cache interface for CPU device */ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) @@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) } kobject_uevent(&(this_object->kobj), KOBJ_ADD); } - cpu_set(cpu, cache_dev_map); + cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); return 0; @@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) if (per_cpu(cpuid4_info, cpu) == NULL) return; - if (!cpu_isset(cpu, cache_dev_map)) + if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) return; - cpu_clear(cpu, cache_dev_map); + cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); for (i = 0; i < num_cache_leaves; i++) kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); -- cgit v1.2.3 From 9594949b060efe86ecaa1a66839232a3b9800bc9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 22:24:06 -0800 Subject: irq: change references from NR_IRQS to nr_irqs Impact: preparation, cleanup, add KERN_INFO printk Modify references from 
NR_IRQS to nr_irqs as the later will become variable-sized based on nr_cpu_ids when CONFIG_SPARSE_IRQS=y. Signed-off-by: Mike Travis --- arch/x86/kernel/io_apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1337eab60ec..ae80638012d 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3183,7 +3183,7 @@ unsigned int create_irq_nr(unsigned int irq_want) irq = 0; spin_lock_irqsave(&vector_lock, flags); - for (new = irq_want; new < NR_IRQS; new++) { + for (new = irq_want; new < nr_irqs; new++) { if (platform_legacy_irq(new)) continue; -- cgit v1.2.3 From 9332fccdedf8e09448f3b69b624211ae879f6c45 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sat, 10 Jan 2009 22:24:07 -0800 Subject: irq: initialize nr_irqs based on nr_cpu_ids Impact: Reduce memory usage. This is the second half of the changes to make the irq_desc_ptrs be variable sized based on nr_cpu_ids. This is done by adding a new "max_nr_irqs" macro to irq_vectors.h (and a dummy in irqnr.h) to return a max NR_IRQS value based on NR_CPUS or nr_cpu_ids. This necessitated moving the define of MAX_IO_APICS to a separate file (asm/apicnum.h) so it could be included without the baggage of the other asm/apicdef.h declarations. Signed-off-by: Mike Travis --- arch/x86/include/asm/apicdef.h | 8 ++------ arch/x86/include/asm/apicnum.h | 12 ++++++++++++ arch/x86/include/asm/irq_vectors.h | 16 +++++++++++----- 3 files changed, 25 insertions(+), 11 deletions(-) create mode 100644 arch/x86/include/asm/apicnum.h (limited to 'arch') diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 63134e31e8b..1a6454ef7f6 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -132,12 +132,8 @@ #define APIC_BASE_MSR 0x800 #define X2APIC_ENABLE (1UL << 10) -#ifdef CONFIG_X86_32 -# define MAX_IO_APICS 64 -#else -# define MAX_IO_APICS 128 -# define MAX_LOCAL_APIC 32768 -#endif +/* get MAX_IO_APICS */ +#include /* * All x86-64 systems are xAPIC compatible. diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h new file mode 100644 index 00000000000..82f613c607c --- /dev/null +++ b/arch/x86/include/asm/apicnum.h @@ -0,0 +1,12 @@ +#ifndef _ASM_X86_APICNUM_H +#define _ASM_X86_APICNUM_H + +/* define MAX_IO_APICS */ +#ifdef CONFIG_X86_32 +# define MAX_IO_APICS 64 +#else +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 32768 +#endif + +#endif /* _ASM_X86_APICNUM_H */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index f7ff65032b9..602361ad0e7 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -105,6 +105,8 @@ #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) +#include /* need MAX_IO_APICS */ + #ifndef CONFIG_SPARSE_IRQ # if NR_CPUS < MAX_IO_APICS # define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) @@ -112,11 +114,15 @@ # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) # endif #else -# if (8 * NR_CPUS) > (32 * MAX_IO_APICS) -# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS)) -# else -# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) -# endif + +/* defined as a macro so nr_irqs = max_nr_irqs(nr_cpu_ids) can be used */ +# define max_nr_irqs(nr_cpus) \ + ((8 * nr_cpus) > (32 * MAX_IO_APICS) ? 
\ + (NR_VECTORS + (8 * NR_CPUS)) : \ + (NR_VECTORS + (32 * MAX_IO_APICS))) \ + +# define NR_IRQS max_nr_irqs(NR_CPUS) + #endif #elif defined(CONFIG_X86_VOYAGER) -- cgit v1.2.3 From 92296c6d6e908c35fca287a21af27be814af9c75 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 11 Jan 2009 09:22:58 -0800 Subject: cpumask, irq: non-x86 build failures Ingo Molnar wrote: > All non-x86 architectures fail to build: > > In file included from /home/mingo/tip/include/linux/random.h:11, > from /home/mingo/tip/include/linux/stackprotector.h:6, > from /home/mingo/tip/init/main.c:17: > /home/mingo/tip/include/linux/irqnr.h:26:63: error: asm/irq_vectors.h: No such file or directory Do not include asm/irq_vectors.h in generic code - it's not available on all architectures. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apicdef.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 1a6454ef7f6..63134e31e8b 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -132,8 +132,12 @@ #define APIC_BASE_MSR 0x800 #define X2APIC_ENABLE (1UL << 10) -/* get MAX_IO_APICS */ -#include +#ifdef CONFIG_X86_32 +# define MAX_IO_APICS 64 +#else +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 32768 +#endif /* * All x86-64 systems are xAPIC compatible. -- cgit v1.2.3 From dd3feda7748b4c2739de47daaaa387fb01926c15 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 14:44:29 +0530 Subject: x86: microcode_intel.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of ERROR: trailing whitespace ERROR: "(foo*)" should be "(foo *)" total: 3 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/microcode_intel.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index b7f4c929e61..5e9f4fc5138 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c @@ -87,9 +87,9 @@ #include #include #include +#include #include -#include #include #include @@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf) return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1; } -static inline int +static inline int update_match_revision(struct microcode_header_intel *mc_header, int rev) { return (mc_header->rev <= rev) ? 
0 : 1; @@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device) return ret; } - ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, - &get_ucode_fw); + ret = generic_load_microcode(cpu, (void *)firmware->data, + firmware->size, &get_ucode_fw); release_firmware(firmware); @@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size) /* We should bind the task to the CPU */ BUG_ON(cpu != raw_smp_processor_id()); - return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user); + return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); } static void microcode_fini_cpu(int cpu) -- cgit v1.2.3 From 448dd2fa3ec915ad4325868ef8bb9b9490d9f6a9 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 14:45:14 +0530 Subject: x86: msr.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of total: 0 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/msr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 726266695b2..3cf3413ec62 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -35,10 +35,10 @@ #include #include #include +#include #include #include -#include #include static struct class *msr_class; -- cgit v1.2.3 From e17029ad69c7b1518413c00f793d89bb77b8527b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 14:46:03 +0530 Subject: x86: module_32.c fix style problems Impact: cleanup Fix: ERROR: code indent should use tabs where possible ERROR: trailing whitespace ERROR: spaces required around that '=' (ctx:VxW) total: 3 errors, 0 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/module_32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c index 3db0a5442eb..0edd819050e 100644 --- a/arch/x86/kernel/module_32.c +++ b/arch/x86/kernel/module_32.c @@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region) { vfree(module_region); /* FIXME: If module_region == mod->init_region, trim exception - table entries. */ + table entries. */ } /* We don't need anything special. 
*/ @@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr, *para = NULL; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { if (!strcmp(".text", secstrings + s->sh_name)) text = s; if (!strcmp(".altinstructions", secstrings + s->sh_name)) alt = s; if (!strcmp(".smp_locks", secstrings + s->sh_name)) - locks= s; + locks = s; if (!strcmp(".parainstructions", secstrings + s->sh_name)) para = s; } -- cgit v1.2.3 From 3b9dc9f2f123286aaade54c45fef6723d587c664 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 14:46:48 +0530 Subject: x86: module_64.c fix style problems Impact: cleanup Fix: ERROR: trailing whitespace ERROR: code indent should use tabs where possible WARNING: %Ld/%Lu are not-standard C, use %lld/%llu WARNING: printk() should include KERN_ facility level ERROR: spaces required around that '=' (ctx:VxW) total: 13 errors, 2 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/module_64.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c index 6ba87830d4b..c23880b90b5 100644 --- a/arch/x86/kernel/module_64.c +++ b/arch/x86/kernel/module_64.c @@ -30,14 +30,14 @@ #include #include -#define DEBUGP(fmt...) +#define DEBUGP(fmt...) #ifndef CONFIG_UML void module_free(struct module *mod, void *module_region) { vfree(module_region); /* FIXME: If module_region == mod->init_region, trim exception - table entries. */ + table entries. */ } void *module_alloc(unsigned long size) @@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; Elf64_Sym *sym; void *loc; - u64 val; + u64 val; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); @@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + ELF64_R_SYM(rel[i].r_info); - DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", - (int)ELF64_R_TYPE(rel[i].r_info), - sym->st_value, rel[i].r_addend, (u64)loc); + DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", + (int)ELF64_R_TYPE(rel[i].r_info), + sym->st_value, rel[i].r_addend, (u64)loc); - val = sym->st_value + rel[i].r_addend; + val = sym->st_value + rel[i].r_addend; switch (ELF64_R_TYPE(rel[i].r_info)) { case R_X86_64_NONE: @@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, if ((s64)val != *(s32 *)loc) goto overflow; break; - case R_X86_64_PC32: + case R_X86_64_PC32: val -= (u64)loc; *(u32 *)loc = val; #if 0 if ((s64)val != *(s32 *)loc) - goto overflow; + goto overflow; #endif break; default: - printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n", + printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n", me->name, ELF64_R_TYPE(rel[i].r_info)); return -ENOEXEC; } @@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return 0; overflow: - printk(KERN_ERR "overflow in relocation type %d val %Lx\n", + printk(KERN_ERR "overflow in relocation type %d val %Lx\n", (int)ELF64_R_TYPE(rel[i].r_info), val); printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", me->name); @@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs, unsigned int relsec, struct module *me) { - printk("non add relocation not supported\n"); + printk(KERN_ERR "non add relocation not supported\n"); return -ENOSYS; -} +} int 
module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) + const Elf_Shdr *sechdrs, + struct module *me) { const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, *para = NULL; @@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr, if (!strcmp(".altinstructions", secstrings + s->sh_name)) alt = s; if (!strcmp(".smp_locks", secstrings + s->sh_name)) - locks= s; + locks = s; if (!strcmp(".parainstructions", secstrings + s->sh_name)) para = s; } -- cgit v1.2.3 From 42bb8cc5e81028e217105299001070d57eb84ad7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 9 Jan 2009 12:17:40 -0800 Subject: x86: hpet: allow force enable on ICH10 HPET Intel "Smackover" x58 BIOS don't have HPET enabled in the BIOS, so allow to force enable it at least. The register layout is the same as in other recent ICHs, so all the code can be reused. Using numerical PCI-ID because it's unlikely the PCI-ID will be used anywhere else. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/kernel/quirks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 309949e9e1c..697d1b78cfb 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -172,7 +172,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, ich_force_enable_hpet); - +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */ + ich_force_enable_hpet); static struct pci_dev *cached_dev; -- cgit v1.2.3 From 0b0f0b1c2c87de299df6f92a8ffc0a73bd1bb960 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 11 Jan 2009 13:35:56 -0800 Subject: sparseirq: use kstat_irqs_cpu on non-x86 architectures too so we could move kstat_irqs array to irq_desc struct. 
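The conversion itself is mechanical; in a typical show_interrupts() loop it amounts to the following (sketch only, with i the irq number and j the cpu as in the hunks below — note the accessor also swaps the argument order to (irq, cpu)):

	/* old: open-coded access into the per-CPU kernel_stat */
	seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);

	/* new: the accessor hides where the counters live, so the irqs[]
	 * array can later be moved into struct irq_desc */
	seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
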
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/alpha/kernel/irq.c | 2 +- arch/alpha/kernel/irq_alpha.c | 2 +- arch/arm/kernel/irq.c | 2 +- arch/arm/mach-ns9xxx/irq.c | 3 +-- arch/avr32/kernel/irq.c | 2 +- arch/cris/kernel/irq.c | 2 +- arch/mips/kernel/irq.c | 2 +- 7 files changed, 7 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 703731accda..430550bd1eb 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -90,7 +90,7 @@ show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(irq)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[irq].chip->typename); seq_printf(p, " %c%s", diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index e16aeb6e79e..67c19f8a994 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -64,7 +64,7 @@ do_entInt(unsigned long type, unsigned long vector, smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { - kstat_cpu(cpu).irqs[RTC_IRQ]++; + kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ)); } else { handle_irq(RTC_IRQ); } diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 7141cee1fab..a285e4a636a 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%3d: ", i); for_each_present_cpu(cpu) - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index 22e0eb6e9ec..feb0e54a91d 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c @@ -63,7 +63,6 @@ static struct irq_chip ns9xxx_chip = { #else static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) { - unsigned int cpu = smp_processor_id(); struct irqaction *action; irqreturn_t action_ret; @@ -72,7 +71,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) BUG_ON(desc->status & IRQ_INPROGRESS); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); - kstat_cpu(cpu).irqs[irq]++; + kstat_incr_irqs_this_cpu(irq, desc); action = desc->action; if (unlikely(!action || (desc->status & IRQ_DISABLED))) diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c index a8e767d836a..9f572229d31 100644 --- a/arch/avr32/kernel/irq.c +++ b/arch/avr32/kernel/irq.c @@ -58,7 +58,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%3d: ", i); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); seq_printf(p, " %8s", irq_desc[i].chip->name ? 
: "-"); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 2dfac8c7909..7f642fcffbf 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 98f336dc3cd..7b845ba9dff 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -108,7 +108,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(i, j)); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); -- cgit v1.2.3 From e65e49d0f3714f4a6a42f6f6a19926ba33fcda75 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 Jan 2009 15:27:13 -0800 Subject: irq: update all arches for new irq_desc Impact: cleanup, update to new cpumask API Irq_desc.affinity and irq_desc.pending_mask are now cpumask_var_t's so access to them should be using the new cpumask API. Signed-off-by: Mike Travis --- arch/alpha/kernel/irq.c | 2 +- arch/arm/kernel/irq.c | 18 ++++++++++++------ arch/arm/oprofile/op_model_mpcore.c | 2 +- arch/blackfin/kernel/irqchip.c | 5 +++++ arch/ia64/kernel/iosapic.c | 2 +- arch/ia64/kernel/irq.c | 4 ++-- arch/ia64/kernel/msi_ia64.c | 4 ++-- arch/ia64/sn/kernel/msi_sn.c | 2 +- arch/mips/include/asm/irq.h | 2 +- arch/mips/kernel/irq-gic.c | 2 +- arch/mips/kernel/smtc.c | 2 +- arch/mips/mti-malta/malta-smtc.c | 5 +++-- arch/parisc/kernel/irq.c | 8 ++++---- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/platforms/pseries/xics.c | 5 +++-- arch/powerpc/sysdev/mpic.c | 3 ++- arch/sparc/kernel/irq_64.c | 5 +++-- 17 files changed, 44 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 703731accda..7bc7489223f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq) cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); last_cpu = cpu; - irq_desc[irq].affinity = cpumask_of_cpu(cpu); + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); return 0; } diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 7141cee1fab..4bb723eadad 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = { .lock = SPIN_LOCK_UNLOCKED }; +#ifdef CONFIG_CPUMASK_OFFSTACK +/* We are not allocating bad_irq_desc.affinity or .pending_mask */ +#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK." +#endif + /* * do_IRQ handles all hardware IRQ's. Decoded IRQs should not * come via this function. 
Instead, they should provide their @@ -161,7 +166,7 @@ void __init init_IRQ(void) irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; #ifdef CONFIG_SMP - bad_irq_desc.affinity = CPU_MASK_ALL; + cpumask_setall(bad_irq_desc.affinity); bad_irq_desc.cpu = smp_processor_id(); #endif init_arch_irq(); @@ -191,15 +196,16 @@ void migrate_irqs(void) struct irq_desc *desc = irq_desc + i; if (desc->cpu == cpu) { - unsigned int newcpu = any_online_cpu(desc->affinity); - - if (newcpu == NR_CPUS) { + unsigned int newcpu = cpumask_any_and(desc->affinity, + cpu_online_mask); + if (newcpu >= nr_cpu_ids) { if (printk_ratelimit()) printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", i, cpu); - cpus_setall(desc->affinity); - newcpu = any_online_cpu(desc->affinity); + cpumask_setall(desc->affinity); + newcpu = cpumask_any_and(desc->affinity, + cpu_online_mask); } route_irq(desc, i, newcpu); diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 6d6bd589924..853d42bb868 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c @@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu) const struct cpumask *mask = cpumask_of(cpu); spin_lock_irq(&desc->lock); - desc->affinity = *mask; + cpumask_copy(desc->affinity, mask); desc->chip->set_affinity(irq, mask); spin_unlock_irq(&desc->lock); } diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index ab8209cbbad..5780d6df154 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = { #endif }; +#ifdef CONFIG_CPUMASK_OFFSTACK +/* We are not allocating a variable-sized bad_irq_desc.affinity */ +#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK." +#endif + int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 5cfd3d91001..006ad366a45 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi) if (iosapic_intr_info[irq].count == 0) { #ifdef CONFIG_SMP /* Clear affinity */ - cpus_setall(idesc->affinity); + cpumask_setall(idesc->affinity); #endif /* Clear the interrupt information */ iosapic_intr_info[irq].dest = 0; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index a58f64ca9f0..226233a6fa1 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... 
NR_IRQS-1] = 1 }; void set_irq_affinity_info (unsigned int irq, int hwid, int redir) { if (irq < NR_IRQS) { - cpumask_copy(&irq_desc[irq].affinity, + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu_logical_id(hwid))); irq_redir[irq] = (char) (redir & 0xff); } @@ -148,7 +148,7 @@ static void migrate_irqs(void) if (desc->status == IRQ_PER_CPU) continue; - if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) + if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) >= nr_cpu_ids) { /* * Save it for phase 2 processing diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 89033933903..dcb6b7c51ea 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, msg.data = data; write_msi_msg(irq, &msg); - irq_desc[irq].affinity = cpumask_of_cpu(cpu); + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); } #endif /* CONFIG_SMP */ @@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); dmar_msi_write(irq, &msg); - irq_desc[irq].affinity = *mask; + cpumask_copy(irq_desc[irq].affinity, mask); } #endif /* CONFIG_SMP */ diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index ca553b0429c..81e428943d7 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); write_msi_msg(irq, &msg); - irq_desc[irq].affinity = *cpu_mask; + cpumask_copy(irq_desc[irq].affinity, cpu_mask); } #endif /* CONFIG_SMP */ diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index abc62aa744a..3214ade02d1 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq); */ #define IRQ_AFFINITY_HOOK(irq) \ do { \ - if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \ + if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\ smtc_forward_irq(irq); \ irq_exit(); \ return; \ diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 494a49a317e..87deb8f6c45 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); } - irq_desc[irq].affinity = *cpumask; + cpumask_copy(irq_desc[irq].affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); } diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index b6cca01ff82..d2c1ab12425 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq) * and efficiency, we just pick the easiest one to find. 
*/ - target = first_cpu(irq_desc[irq].affinity); + target = cpumask_first(irq_desc[irq].affinity); /* * We depend on the platform code to have correctly processed diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index aabd7274507..5ba31888fef 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = { void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) { - cpumask_t tmask = *affinity; + cpumask_t tmask; int cpu = 0; void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); @@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) * be made to forward to an offline "CPU". */ + cpumask_copy(&tmask, affinity); for_each_cpu(cpu, affinity) { if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) cpu_clear(cpu, tmask); } - irq_desc[irq].affinity = tmask; + cpumask_copy(irq_desc[irq].affinity, &tmask); if (cpus_empty(tmask)) /* diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index ac2c822928c..49482806863 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest) if (CHECK_IRQ_PER_CPU(irq)) { /* Bad linux design decision. The mask has already * been set; we must reset it */ - irq_desc[irq].affinity = CPU_MASK_ALL; + cpumask_setall(irq_desc[irq].affinity); return -EINVAL; } @@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) if (cpu_check_affinity(irq, dest)) return; - irq_desc[irq].affinity = *dest; + cpumask_copy(irq_desc[irq].affinity, dest); } #endif @@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide) unsigned long txn_affinity_addr(unsigned int irq, int cpu) { #ifdef CONFIG_SMP - irq_desc[irq].affinity = cpumask_of_cpu(cpu); + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); #endif return per_cpu(cpu_data, cpu).txn_addr; @@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) irq = eirr_to_irq(eirr_val); #ifdef CONFIG_SMP - dest = irq_desc[irq].affinity; + cpumask_copy(&dest, irq_desc[irq].affinity); if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) && !cpu_isset(smp_processor_id(), dest)) { int cpu = first_cpu(dest); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 23b8b5e36f9..ad1e5ac721d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map) if (irq_desc[irq].status & IRQ_PER_CPU) continue; - cpus_and(mask, irq_desc[irq].affinity, map); + cpumask_and(&mask, irq_desc[irq].affinity, &map); if (any_online_cpu(mask) == NR_CPUS) { printk("Breaking affinity for irq %i\n", irq); mask = map; diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 84e058f1e1c..80b513449f4 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check) { int server; /* For the moment only implement delivery to all cpus or one cpu */ - cpumask_t cpumask = irq_desc[virq].affinity; + cpumask_t cpumask; cpumask_t tmp = CPU_MASK_NONE; + cpumask_copy(&cpumask, irq_desc[virq].affinity); if (!distribute_irqs) return default_server; @@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void) virq, cpu); /* Reset affinity to all cpus */ - irq_desc[virq].affinity = CPU_MASK_ALL; + cpumask_setall(irq_desc[virq].affinity); 
desc->chip->set_affinity(virq, cpu_all_mask); unlock: spin_unlock_irqrestore(&desc->lock, flags); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 3e0d89dcdba..0afd21f9a22 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic) #ifdef CONFIG_SMP static int irq_choose_cpu(unsigned int virt_irq) { - cpumask_t mask = irq_desc[virt_irq].affinity; + cpumask_t mask; int cpuid; + cpumask_copy(&mask, irq_desc[virt_irq].affinity); if (cpus_equal(mask, CPU_MASK_ALL)) { static int irq_rover; static DEFINE_SPINLOCK(irq_rover_lock); diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index cab8e028687..4ac5c651e00 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -247,9 +247,10 @@ struct irq_handler_data { #ifdef CONFIG_SMP static int irq_choose_cpu(unsigned int virt_irq) { - cpumask_t mask = irq_desc[virt_irq].affinity; + cpumask_t mask; int cpuid; + cpumask_copy(&mask, irq_desc[virt_irq].affinity); if (cpus_equal(mask, CPU_MASK_ALL)) { static int irq_rover; static DEFINE_SPINLOCK(irq_rover_lock); @@ -854,7 +855,7 @@ void fixup_irqs(void) !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, - &irq_desc[irq].affinity); + irq_desc[irq].affinity); } spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } -- cgit v1.2.3 From 4a046d1754ee6ebb6f399696805ed61ea0444d4c Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 12 Jan 2009 17:39:24 -0800 Subject: x86: arch_probe_nr_irqs Impact: save RAM with large NR_CPUS, get smaller nr_irqs Signed-off-by: Yinghai Lu Signed-off-by: Mike Travis --- arch/x86/include/asm/irq_vectors.h | 7 ++----- arch/x86/kernel/io_apic.c | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 602361ad0e7..a16a2ab2b42 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -115,14 +115,11 @@ # endif #else -/* defined as a macro so nr_irqs = max_nr_irqs(nr_cpu_ids) can be used */ -# define max_nr_irqs(nr_cpus) \ - ((8 * nr_cpus) > (32 * MAX_IO_APICS) ? \ +# define NR_IRQS \ + ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \ (NR_VECTORS + (8 * NR_CPUS)) : \ (NR_VECTORS + (32 * MAX_IO_APICS))) \ -# define NR_IRQS max_nr_irqs(NR_CPUS) - #endif #elif defined(CONFIG_X86_VOYAGER) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index ae80638012d..157986916cd 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3850,6 +3850,22 @@ void __init probe_nr_irqs_gsi(void) nr_irqs_gsi = nr; } +#ifdef CONFIG_SPARSE_IRQ +int __init arch_probe_nr_irqs(void) +{ + int nr; + + nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ? 
+ (NR_VECTORS + (8 * nr_cpu_ids)) : + (NR_VECTORS + (32 * nr_ioapics))); + + if (nr < nr_irqs && nr > nr_irqs_gsi) + nr_irqs = nr; + + return 0; +} +#endif + /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -------------------------------------------------------------------------- */ -- cgit v1.2.3 From a4a0acf8e17e3d08e28b721ceceb898fbc959ceb Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 13 Jan 2009 18:43:03 -0800 Subject: x86: fix broken flush_tlb_others_ipi() This commit broke flush_tlb_others_ipi() causing boot hangs on a 16 logical cpu system: > commit 4595f9620cda8a1e973588e743cf5f8436dd20c6 > Author: Rusty Russell > Date: Sat Jan 10 21:58:09 2009 -0800 > > x86: change flush_tlb_others to take a const struct cpumask This change resulted in sending the invalidate tlb vector to the sender itself causing the hang. flush_tlb_others_ipi() should exclude the sender itself from the destination list. Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 7a3f9891302..54ee2ecb5e2 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -188,7 +188,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, * We have to send the IPI only to * CPUs affected. */ - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); + send_IPI_mask(f->flush_cpumask, INVALIDATE_TLB_VECTOR_START + sender); while (!cpumask_empty(to_cpumask(f->flush_cpumask))) cpu_relax(); -- cgit v1.2.3 From b5ba7e6d1e7e2ac808afd21be1e56dc34caf20e6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 17:46:17 +0530 Subject: x86: replacing mp_config_ioapic with mpc_ioapic Impact: cleanup, solve 80 columns wrap problems Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 10 +------ arch/x86/kernel/acpi/boot.c | 28 ++++++++++---------- arch/x86/kernel/io_apic.c | 60 ++++++++++++++++++++---------------------- arch/x86/kernel/mpparse.c | 12 ++++----- 4 files changed, 50 insertions(+), 60 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 7a1f44ac1f1..5a56ae9b505 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -120,14 +120,6 @@ extern int nr_ioapic_registers[MAX_IO_APICS]; #define MP_MAX_IOAPIC_PIN 127 -struct mp_config_ioapic { - unsigned long mp_apicaddr; - unsigned int mp_apicid; - unsigned char mp_type; - unsigned char mp_apicver; - unsigned char mp_flags; -}; - struct mp_config_intsrc { unsigned int mp_dstapic; unsigned char mp_type; @@ -139,7 +131,7 @@ struct mp_config_intsrc { }; /* I/O APIC entries */ -extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; +extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; /* # of MP IRQ source entries */ extern int mp_irq_entries; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index d37593c2f43..2b27019e64f 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id) DECLARE_BITMAP(used, 256); bitmap_zero(used, 256); for (i = 0; i < nr_ioapics; i++) { - struct mp_config_ioapic *ia = &mp_ioapics[i]; - __set_bit(ia->mp_apicid, used); + struct mpc_ioapic *ia = &mp_ioapics[i]; + __set_bit(ia->apicid, used); } if (!test_bit(id, used)) return id; @@ -945,29 
+945,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) idx = nr_ioapics; - mp_ioapics[idx].mp_type = MP_IOAPIC; - mp_ioapics[idx].mp_flags = MPC_APIC_USABLE; - mp_ioapics[idx].mp_apicaddr = address; + mp_ioapics[idx].type = MP_IOAPIC; + mp_ioapics[idx].flags = MPC_APIC_USABLE; + mp_ioapics[idx].apicaddr = address; set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id); + mp_ioapics[idx].apicid = uniq_ioapic_id(id); #ifdef CONFIG_X86_32 - mp_ioapics[idx].mp_apicver = io_apic_get_version(idx); + mp_ioapics[idx].apicver = io_apic_get_version(idx); #else - mp_ioapics[idx].mp_apicver = 0; + mp_ioapics[idx].apicver = 0; #endif /* * Build basic GSI lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI GSIs). */ - mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid; + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid; mp_ioapic_routing[idx].gsi_base = gsi_base; mp_ioapic_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx); - printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " - "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid, - mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr, + printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " + "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, + mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); nr_ioapics++; @@ -1026,7 +1026,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) mp_irq.mp_irqflag = (trigger << 2) | polarity; mp_irq.mp_srcbus = MP_ISA_BUS; mp_irq.mp_srcbusirq = bus_irq; /* IRQ */ - mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */ + mp_irq.mp_dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */ mp_irq.mp_dstirq = pin; /* INTIN# */ save_mp_irq(&mp_irq); @@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void) ioapic = mp_find_ioapic(0); if (ioapic < 0) return; - dstapic = mp_ioapics[ioapic].mp_apicid; + dstapic = mp_ioapics[ioapic].apicid; /* * Use the default configuration for the IRQs 0-15. 
Unless diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 109c91db202..6c51ecdfbf4 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -83,7 +83,7 @@ static DEFINE_SPINLOCK(vector_lock); int nr_ioapic_registers[MAX_IO_APICS]; /* I/O APIC entries */ -struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; +struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; int nr_ioapics; /* MP IRQ source entries */ @@ -387,7 +387,7 @@ struct io_apic { static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) { return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) - + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); + + (mp_ioapics[idx].apicaddr & ~PAGE_MASK); } static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) @@ -946,7 +946,7 @@ static int find_irq_entry(int apic, int pin, int type) for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].mp_irqtype == type && - (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid || + (mp_irqs[i].mp_dstapic == mp_ioapics[apic].apicid || mp_irqs[i].mp_dstapic == MP_APIC_ALL) && mp_irqs[i].mp_dstirq == pin) return i; @@ -988,7 +988,7 @@ static int __init find_isa_irq_apic(int irq, int type) if (i < mp_irq_entries) { int apic; for(apic = 0; apic < nr_ioapics; apic++) { - if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic) + if (mp_ioapics[apic].apicid == mp_irqs[i].mp_dstapic) return apic; } } @@ -1016,7 +1016,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) int lbus = mp_irqs[i].mp_srcbus; for (apic = 0; apic < nr_ioapics; apic++) - if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic || + if (mp_ioapics[apic].apicid == mp_irqs[i].mp_dstapic || mp_irqs[i].mp_dstapic == MP_APIC_ALL) break; @@ -1567,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " "IRQ %d Mode:%i Active:%i)\n", - apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, + apic, mp_ioapics[apic].apicid, pin, cfg->vector, irq, trigger, polarity); - if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, + if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry, dest, trigger, polarity, cfg->vector)) { printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", - mp_ioapics[apic].mp_apicid, pin); + mp_ioapics[apic].apicid, pin); __clear_irq_vector(irq, cfg); return; } @@ -1605,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void) notcon = 1; apic_printk(APIC_VERBOSE, KERN_DEBUG " %d-%d", - mp_ioapics[apic].mp_apicid, - pin); + mp_ioapics[apic].apicid, pin); } else apic_printk(APIC_VERBOSE, " %d-%d", - mp_ioapics[apic].mp_apicid, - pin); + mp_ioapics[apic].apicid, pin); continue; } if (notcon) { @@ -1700,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void) printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); for (i = 0; i < nr_ioapics; i++) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", - mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); + mp_ioapics[i].apicid, nr_ioapic_registers[i]); /* * We are a bit conservative about what we expect. We have to @@ -1720,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void) spin_unlock_irqrestore(&ioapic_lock, flags); printk("\n"); - printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); + printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG "....... 
: physical APIC id: %02X\n", reg_00.bits.ID); printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); @@ -2122,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void) reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - old_id = mp_ioapics[apic].mp_apicid; + old_id = mp_ioapics[apic].apicid; - if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { + if (mp_ioapics[apic].apicid >= get_physical_broadcast()) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", - apic, mp_ioapics[apic].mp_apicid); + apic, mp_ioapics[apic].apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", reg_00.bits.ID); - mp_ioapics[apic].mp_apicid = reg_00.bits.ID; + mp_ioapics[apic].apicid = reg_00.bits.ID; } /* @@ -2138,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void) * 'stuck on smp_invalidate_needed IPI wait' messages. */ if (check_apicid_used(phys_id_present_map, - mp_ioapics[apic].mp_apicid)) { + mp_ioapics[apic].apicid)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", - apic, mp_ioapics[apic].mp_apicid); + apic, mp_ioapics[apic].apicid); for (i = 0; i < get_physical_broadcast(); i++) if (!physid_isset(i, phys_id_present_map)) break; @@ -2149,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void) printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); physid_set(i, phys_id_present_map); - mp_ioapics[apic].mp_apicid = i; + mp_ioapics[apic].apicid = i; } else { physid_mask_t tmp; - tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); + tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid); apic_printk(APIC_VERBOSE, "Setting %d in the " "phys_id_present_map\n", - mp_ioapics[apic].mp_apicid); + mp_ioapics[apic].apicid); physids_or(phys_id_present_map, phys_id_present_map, tmp); } @@ -2164,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void) * We need to adjust the IRQ routing table * if the ID changed. 
*/ - if (old_id != mp_ioapics[apic].mp_apicid) + if (old_id != mp_ioapics[apic].apicid) for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].mp_dstapic == old_id) mp_irqs[i].mp_dstapic - = mp_ioapics[apic].mp_apicid; + = mp_ioapics[apic].apicid; /* * Read the right value from the MPC table and @@ -2176,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void) */ apic_printk(APIC_VERBOSE, KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", - mp_ioapics[apic].mp_apicid); + mp_ioapics[apic].apicid); - reg_00.bits.ID = mp_ioapics[apic].mp_apicid; + reg_00.bits.ID = mp_ioapics[apic].apicid; spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0, reg_00.raw); spin_unlock_irqrestore(&ioapic_lock, flags); @@ -2189,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void) spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) + if (reg_00.bits.ID != mp_ioapics[apic].apicid) printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); @@ -3118,8 +3116,8 @@ static int ioapic_resume(struct sys_device *dev) spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(dev->id, 0); - if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { - reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; + if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { + reg_00.bits.ID = mp_ioapics[dev->id].apicid; io_apic_write(dev->id, 0, reg_00.raw); } spin_unlock_irqrestore(&ioapic_lock, flags); @@ -4101,7 +4099,7 @@ void __init ioapic_init_mappings(void) ioapic_res = ioapic_setup_resources(); for (i = 0; i < nr_ioapics; i++) { if (smp_found_config) { - ioapic_phys = mp_ioapics[i].mp_apicaddr; + ioapic_phys = mp_ioapics[i].apicaddr; #ifdef CONFIG_X86_32 if (!ioapic_phys) { printk(KERN_ERR diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 8385d4e7e15..a86a6553743 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -143,11 +143,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m) if (bad_ioapic(m->apicaddr)) return; - mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; - mp_ioapics[nr_ioapics].mp_apicid = m->apicid; - mp_ioapics[nr_ioapics].mp_type = m->type; - mp_ioapics[nr_ioapics].mp_apicver = m->apicver; - mp_ioapics[nr_ioapics].mp_flags = m->flags; + mp_ioapics[nr_ioapics].apicaddr = m->apicaddr; + mp_ioapics[nr_ioapics].apicid = m->apicid; + mp_ioapics[nr_ioapics].type = m->type; + mp_ioapics[nr_ioapics].apicver = m->apicver; + mp_ioapics[nr_ioapics].flags = m->flags; nr_ioapics++; } @@ -416,7 +416,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) intsrc.type = MP_INTSRC; intsrc.irqflag = 0; /* conforming */ intsrc.srcbus = 0; - intsrc.dstapic = mp_ioapics[0].mp_apicid; + intsrc.dstapic = mp_ioapics[0].apicid; intsrc.irqtype = mp_INT; -- cgit v1.2.3 From c2c21745ecba23c74690a124bcd371f83bd71e45 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 12 Jan 2009 17:47:22 +0530 Subject: x86: replacing mp_config_intsrc with mpc_intsrc Impact: cleanup, solve 80 columns wrap problems Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 16 +--------- arch/x86/kernel/acpi/boot.c | 70 ++++++++++++++++++++---------------------- arch/x86/kernel/io_apic.c | 64 +++++++++++++++++++------------------- arch/x86/kernel/mpparse.c | 68 ++++++++++++++++++++-------------------- 4 files changed, 101 insertions(+), 117 deletions(-) (limited to 'arch') 
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 5a56ae9b505..08ec793aa04 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -114,22 +114,8 @@ struct IR_IO_APIC_route_entry { extern int nr_ioapics; extern int nr_ioapic_registers[MAX_IO_APICS]; -/* - * MP-BIOS irq configuration table structures: - */ - #define MP_MAX_IOAPIC_PIN 127 -struct mp_config_intsrc { - unsigned int mp_dstapic; - unsigned char mp_type; - unsigned char mp_irqtype; - unsigned short mp_irqflag; - unsigned char mp_srcbus; - unsigned char mp_srcbusirq; - unsigned char mp_dstirq; -}; - /* I/O APIC entries */ extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; @@ -137,7 +123,7 @@ extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; extern int mp_irq_entries; /* MP IRQ source entries */ -extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; +extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* non-0 if default (table-less) MP configuration */ extern int mpc_default_type; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2b27019e64f..4cb5964f149 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -973,19 +973,19 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) nr_ioapics++; } -static void assign_to_mp_irq(struct mp_config_intsrc *m, - struct mp_config_intsrc *mp_irq) +static void assign_to_mp_irq(struct mpc_intsrc *m, + struct mpc_intsrc *mp_irq) { - memcpy(mp_irq, m, sizeof(struct mp_config_intsrc)); + memcpy(mp_irq, m, sizeof(struct mpc_intsrc)); } -static int mp_irq_cmp(struct mp_config_intsrc *mp_irq, - struct mp_config_intsrc *m) +static int mp_irq_cmp(struct mpc_intsrc *mp_irq, + struct mpc_intsrc *m) { - return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc)); + return memcmp(mp_irq, m, sizeof(struct mpc_intsrc)); } -static void save_mp_irq(struct mp_config_intsrc *m) +static void save_mp_irq(struct mpc_intsrc *m) { int i; @@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { int ioapic; int pin; - struct mp_config_intsrc mp_irq; + struct mpc_intsrc mp_irq; /* * Convert 'gsi' to 'ioapic.pin'. @@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) if ((bus_irq == 0) && (trigger == 3)) trigger = 1; - mp_irq.mp_type = MP_INTSRC; - mp_irq.mp_irqtype = mp_INT; - mp_irq.mp_irqflag = (trigger << 2) | polarity; - mp_irq.mp_srcbus = MP_ISA_BUS; - mp_irq.mp_srcbusirq = bus_irq; /* IRQ */ - mp_irq.mp_dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */ - mp_irq.mp_dstirq = pin; /* INTIN# */ + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger << 2) | polarity; + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.srcbusirq = bus_irq; /* IRQ */ + mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */ + mp_irq.dstirq = pin; /* INTIN# */ save_mp_irq(&mp_irq); } @@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void) int i; int ioapic; unsigned int dstapic; - struct mp_config_intsrc mp_irq; + struct mpc_intsrc mp_irq; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) /* @@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void) int idx; for (idx = 0; idx < mp_irq_entries; idx++) { - struct mp_config_intsrc *irq = mp_irqs + idx; + struct mpc_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ - if (irq->mp_srcbus == MP_ISA_BUS - && irq->mp_srcbusirq == i) + if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ - if (irq->mp_dstapic == dstapic && - irq->mp_dstirq == i) + if (irq->dstapic == dstapic && irq->dstirq == i) break; } @@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void) continue; /* IRQ already used */ } - mp_irq.mp_type = MP_INTSRC; - mp_irq.mp_irqflag = 0; /* Conforming */ - mp_irq.mp_srcbus = MP_ISA_BUS; - mp_irq.mp_dstapic = dstapic; - mp_irq.mp_irqtype = mp_INT; - mp_irq.mp_srcbusirq = i; /* Identity mapped */ - mp_irq.mp_dstirq = i; + mp_irq.type = MP_INTSRC; + mp_irq.irqflag = 0; /* Conforming */ + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.dstapic = dstapic; + mp_irq.irqtype = mp_INT; + mp_irq.srcbusirq = i; /* Identity mapped */ + mp_irq.dstirq = i; save_mp_irq(&mp_irq); } @@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, u32 gsi, int triggering, int polarity) { #ifdef CONFIG_X86_MPPARSE - struct mp_config_intsrc mp_irq; + struct mpc_intsrc mp_irq; int ioapic; if (!acpi_ioapic) return 0; /* print the entry should happen on mptable identically */ - mp_irq.mp_type = MP_INTSRC; - mp_irq.mp_irqtype = mp_INT; - mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | (polarity == ACPI_ACTIVE_HIGH ? 1 : 3); - mp_irq.mp_srcbus = number; - mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); + mp_irq.srcbus = number; + mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); - mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id; - mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base; + mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id; + mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base; save_mp_irq(&mp_irq); #endif diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 6c51ecdfbf4..79b8c0c72d3 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -87,7 +87,7 @@ struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; int nr_ioapics; /* MP IRQ source entries */ -struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; +struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* # of MP IRQ source entries */ int mp_irq_entries; @@ -945,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type) int i; for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].mp_irqtype == type && - (mp_irqs[i].mp_dstapic == mp_ioapics[apic].apicid || - mp_irqs[i].mp_dstapic == MP_APIC_ALL) && - mp_irqs[i].mp_dstirq == pin) + if (mp_irqs[i].irqtype == type && + (mp_irqs[i].dstapic == mp_ioapics[apic].apicid || + mp_irqs[i].dstapic == MP_APIC_ALL) && + mp_irqs[i].dstirq == pin) return i; return -1; @@ -962,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type) int i; for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].mp_srcbus; + int lbus = mp_irqs[i].srcbus; if (test_bit(lbus, mp_bus_not_pci) && - (mp_irqs[i].mp_irqtype == type) && - (mp_irqs[i].mp_srcbusirq == irq)) + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) - return mp_irqs[i].mp_dstirq; + return mp_irqs[i].dstirq; } return -1; } @@ -978,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type) int i; for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].mp_srcbus; + int lbus = mp_irqs[i].srcbus; if (test_bit(lbus, mp_bus_not_pci) && 
- (mp_irqs[i].mp_irqtype == type) && - (mp_irqs[i].mp_srcbusirq == irq)) + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) break; } if (i < mp_irq_entries) { int apic; for(apic = 0; apic < nr_ioapics; apic++) { - if (mp_ioapics[apic].apicid == mp_irqs[i].mp_dstapic) + if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic) return apic; } } @@ -1013,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) return -1; } for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].mp_srcbus; + int lbus = mp_irqs[i].srcbus; for (apic = 0; apic < nr_ioapics; apic++) - if (mp_ioapics[apic].apicid == mp_irqs[i].mp_dstapic || - mp_irqs[i].mp_dstapic == MP_APIC_ALL) + if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || + mp_irqs[i].dstapic == MP_APIC_ALL) break; if (!test_bit(lbus, mp_bus_not_pci) && - !mp_irqs[i].mp_irqtype && + !mp_irqs[i].irqtype && (bus == lbus) && - (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) { - int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq); + (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { + int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); if (!(apic || IO_APIC_IRQ(irq))) continue; - if (pin == (mp_irqs[i].mp_srcbusirq & 3)) + if (pin == (mp_irqs[i].srcbusirq & 3)) return irq; /* * Use the first all-but-pin matching entry as a @@ -1072,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq) * EISA conforming in the MP table, that means its trigger type must * be read in from the ELCR */ -#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) +#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) #define default_EISA_polarity(idx) default_ISA_polarity(idx) /* PCI interrupts are always polarity one level triggered, @@ -1089,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq) static int MPBIOS_polarity(int idx) { - int bus = mp_irqs[idx].mp_srcbus; + int bus = mp_irqs[idx].srcbus; int polarity; /* * Determine IRQ line polarity (high active or low active): */ - switch (mp_irqs[idx].mp_irqflag & 3) + switch (mp_irqs[idx].irqflag & 3) { case 0: /* conforms, ie. bus-type dependent polarity */ if (test_bit(bus, mp_bus_not_pci)) @@ -1131,13 +1131,13 @@ static int MPBIOS_polarity(int idx) static int MPBIOS_trigger(int idx) { - int bus = mp_irqs[idx].mp_srcbus; + int bus = mp_irqs[idx].srcbus; int trigger; /* * Determine IRQ trigger mode (edge or level sensitive): */ - switch ((mp_irqs[idx].mp_irqflag>>2) & 3) + switch ((mp_irqs[idx].irqflag>>2) & 3) { case 0: /* conforms, ie. bus-type dependent */ if (test_bit(bus, mp_bus_not_pci)) @@ -1215,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq); static int pin_2_irq(int idx, int apic, int pin) { int irq, i; - int bus = mp_irqs[idx].mp_srcbus; + int bus = mp_irqs[idx].srcbus; /* * Debugging check, we are in big trouble if this message pops up! 
*/ - if (mp_irqs[idx].mp_dstirq != pin) + if (mp_irqs[idx].dstirq != pin) printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); if (test_bit(bus, mp_bus_not_pci)) { - irq = mp_irqs[idx].mp_srcbusirq; + irq = mp_irqs[idx].srcbusirq; } else { /* * PCI IRQs are mapped in order @@ -2164,8 +2164,8 @@ static void __init setup_ioapic_ids_from_mpc(void) */ if (old_id != mp_ioapics[apic].apicid) for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].mp_dstapic == old_id) - mp_irqs[i].mp_dstapic + if (mp_irqs[i].dstapic == old_id) + mp_irqs[i].dstapic = mp_ioapics[apic].apicid; /* @@ -3983,8 +3983,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) return -1; for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].mp_irqtype == mp_INT && - mp_irqs[i].mp_srcbusirq == bus_irq) + if (mp_irqs[i].irqtype == mp_INT && + mp_irqs[i].srcbusirq == bus_irq) break; if (i >= mp_irq_entries) return -1; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a86a6553743..ad36377dc93 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -159,55 +159,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m) m->srcbusirq, m->dstapic, m->dstirq); } -static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) +static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq) { apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", - mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, - (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, - mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); + mp_irq->irqtype, mp_irq->irqflag & 3, + (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus, + mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq); } static void __init assign_to_mp_irq(struct mpc_intsrc *m, - struct mp_config_intsrc *mp_irq) + struct mpc_intsrc *mp_irq) { - mp_irq->mp_dstapic = m->dstapic; - mp_irq->mp_type = m->type; - mp_irq->mp_irqtype = m->irqtype; - mp_irq->mp_irqflag = m->irqflag; - mp_irq->mp_srcbus = m->srcbus; - mp_irq->mp_srcbusirq = m->srcbusirq; - mp_irq->mp_dstirq = m->dstirq; + mp_irq->dstapic = m->dstapic; + mp_irq->type = m->type; + mp_irq->irqtype = m->irqtype; + mp_irq->irqflag = m->irqflag; + mp_irq->srcbus = m->srcbus; + mp_irq->srcbusirq = m->srcbusirq; + mp_irq->dstirq = m->dstirq; } -static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, +static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq, struct mpc_intsrc *m) { - m->dstapic = mp_irq->mp_dstapic; - m->type = mp_irq->mp_type; - m->irqtype = mp_irq->mp_irqtype; - m->irqflag = mp_irq->mp_irqflag; - m->srcbus = mp_irq->mp_srcbus; - m->srcbusirq = mp_irq->mp_srcbusirq; - m->dstirq = mp_irq->mp_dstirq; + m->dstapic = mp_irq->dstapic; + m->type = mp_irq->type; + m->irqtype = mp_irq->irqtype; + m->irqflag = mp_irq->irqflag; + m->srcbus = mp_irq->srcbus; + m->srcbusirq = mp_irq->srcbusirq; + m->dstirq = mp_irq->dstirq; } -static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, +static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq, struct mpc_intsrc *m) { - if (mp_irq->mp_dstapic != m->dstapic) + if (mp_irq->dstapic != m->dstapic) return 1; - if (mp_irq->mp_type != m->type) + if (mp_irq->type != m->type) return 2; - if (mp_irq->mp_irqtype != m->irqtype) + if (mp_irq->irqtype != m->irqtype) return 3; - if (mp_irq->mp_irqflag != m->irqflag) + if (mp_irq->irqflag != m->irqflag) return 4; - if (mp_irq->mp_srcbus != m->srcbus) + if (mp_irq->srcbus != m->srcbus) return 5; - if 
(mp_irq->mp_srcbusirq != m->srcbusirq) + if (mp_irq->srcbusirq != m->srcbusirq) return 6; - if (mp_irq->mp_dstirq != m->dstirq) + if (mp_irq->dstirq != m->dstirq) return 7; return 0; @@ -808,15 +808,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m) /* not legacy */ for (i = 0; i < mp_irq_entries; i++) { - if (mp_irqs[i].mp_irqtype != mp_INT) + if (mp_irqs[i].irqtype != mp_INT) continue; - if (mp_irqs[i].mp_irqflag != 0x0f) + if (mp_irqs[i].irqflag != 0x0f) continue; - if (mp_irqs[i].mp_srcbus != m->srcbus) + if (mp_irqs[i].srcbus != m->srcbus) continue; - if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) + if (mp_irqs[i].srcbusirq != m->srcbusirq) continue; if (irq_used[i]) { /* already claimed */ @@ -921,10 +921,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, if (irq_used[i]) continue; - if (mp_irqs[i].mp_irqtype != mp_INT) + if (mp_irqs[i].irqtype != mp_INT) continue; - if (mp_irqs[i].mp_irqflag != 0x0f) + if (mp_irqs[i].irqflag != 0x0f) continue; if (nr_m_spare > 0) { -- cgit v1.2.3 From 09b3ec7315a18d885127544204f1e389d41058d0 Mon Sep 17 00:00:00 2001 From: Frederik Deweerdt Date: Mon, 12 Jan 2009 22:35:42 +0100 Subject: x86, tlb flush_data: replace per_cpu with an array Impact: micro-optimization, memory reduction On x86_64 flush tlb data is stored in per_cpu variables. This is unnecessary because only the first NUM_INVALIDATE_TLB_VECTORS entries are accessed. This patch aims at making the code less confusing (there's nothing really "per_cpu") by using a plain array. It also would save some memory on most distros out there (Ubuntu x86_64 has NR_CPUS=64 by default). [ Ravikiran G Thirumalai also pointed out that the correct alignment is ____cacheline_internodealigned_in_smp, so that there's no bouncing on vsmp. ] Signed-off-by: Frederik Deweerdt Acked-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_64.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index f8be6f1d2e4..8cfea5d1451 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -33,7 +33,7 @@ * To avoid global state use 8 different call vectors. * Each CPU uses a specific vector to trigger flushes on other * CPUs. Depending on the received vector the target CPUs look into - * the right per cpu variable for the flush data. + * the right array slot for the flush data. * * With more than 8 CPUs they are hashed to the 8 available * vectors. The limited global vector space forces us to this right now. @@ -48,13 +48,13 @@ union smp_flush_state { unsigned long flush_va; spinlock_t tlbstate_lock; }; - char pad[SMP_CACHE_BYTES]; -} ____cacheline_aligned; + char pad[CONFIG_X86_INTERNODE_CACHE_BYTES]; +} ____cacheline_internodealigned_in_smp; /* State is put into the per CPU data section, but padded to a full cache line because other CPUs can access it and we don't want false sharing in the per cpu data segment. */ -static DEFINE_PER_CPU(union smp_flush_state, flush_state); +static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS]; /* * We cannot call mmdrop() because we are in interrupt context, @@ -129,7 +129,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) * Use that to determine where the sender put the data. 
*/ sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; - f = &per_cpu(flush_state, sender); + f = &flush_state[sender]; if (!cpu_isset(cpu, f->flush_cpumask)) goto out; @@ -169,7 +169,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, /* Caller has disabled preemption */ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; - f = &per_cpu(flush_state, sender); + f = &flush_state[sender]; /* * Could avoid this lock when @@ -205,8 +205,8 @@ static int __cpuinit init_smp_flush(void) { int i; - for_each_possible_cpu(i) - spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); + for (i = 0; i < ARRAY_SIZE(flush_state); i++) + spin_lock_init(&flush_state[i].tlbstate_lock); return 0; } -- cgit v1.2.3 From 0a2a18b721abc960fbcada406746877d22340a60 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 Jan 2009 23:37:16 +0100 Subject: x86: change the default cache size to 64 bytes Right now the generic cacheline size is 128 bytes - that is wasteful when structures are aligned, as all modern x86 CPUs have an (effective) cacheline sizes of 64 bytes. It was set to 128 bytes due to some cacheline aliasing problems on older P4 systems, but those are many years old and we dont optimize for them anymore. (They'll still get the 128 bytes cacheline size if the kernel is specifically built for Pentium 4) Signed-off-by: Ingo Molnar Acked-by: Arjan van de Ven --- arch/x86/Kconfig.cpu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 8078955845a..cdf4a962323 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -307,10 +307,10 @@ config X86_CMPXCHG config X86_L1_CACHE_SHIFT int - default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC + default "7" if MPENTIUM4 || MPSC default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX - default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 + default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU config X86_XADD def_bool y -- cgit v1.2.3 From b665967979d0e990f196e7c4ba88e17c9ed9b781 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 12 Jan 2009 11:54:27 -0800 Subject: x86: make 32bit MAX_HARDIRQS_PER_CPU to be NR_VECTORS Impact: clean up to be same as 64bit 32-bit is using per-cpu vector too, so don't use default NR_IRQS. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hardirq_32.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h index cf7954d1405..d4b5d731073 100644 --- a/arch/x86/include/asm/hardirq_32.h +++ b/arch/x86/include/asm/hardirq_32.h @@ -19,6 +19,9 @@ typedef struct { DECLARE_PER_CPU(irq_cpustat_t, irq_stat); +/* We can have at most NR_VECTORS irqs routed to a cpu at a time */ +#define MAX_HARDIRQS_PER_CPU NR_VECTORS + #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) -- cgit v1.2.3 From 444027031cd069ea7e48b016cb33bbf201c8a9f0 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:47 +0300 Subject: x86: headers cleanup - prctl.h Impact: cleanup (internal kernel function exported) 'make headers_check' warn us about leaking of kernel private (mostly compile time vars) data to userspace in headers. Fix it. 
sys_arch_prctl is completely removed from header since frankly I don't even understand why we describe it here. It is described like __SYSCALL(__NR_arch_prctl, sys_arch_prctl) in unistd_64.h and implemented in process_64.c. User-mode linux involved? So this one in fact is suspicious. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/prctl.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h index a8894647dd9..3ac5032fae0 100644 --- a/arch/x86/include/asm/prctl.h +++ b/arch/x86/include/asm/prctl.h @@ -6,8 +6,4 @@ #define ARCH_GET_FS 0x1003 #define ARCH_GET_GS 0x1004 -#ifdef CONFIG_X86_64 -extern long sys_arch_prctl(int, unsigned long); -#endif /* CONFIG_X86_64 */ - #endif /* _ASM_X86_PRCTL_H */ -- cgit v1.2.3 From a7c4e68615e20771f279c51a2bec8980675c78c7 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:49 +0300 Subject: x86: headers cleanup - sigcontext32.h Impact: cleanup 'make headers_check' warn us about lack of linux/types.h here. Lets add it. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/sigcontext32.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h index 6126188cf3a..ad1478c4ae1 100644 --- a/arch/x86/include/asm/sigcontext32.h +++ b/arch/x86/include/asm/sigcontext32.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_SIGCONTEXT32_H #define _ASM_X86_SIGCONTEXT32_H +#include + /* signal context for 32bit programs. */ #define X86_FXSR_MAGIC 0x0000 -- cgit v1.2.3 From dbca1df48e89d8aa59254fdc10ef16c16e73d94e Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:50 +0300 Subject: x86: headers cleanup - setup.h Impact: cleanup 'make headers_check' warn us about leaking of kernel private (mostly compile time vars) data to userspace in headers. Fix it. Guard this one by __KERNEL__. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/setup.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ebe858cdc8a..29d31c0d13d 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_SETUP_H #define _ASM_X86_SETUP_H +#ifdef __KERNEL__ + #define COMMAND_LINE_SIZE 2048 #ifndef __ASSEMBLY__ @@ -8,10 +10,8 @@ /* Interrupt control for vSMPowered x86_64 systems */ void vsmp_init(void); - void setup_bios_corruption_check(void); - #ifdef CONFIG_X86_VISWS extern void visws_early_detect(void); extern int is_visws_box(void); @@ -43,7 +43,7 @@ struct x86_quirks { void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); void (*mpc_oem_pci_bus)(struct mpc_bus *m); void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, - unsigned short oemsize); + unsigned short oemsize); int (*setup_ioapic_ids)(void); int (*update_genapic)(void); }; @@ -56,8 +56,6 @@ extern unsigned long saved_video_mode; #endif #endif /* __ASSEMBLY__ */ -#ifdef __KERNEL__ - #ifdef __i386__ #include -- cgit v1.2.3 From 95c4bff0308eb0819436b730a836846d3e784657 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:46 +0300 Subject: x86: headers cleanup - boot.h Impact: cleanup 'make headers_check' warn us about leaking of kernel private (mostly compile time vars) data to userspace in headers. Fix it. 
Neither BOOT_HEAP_SIZE, BOOT_STACK_SIZE refs was found by searching thru net (ie in user-space area) so fence this all by __KERNEL__ guard. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/boot.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index c0e8e68a31f..6526cf08b0e 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -10,14 +10,16 @@ #define EXTENDED_VGA 0xfffe /* 80x50 mode */ #define ASK_VGA 0xfffd /* ask for it at bootup */ +#ifdef __KERNEL__ + /* Physical address where kernel should be loaded. */ #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) -#if (defined CONFIG_KERNEL_BZIP2) +#ifdef CONFIG_KERNEL_BZIP2 #define BOOT_HEAP_SIZE 0x400000 -#else +#else /* !CONFIG_KERNEL_BZIP2 */ #ifdef CONFIG_X86_64 #define BOOT_HEAP_SIZE 0x7000 @@ -25,7 +27,7 @@ #define BOOT_HEAP_SIZE 0x4000 #endif -#endif +#endif /* !CONFIG_KERNEL_BZIP2 */ #ifdef CONFIG_X86_64 #define BOOT_STACK_SIZE 0x4000 @@ -33,4 +35,6 @@ #define BOOT_STACK_SIZE 0x1000 #endif +#endif /* __KERNEL__ */ + #endif /* _ASM_X86_BOOT_H */ -- cgit v1.2.3 From d2287f5ebea9ff2487d614719775f0b03fce15f6 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 14 Jan 2009 15:43:54 -0800 Subject: irq: update all arches for new irq_desc, fix Impact: fix build errors Since the SPARSE IRQS changes redefined how the kstat irqs are organized, arch's must use the new accessor function: kstat_incr_irqs_this_cpu(irq, DESC); If CONFIG_SPARSE_IRQS is set, then DESC is a pointer to the irq_desc which has a pointer to the kstat_irqs. If not, then the .irqs field of struct kernel_stat is used instead. 
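Roughly, the per-arch conversion pattern looks like the following sketch (hypothetical helper name; it only assumes the irq_to_desc() and kstat_incr_irqs_this_cpu() accessors that the hunks below already use):

  #include <linux/irq.h>
  #include <linux/kernel_stat.h>

  /* sketch: account one occurrence of 'irq' on the current CPU */
  static void account_this_irq(unsigned int irq)
  {
  	/* old style, no longer valid with SPARSE_IRQ: kstat_this_cpu.irqs[irq]++; */
  	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
  }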
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/ia64/kernel/irq_ia64.c | 12 ++++++++---- arch/mips/kernel/smtc.c | 4 +++- arch/mips/sgi-ip22/ip22-int.c | 2 +- arch/mips/sgi-ip22/ip22-time.c | 2 +- arch/mips/sibyte/bcm1480/smp.c | 3 ++- arch/mips/sibyte/sb1250/smp.c | 3 ++- arch/mn10300/kernel/mn10300-watchdog.c | 3 ++- arch/sparc/kernel/time_64.c | 2 +- 8 files changed, 20 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 28d3d483db9..927ad027820 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); while (vector != IA64_SPURIOUS_INT_VECTOR) { + struct irq_desc *desc = irq_to_desc(vector); + if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(vector, desc); } else if (unlikely(IS_RESCHEDULE(vector))) - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(vector, desc); else { int irq = local_vector_to_irq(vector); @@ -551,11 +553,13 @@ void ia64_process_pending_intr(void) * Perform normal interrupt style processing */ while (vector != IA64_SPURIOUS_INT_VECTOR) { + struct irq_desc *desc = irq_to_desc(vector); + if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(vector, desc); } else if (unlikely(IS_RESCHEDULE(vector))) - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(vector, desc); else { struct pt_regs *old_regs = set_irq_regs(NULL); int irq = local_vector_to_irq(vector); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index d2c1ab12425..5f5af7d4c89 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) struct clock_event_device *cd; void *arg_copy = pipi->arg; int type_copy = pipi->type; + int irq = MIPS_CPU_IRQ_BASE + 1; + smtc_ipi_nq(&freeIPIq, pipi); switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); - kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); cd = &per_cpu(mips_clockevent_device, cpu); cd->event_handler(cd); irq_exit(); diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index f8b18af141a..0ecd5fe9486 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c @@ -155,7 +155,7 @@ static void indy_buserror_irq(void) int irq = SGI_BUSERR_IRQ; irq_enter(); - kstat_this_cpu.irqs[irq]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); ip22_be_interrupt(irq); irq_exit(); } diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 3dcb27ec0c5..c8f7d2328b2 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c @@ -122,7 +122,7 @@ void indy_8254timer_irq(void) char c; irq_enter(); - kstat_this_cpu.irqs[irq]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); printk(KERN_ALERT "Oops, got 8254 interrupt.\n"); ArcRead(0, &c, 1, &cnt); ArcEnterInteractiveMode(); diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index dddfda8e829..314691648c9 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = { void bcm1480_mailbox_interrupt(void) { int cpu = smp_processor_id(); + int irq = K_BCM1480_INT_MBOX_0_0; unsigned int action; - 
kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); /* Load the mailbox register to figure out what we're supposed to do */ action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff; diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 5950a288a7d..cad14003b84 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = { void sb1250_mailbox_interrupt(void) { int cpu = smp_processor_id(); + int irq = K_INT_MBOX_0; unsigned int action; - kstat_this_cpu.irqs[K_INT_MBOX_0]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); /* Load the mailbox register to figure out what we're supposed to do */ action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff; diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index 10811e981d2..2e370d88a87 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c @@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) * the stack NMI-atomically, it's safe to use smp_processor_id(). */ int sum, cpu = smp_processor_id(); + int irq = NMIIRQ; u8 wdt, tmp; wdt = WDCTR & ~WDCTR_WDCNE; @@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) NMICR = NMICR_WDIF; nmi_count(cpu)++; - kstat_this_cpu.irqs[NMIIRQ]++; + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); sum = irq_stat[cpu].__irq_count; if (last_irq_sums[cpu] == sum) { diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 54405d36214..28b48f3eb25 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -727,7 +727,7 @@ void timer_interrupt(int irq, struct pt_regs *regs) irq_enter(); - kstat_this_cpu.irqs[0]++; + kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); if (unlikely(!evt->event_handler)) { printk(KERN_WARNING -- cgit v1.2.3 From f11826385b63566d98c02d35f592232ee77cd791 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 14 Jan 2009 12:27:35 +0000 Subject: x86: fully honor "nolapic" Impact: widen the effect of the 'nolapic' boot parameter "nolapic" should not only suppress SMP and use of the LAPIC, but it also ought to have the effect of disabling all IO-APIC related activity as well as PCI MSI and HT-IRQs. 
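In rough outline, the mechanism is an early bail-out in every path that would program an interrupt through the disabled local APIC. The sketch below (hypothetical helper name; disable_apic is the existing flag set by "nolapic") mirrors the guard the hunks add to the MSI and HT-IRQ setup routines:

  #include <linux/errno.h>

  extern int disable_apic;	/* set when "nolapic" is on the command line */

  /* sketch: refuse LAPIC-dependent interrupt setup when the LAPIC is off */
  static int lapic_setup_allowed(void)
  {
  	return disable_apic ? -ENXIO : 0;
  }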
Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 7 ++++++- arch/x86/kernel/io_apic.c | 6 ++++++ arch/x86/kernel/smpboot.c | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 0f830e4f567..c3dd64fabcf 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1126,6 +1126,11 @@ void __cpuinit setup_local_APIC(void) unsigned int value; int i, j; + if (disable_apic) { + disable_ioapic_setup(); + return; + } + #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && esr_disable) { @@ -1566,11 +1571,11 @@ int apic_version[MAX_APICS]; int __init APIC_init_uniprocessor(void) { -#ifdef CONFIG_X86_64 if (disable_apic) { pr_info("Apic disabled\n"); return -1; } +#ifdef CONFIG_X86_64 if (!cpu_has_apic) { disable_apic = 1; pr_info("Apic disabled by BIOS\n"); diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536..40747e58f30 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3258,6 +3258,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms int err; unsigned dest; + if (disable_apic) + return -ENXIO; + cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, TARGET_CPUS); if (err) @@ -3726,6 +3729,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) struct irq_cfg *cfg; int err; + if (disable_apic) + return -ENXIO; + cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, TARGET_CPUS); if (!err) { diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index bb1a3b1fc87..31f99ec2e0f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1125,6 +1125,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_ERR "... forcing use of dummy APIC emulation." "(tell your hw vendor)\n"); smpboot_clear_io_apic(); + disable_ioapic_setup(); return -1; } -- cgit v1.2.3 From a08c4743ed5b861c4fa3d75be00da7106c926296 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 14 Jan 2009 12:28:51 +0000 Subject: x86: avoid early crash in disable_local_APIC() E.g. when called due to an early panic. Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index c3dd64fabcf..38d6aab2358 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -895,6 +895,10 @@ void disable_local_APIC(void) { unsigned int value; + /* APIC hasn't been mapped yet */ + if (!apic_phys) + return; + clear_local_APIC(); /* -- cgit v1.2.3 From 54da5b3d44238eeb7417bacf792fb416d473bf4d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Jan 2009 13:04:58 +0100 Subject: x86: fix broken flush_tlb_others_ipi(), fix Impact: cleanup Use the proper type. Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 54ee2ecb5e2..7f4141d3b66 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -188,7 +188,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, * We have to send the IPI only to * CPUs affected. 
*/ - send_IPI_mask(f->flush_cpumask, INVALIDATE_TLB_VECTOR_START + sender); + send_IPI_mask(to_cpumask(f->flush_cpumask), + INVALIDATE_TLB_VECTOR_START + sender); while (!cpumask_empty(to_cpumask(f->flush_cpumask))) cpu_relax(); -- cgit v1.2.3 From 5cd7376200be7b8bab085557ff5876b04bd84191 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Jan 2009 15:46:08 +0100 Subject: fix: crash: IP: __bitmap_intersects+0x48/0x73 -tip testing found this crash: > [ 35.258515] calling acpi_cpufreq_init+0x0/0x127 @ 1 > [ 35.264127] BUG: unable to handle kernel NULL pointer dereference at (null) > [ 35.267554] IP: [] __bitmap_intersects+0x48/0x73 > [ 35.267554] PGD 0 > [ 35.267554] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c is still broken: there's no allocation of the variable mask, so we pass in an uninitialized cmd.mask field to drv_read(), which then passes it to the scheduler which then crashes ... Switch it over to the much simpler constant-cpumask-pointers approach. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 6f11e029e8c..019276717a7 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -145,7 +145,7 @@ typedef union { struct drv_cmd { unsigned int type; - cpumask_var_t mask; + const struct cpumask *mask; drv_addr_union addr; u32 val; }; @@ -235,8 +235,7 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } - cpumask_copy(cmd.mask, mask); - + cmd.mask = mask; drv_read(&cmd); dprintk("get_cur_val = %u\n", cmd.val); @@ -403,9 +402,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return -ENODEV; } - if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) return -ENOMEM; - perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, @@ -450,9 +446,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); + cmd.mask = policy->cpus; else - cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); + cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; @@ -479,7 +475,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, perf->state = next_perf_state; out: - free_cpumask_var(cmd.mask); return result; } -- cgit v1.2.3 From f2a082711905312dc7b6675e913fee0c4689f7ae Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Thu, 15 Jan 2009 09:19:32 -0800 Subject: x86: fix build warning when CONFIG_NUMA not defined. Impact: fix build warning The macro cpu_to_node did not reference its argument, and instead simply returned a 0. This causes an "unused variable" warning if it's the only reference in a function (show_cache_disable). Replace it with the more correct inline function.
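[ Illustration, not kernel code: why the macro form warns and the inline does not. Function and variable names below are placeholders; only the macro-vs-inline contrast comes from the patch. With the macro, the expansion drops 'cpu' and gcc's -Wunused-variable fires in any function whose only use of the variable was that call; the static inline consumes its argument, so the warning goes away while still compiling down to a constant 0. ]

#define cpu_to_node_macro(cpu)	0		/* old style: the argument vanishes in expansion */

static inline int cpu_to_node_inline(int cpu)	/* new style: the argument is referenced */
{
	return 0;
}

int macro_user(void)
{
	int cpu = 3;
	return cpu_to_node_macro(cpu);	/* gcc: "unused variable 'cpu'" */
}

int inline_user(void)
{
	int cpu = 3;
	return cpu_to_node_inline(cpu);	/* no warning: cpu is used */
}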
Signed-off-by: Mike Travis --- arch/x86/include/asm/topology.h | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 4e2f2e0aab2..d0c68e29163 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -192,9 +192,20 @@ extern int __node_distance(int, int); #else /* !CONFIG_NUMA */ -#define numa_node_id() 0 -#define cpu_to_node(cpu) 0 -#define early_cpu_to_node(cpu) 0 +static inline int numa_node_id(void) +{ + return 0; +} + +static inline int cpu_to_node(int cpu) +{ + return 0; +} + +static inline int early_cpu_to_node(int cpu) +{ + return 0; +} static inline const cpumask_t *cpumask_of_node(int node) { -- cgit v1.2.3 From c99dbbe9f8f6b3e9383e64710217e873431d1c31 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Thu, 15 Jan 2009 12:09:44 -0800 Subject: sched: fix warning on ia64 Andrew Morton reported this warning on ia64: kernel/sched.c: In function `sd_init_NODE': kernel/sched.c:7449: warning: comparison of distinct pointer types lacks a cast Using the untyped min() function produces such warnings. Fix: type the constant 32 as unsigned int to match typeof(num_online_cpus). Reported-by: Andrew Morton Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/topology.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 32f3af1641c..3193f4417e1 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -84,7 +84,7 @@ void build_cpu_to_node_map(void); .child = NULL, \ .groups = NULL, \ .min_interval = 8, \ - .max_interval = 8*(min(num_online_cpus(), 32)), \ + .max_interval = 8*(min(num_online_cpus(), 32U)), \ .busy_factor = 64, \ .imbalance_pct = 125, \ .cache_nice_tries = 2, \ -- cgit v1.2.3 From 8d29b7b9f81d6b83d869ff054e6c189d6da73f1f Mon Sep 17 00:00:00 2001 From: Ben Nizette Date: Wed, 14 Jan 2009 09:32:19 +1100 Subject: avr32: Fix out-of-range rcalls in large kernels Replace handcoded rcall instructions with the call pseudo-instruction. For kernels too far over 1MB the rcall instruction can't reach and linking will fail. We already call the final linker with --relax which converts call pseudo-instructions to the right things anyway. 
This fixes arch/avr32/kernel/built-in.o: In function `syscall_exit_work': (.ex.text+0x198): relocation truncated to fit: R_AVR32_22H_PCREL against symbol `schedule' defined in .sched.text section in kernel/built-in.o arch/avr32/kernel/built-in.o: In function `fault_exit_work': (.ex.text+0x3b6): relocation truncated to fit: R_AVR32_22H_PCREL against symbol `schedule' defined in .sched.text section in kernel/built-in.o But I'm still left with arch/avr32/kernel/built-in.o:(.fixup+0x2): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+45a arch/avr32/kernel/built-in.o:(.fixup+0x8): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+8ea arch/avr32/kernel/built-in.o:(.fixup+0xe): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+abe arch/avr32/kernel/built-in.o:(.fixup+0x14): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+ac8 arch/avr32/kernel/built-in.o:(.fixup+0x1a): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+ad2 arch/avr32/kernel/built-in.o:(.fixup+0x20): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+adc arch/avr32/kernel/built-in.o:(.fixup+0x26): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+ae6 arch/avr32/kernel/built-in.o:(.fixup+0x2c): relocation truncated to fit: R_AVR32_22H_PCREL against `.text'+af0 arch/avr32/kernel/built-in.o:(.fixup+0x32): additional relocation overflows omitted from the output These are caused by a similar problem with 'rjmp' instructions. Unfortunately, there's no easy fix for these at the moment since we don't have a arbitrary-range 'jmp' instruction similar to 'call'. Signed-off-by: Ben Nizette Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/entry-avr32b.S | 60 +++++++++++++++++++-------------------- arch/avr32/kernel/syscall-stubs.S | 14 ++++----- arch/avr32/lib/strnlen_user.S | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S index 33d49377b8b..009a80155d6 100644 --- a/arch/avr32/kernel/entry-avr32b.S +++ b/arch/avr32/kernel/entry-avr32b.S @@ -150,10 +150,10 @@ page_not_present: tlbmiss_restore sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mfsr r12, SYSREG_ECR mov r11, sp - rcall do_page_fault + call do_page_fault rjmp ret_from_exception .align 2 @@ -250,7 +250,7 @@ syscall_badsys: .global ret_from_fork ret_from_fork: - rcall schedule_tail + call schedule_tail /* check for syscall tracing */ get_thread_info r0 @@ -261,7 +261,7 @@ ret_from_fork: syscall_trace_enter: pushm r8-r12 - rcall syscall_trace + call syscall_trace popm r8-r12 rjmp syscall_trace_cont @@ -269,14 +269,14 @@ syscall_exit_work: bld r1, TIF_SYSCALL_TRACE brcc 1f unmask_interrupts - rcall syscall_trace + call syscall_trace mask_interrupts ld.w r1, r0[TI_flags] 1: bld r1, TIF_NEED_RESCHED brcc 2f unmask_interrupts - rcall schedule + call schedule mask_interrupts ld.w r1, r0[TI_flags] rjmp 1b @@ -287,7 +287,7 @@ syscall_exit_work: unmask_interrupts mov r12, sp mov r11, r0 - rcall do_notify_resume + call do_notify_resume mask_interrupts ld.w r1, r0[TI_flags] rjmp 1b @@ -394,7 +394,7 @@ handle_critical: mfsr r12, SYSREG_ECR mov r11, sp - rcall do_critical_exception + call do_critical_exception /* We should never get here... 
*/ bad_return: @@ -407,18 +407,18 @@ bad_return: do_bus_error_write: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mov r11, 1 rjmp 1f do_bus_error_read: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mov r11, 0 1: mfsr r12, SYSREG_BEAR mov r10, sp - rcall do_bus_error + call do_bus_error rjmp ret_from_exception .align 1 @@ -433,7 +433,7 @@ do_nmi_ll: 1: pushm r8, r9 /* PC and SR */ mfsr r12, SYSREG_ECR mov r11, sp - rcall do_nmi + call do_nmi popm r8-r9 mtsr SYSREG_RAR_NMI, r8 tst r0, r0 @@ -457,29 +457,29 @@ do_nmi_ll: handle_address_fault: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mfsr r12, SYSREG_ECR mov r11, sp - rcall do_address_exception + call do_address_exception rjmp ret_from_exception handle_protection_fault: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mfsr r12, SYSREG_ECR mov r11, sp - rcall do_page_fault + call do_page_fault rjmp ret_from_exception .align 1 do_illegal_opcode_ll: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex mfsr r12, SYSREG_ECR mov r11, sp - rcall do_illegal_opcode + call do_illegal_opcode rjmp ret_from_exception do_dtlb_modified: @@ -513,11 +513,11 @@ do_dtlb_modified: do_fpe_ll: sub sp, 4 stmts --sp, r0-lr - rcall save_full_context_ex + call save_full_context_ex unmask_interrupts mov r12, 26 mov r11, sp - rcall do_fpe + call do_fpe rjmp ret_from_exception ret_from_exception: @@ -553,7 +553,7 @@ fault_resume_kernel: lddsp r4, sp[REG_SR] bld r4, SYSREG_GM_OFFSET brcs 1f - rcall preempt_schedule_irq + call preempt_schedule_irq 1: #endif @@ -582,7 +582,7 @@ fault_exit_work: bld r1, TIF_NEED_RESCHED brcc 1f unmask_interrupts - rcall schedule + call schedule mask_interrupts ld.w r1, r0[TI_flags] rjmp fault_exit_work @@ -593,7 +593,7 @@ fault_exit_work: unmask_interrupts mov r12, sp mov r11, r0 - rcall do_notify_resume + call do_notify_resume mask_interrupts ld.w r1, r0[TI_flags] rjmp fault_exit_work @@ -616,10 +616,10 @@ handle_debug: .Ldebug_fixup_cont: #ifdef CONFIG_TRACE_IRQFLAGS - rcall trace_hardirqs_off + call trace_hardirqs_off #endif mov r12, sp - rcall do_debug + call do_debug mov sp, r12 lddsp r2, sp[REG_SR] @@ -643,7 +643,7 @@ handle_debug: mtsr SYSREG_RSR_DBG, r11 mtsr SYSREG_RAR_DBG, r10 #ifdef CONFIG_TRACE_IRQFLAGS - rcall trace_hardirqs_on + call trace_hardirqs_on 1: #endif ldmts sp++, r0-lr @@ -676,7 +676,7 @@ debug_resume_kernel: #ifdef CONFIG_TRACE_IRQFLAGS bld r11, SYSREG_GM_OFFSET brcc 1f - rcall trace_hardirqs_on + call trace_hardirqs_on 1: #endif mfsr r2, SYSREG_SR @@ -747,7 +747,7 @@ irq_level\level: mov r11, sp mov r12, \level - rcall do_IRQ + call do_IRQ lddsp r4, sp[REG_SR] bfextu r4, r4, SYSREG_M0_OFFSET, 3 @@ -767,7 +767,7 @@ irq_level\level: 1: #ifdef CONFIG_TRACE_IRQFLAGS - rcall trace_hardirqs_on + call trace_hardirqs_on #endif popm r8-r9 mtsr rar_int\level, r8 @@ -807,7 +807,7 @@ irq_level\level: lddsp r4, sp[REG_SR] bld r4, SYSREG_GM_OFFSET brcs 1b - rcall preempt_schedule_irq + call preempt_schedule_irq #endif rjmp 1b .endm diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S index 673178e235f..f7244cd02fb 100644 --- a/arch/avr32/kernel/syscall-stubs.S +++ b/arch/avr32/kernel/syscall-stubs.S @@ -61,7 +61,7 @@ __sys_execve: __sys_mmap2: pushm lr st.w --sp, ARG6 - rcall sys_mmap2 + call sys_mmap2 sub sp, -4 popm pc @@ -70,7 +70,7 @@ __sys_mmap2: __sys_sendto: pushm lr st.w --sp, ARG6 - rcall sys_sendto + 
call sys_sendto sub sp, -4 popm pc @@ -79,7 +79,7 @@ __sys_sendto: __sys_recvfrom: pushm lr st.w --sp, ARG6 - rcall sys_recvfrom + call sys_recvfrom sub sp, -4 popm pc @@ -88,7 +88,7 @@ __sys_recvfrom: __sys_pselect6: pushm lr st.w --sp, ARG6 - rcall sys_pselect6 + call sys_pselect6 sub sp, -4 popm pc @@ -97,7 +97,7 @@ __sys_pselect6: __sys_splice: pushm lr st.w --sp, ARG6 - rcall sys_splice + call sys_splice sub sp, -4 popm pc @@ -106,7 +106,7 @@ __sys_splice: __sys_epoll_pwait: pushm lr st.w --sp, ARG6 - rcall sys_epoll_pwait + call sys_epoll_pwait sub sp, -4 popm pc @@ -115,6 +115,6 @@ __sys_epoll_pwait: __sys_sync_file_range: pushm lr st.w --sp, ARG6 - rcall sys_sync_file_range + call sys_sync_file_range sub sp, -4 popm pc diff --git a/arch/avr32/lib/strnlen_user.S b/arch/avr32/lib/strnlen_user.S index 65ce11afa66..e46f4724962 100644 --- a/arch/avr32/lib/strnlen_user.S +++ b/arch/avr32/lib/strnlen_user.S @@ -48,7 +48,7 @@ adjust_length: lddpc lr, _task_size sub r11, lr, r12 mov r9, r11 - rcall __strnlen_user + call __strnlen_user cp.w r12, r9 brgt 1f popm pc -- cgit v1.2.3 From 61f3632fdcdcf547f6487f56b45976d7964756c4 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 14 Jan 2009 13:32:53 +0100 Subject: avr32: fix out-of-range rjmp instruction on large kernels Use .subsection to place fixups closer to their jump targets. This increases the maximum size of the kernel before we get link errors significantly. The problem here is that we don't have a "call"-ish pseudo-instruction to use instead of rjmp...we could add one, but that means we'll have to wait for a new toolchain release, wait until we're fairly sure most people are using it, etc... As an added bonus, it should decrease the RAM footprint slightly, though it might pollute the icache a bit more. Signed-off-by: Haavard Skinnemoen --- arch/avr32/include/asm/uaccess.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index ed092395215..245b2ee213c 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -230,10 +230,10 @@ extern int __put_user_bad(void); asm volatile( \ "1: ld." suffix " %1, %3 \n" \ "2: \n" \ - " .section .fixup, \"ax\" \n" \ + " .subsection 1 \n" \ "3: mov %0, %4 \n" \ " rjmp 2b \n" \ - " .previous \n" \ + " .subsection 0 \n" \ " .section __ex_table, \"a\" \n" \ " .long 1b, 3b \n" \ " .previous \n" \ @@ -295,10 +295,10 @@ extern int __put_user_bad(void); asm volatile( \ "1: st." suffix " %1, %3 \n" \ "2: \n" \ - " .section .fixup, \"ax\" \n" \ + " .subsection 1 \n" \ "3: mov %0, %4 \n" \ " rjmp 2b \n" \ - " .previous \n" \ + " .subsection 0 \n" \ " .section __ex_table, \"a\" \n" \ " .long 1b, 3b \n" \ " .previous \n" \ -- cgit v1.2.3 From 7de6883faad71e3a253d55b9e1a47b89ebce0a31 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:34 +0900 Subject: x86: fix pda_to_op() There's no instruction to move a 64bit immediate into memory location. Drop "i". 
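[ Illustration, not the pda macro itself: a stand-alone gcc inline-asm sketch of the constraint problem; store64() and slot are made up for the example. With "ri" the compiler is free to substitute a 64-bit immediate, but x86-64 stores to memory only encode sign-extended 32-bit immediates, so there is no movq $imm64,mem form; restricting the operand to "r" forces the value through a register first. ]

static unsigned long slot;

static inline void store64(unsigned long val)
{
	/* "r" (not "ri"): a register source always has a valid encoding */
	asm volatile("movq %1, %0" : "=m" (slot) : "r" (val));
}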
Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pda.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 2fbfff88df3..cbd3f48a832 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -78,7 +78,7 @@ do { \ case 8: \ asm(op "q %1,%%gs:%c2": \ "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ + "r" ((T__)val), \ "i"(pda_offset(field))); \ break; \ default: \ -- cgit v1.2.3 From f10fcd47120e80f66665567dbe17f5071c7aef52 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:34 +0900 Subject: x86: make early_per_cpu() a lvalue and use it Make early_per_cpu() a lvalue as per_cpu() is and use it where applicable. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/percpu.h | 6 +++--- arch/x86/include/asm/topology.h | 5 +---- arch/x86/kernel/apic.c | 13 ++----------- 3 files changed, 6 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index ece72053ba6..df644f3e53e 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -195,9 +195,9 @@ do { \ #define early_per_cpu_ptr(_name) (_name##_early_ptr) #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) #define early_per_cpu(_name, _cpu) \ - (early_per_cpu_ptr(_name) ? \ - early_per_cpu_ptr(_name)[_cpu] : \ - per_cpu(_name, _cpu)) + *(early_per_cpu_ptr(_name) ? \ + &early_per_cpu_ptr(_name)[_cpu] : \ + &per_cpu(_name, _cpu)) #else /* !CONFIG_SMP */ #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 4e2f2e0aab2..87ca3fd86e8 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -102,10 +102,7 @@ static inline int cpu_to_node(int cpu) /* Same function but used if called before per_cpu areas are setup */ static inline int early_cpu_to_node(int cpu) { - if (early_per_cpu_ptr(x86_cpu_to_node_map)) - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - - return per_cpu(x86_cpu_to_node_map, cpu); + return early_per_cpu(x86_cpu_to_node_map, cpu); } /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 38d6aab2358..48578795583 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1877,17 +1877,8 @@ void __cpuinit generic_processor_info(int apicid, int version) #endif #if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64) - /* are we being called early in kernel startup? */ - if (early_per_cpu_ptr(x86_cpu_to_apicid)) { - u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); - u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); - - cpu_to_apicid[cpu] = apicid; - bios_cpu_apicid[cpu] = apicid; - } else { - per_cpu(x86_cpu_to_apicid, cpu) = apicid; - per_cpu(x86_bios_cpu_apicid, cpu) = apicid; - } + early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; + early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; #endif set_cpu_possible(cpu, true); -- cgit v1.2.3 From c90aa894f0240084f2c6e42e2333b211d6cfe2b2 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 13 Jan 2009 20:41:34 +0900 Subject: x86: cleanup early setup_percpu references [ Based on original patch from Christoph Lameter and Mike Travis. ] * Ruggedize some calls in setup_percpu.c to prevent mishaps in early calls, particularly for non-critical functions. 
* Cleanup DEBUG_PER_CPU_MAPS usages and some comments. Signed-off-by: Mike Travis Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 56 ++++++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index bf63de72b64..56c63ac62b1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -15,6 +15,12 @@ #include #include +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +# define DBG(x...) printk(KERN_DEBUG x) +#else +# define DBG(x...) +#endif + #ifdef CONFIG_X86_LOCAL_APIC unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; @@ -27,31 +33,39 @@ unsigned int max_physical_apicid; physid_mask_t phys_cpu_present_map; #endif -/* map cpu index to physical APIC ID */ +/* + * Map cpu index to physical APIC ID + */ DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) -#define X86_64_NUMA 1 +#define X86_64_NUMA 1 /* (used later) */ -/* map cpu index to node index */ +/* + * Map cpu index to node index + */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); -/* which logical CPUs are on which nodes */ +/* + * Which logical CPUs are on which nodes + */ cpumask_t *node_to_cpumask_map; EXPORT_SYMBOL(node_to_cpumask_map); -/* setup node_to_cpumask_map */ +/* + * Setup node_to_cpumask_map + */ static void __init setup_node_to_cpumask_map(void); #else static inline void setup_node_to_cpumask_map(void) { } #endif -#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA /* * Copy data used in early init routines from the initial arrays to the * per cpu data areas. These arrays then become expendable and the @@ -200,6 +214,8 @@ void __init setup_per_cpu_areas(void) #endif per_cpu_offset(cpu) = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + + DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } /* Setup percpu data maps */ @@ -221,6 +237,7 @@ void __init setup_per_cpu_areas(void) * Requires node_possible_map to be valid. * * Note: node_to_cpumask() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 
*/ static void __init setup_node_to_cpumask_map(void) { @@ -236,6 +253,7 @@ static void __init setup_node_to_cpumask_map(void) /* allocate the map */ map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); + DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); pr_debug("Node to cpumask map at %p for %d nodes\n", map, nr_node_ids); @@ -248,17 +266,23 @@ void __cpuinit numa_set_node(int cpu, int node) { int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); - if (cpu_pda(cpu) && node != NUMA_NO_NODE) - cpu_pda(cpu)->nodenumber = node; - - if (cpu_to_node_map) + /* early setting, no percpu area yet */ + if (cpu_to_node_map) { cpu_to_node_map[cpu] = node; + return; + } - else if (per_cpu_offset(cpu)) - per_cpu(x86_cpu_to_node_map, cpu) = node; +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) { + printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); + dump_stack(); + return; + } +#endif + per_cpu(x86_cpu_to_node_map, cpu) = node; - else - pr_debug("Setting node for non-present cpu %d\n", cpu); + if (node != NUMA_NO_NODE) + cpu_pda(cpu)->nodenumber = node; } void __cpuinit numa_clear_node(int cpu) @@ -275,7 +299,7 @@ void __cpuinit numa_add_cpu(int cpu) void __cpuinit numa_remove_cpu(int cpu) { - cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]); + cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } #else /* CONFIG_DEBUG_PER_CPU_MAPS */ @@ -285,7 +309,7 @@ void __cpuinit numa_remove_cpu(int cpu) */ static void __cpuinit numa_set_cpumask(int cpu, int enable) { - int node = cpu_to_node(cpu); + int node = early_cpu_to_node(cpu); cpumask_t *mask; char buf[64]; -- cgit v1.2.3 From a698c823e15149941b0f0281527d0c0d1daf2639 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: make vmlinux_32.lds.S use PERCPU() macro Make vmlinux_32.lds.S use the generic PERCPU() macro instead of open coding it. This will ease future changes. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/vmlinux_32.lds.S | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 82c67559dde..3eba7f7bac0 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -178,14 +178,7 @@ SECTIONS __initramfs_end = .; } #endif - . = ALIGN(PAGE_SIZE); - .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { - __per_cpu_start = .; - *(.data.percpu.page_aligned) - *(.data.percpu) - *(.data.percpu.shared_aligned) - __per_cpu_end = .; - } + PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); /* freed after init ends here */ -- cgit v1.2.3 From 3e5d8f978435bb9ba4dfe3f4514e65e7885db1a9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: make percpu symbols zerobased on SMP [ Based on original patch from Christoph Lameter and Mike Travis. ] This patch makes percpu symbols zerobased on x86_64 SMP by adding PERCPU_VADDR() to vmlinux.lds.h which helps setting explicit vaddr on the percpu output section and using it in vmlinux_64.lds.S. A new PHDR is added as existing ones cannot contain sections near address zero. PERCPU_VADDR() also adds a new symbol __per_cpu_load which always points to the vaddr of the loaded percpu data.init region. The following adjustments have been made to accomodate the address change. * code to locate percpu gdt_page in head_64.S is updated to add the load address to the gdt_page offset. 
* __per_cpu_load is used in places where access to the init data area is necessary. * pda->data_offset is initialized soon after C code is entered as zero value doesn't work anymore. This patch is mostly taken from Mike Travis' "x86_64: Base percpu variables at zero" patch. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 2 ++ arch/x86/kernel/head_64.S | 24 +++++++++++++++++++++++- arch/x86/kernel/setup_percpu.c | 2 +- arch/x86/kernel/vmlinux_64.lds.S | 17 ++++++++++++++++- 4 files changed, 42 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index b9a4d8c4b93..bc2900ca82c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -44,6 +44,8 @@ void __init x86_64_init_pda(void) { _cpu_pda = __cpu_pda; cpu_pda(0) = &_boot_cpu_pda; + cpu_pda(0)->data_offset = + (unsigned long)(__per_cpu_load - __per_cpu_start); pda_init(0); } diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 0e275d49556..7ee0363871e 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -204,6 +204,23 @@ ENTRY(secondary_startup_64) pushq $0 popfq +#ifdef CONFIG_SMP + /* + * early_gdt_base should point to the gdt_page in static percpu init + * data area. Computing this requires two symbols - __per_cpu_load + * and per_cpu__gdt_page. As linker can't do no such relocation, do + * it by hand. As early_gdt_descr is manipulated by C code for + * secondary CPUs, this should be done only once for the boot CPU + * when early_gdt_descr_base contains zero. + */ + movq early_gdt_descr_base(%rip), %rax + testq %rax, %rax + jnz 1f + movq $__per_cpu_load, %rax + addq $per_cpu__gdt_page, %rax + movq %rax, early_gdt_descr_base(%rip) +1: +#endif /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace @@ -401,7 +418,12 @@ NEXT_PAGE(level2_spare_pgt) .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 - .quad per_cpu__gdt_page +#ifdef CONFIG_SMP +early_gdt_descr_base: + .quad 0x0000000000000000 +#else + .quad per_cpu__gdt_page +#endif ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 56c63ac62b1..44845842e72 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -213,7 +213,7 @@ void __init setup_per_cpu_areas(void) } #endif per_cpu_offset(cpu) = ptr - __per_cpu_start; - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 1a614c0e6be..f50280db0df 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -19,6 +19,9 @@ PHDRS { data PT_LOAD FLAGS(7); /* RWE */ user PT_LOAD FLAGS(7); /* RWE */ data.init PT_LOAD FLAGS(7); /* RWE */ +#ifdef CONFIG_SMP + percpu PT_LOAD FLAGS(7); /* RWE */ +#endif note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS @@ -208,14 +211,26 @@ SECTIONS __initramfs_end = .; #endif +#ifdef CONFIG_SMP + /* + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the + * output PHDR, so the next output section - __data_nosave - should + * switch it back to data.init. + */ + . = ALIGN(PAGE_SIZE); + PERCPU_VADDR(0, :percpu) +#else PERCPU(PAGE_SIZE) +#endif . = ALIGN(PAGE_SIZE); __init_end = .; . 
= ALIGN(PAGE_SIZE); __nosave_begin = .; - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } + .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { + *(.data.nosave) + } :data.init /* switch back to data.init, see PERCPU_VADDR() above */ . = ALIGN(PAGE_SIZE); __nosave_end = .; -- cgit v1.2.3 From f32ff5388d86518c0375ccdb330d3b459b9c405e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: load pointer to pda into %gs while brining up a CPU [ Based on original patch from Christoph Lameter and Mike Travis. ] CPU startup code in head_64.S loaded address of a zero page into %gs for temporary use till pda is loaded but address to the actual pda is available at the point. Load the real address directly instead. This will help unifying percpu and pda handling later on. This patch is mostly taken from Mike Travis' "x86_64: Fold pda into per cpu area" patch. Signed-off-by: Tejun Heo --- arch/x86/include/asm/trampoline.h | 1 + arch/x86/kernel/acpi/sleep.c | 1 + arch/x86/kernel/head64.c | 4 ++-- arch/x86/kernel/head_64.S | 15 ++++++++++----- arch/x86/kernel/smpboot.c | 1 + 5 files changed, 15 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index 780ba0ab94f..90f06c25221 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h @@ -13,6 +13,7 @@ extern unsigned char *trampoline_base; extern unsigned long init_rsp; extern unsigned long initial_code; +extern unsigned long initial_gs; #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) #define TRAMPOLINE_BASE 0x6000 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 707c1f6f95f..9ff67f8dc2c 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -101,6 +101,7 @@ int acpi_save_state_mem(void) stack_start.sp = temp_stack + sizeof(temp_stack); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); + initial_gs = (unsigned long)cpu_pda(smp_processor_id()); #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index bc2900ca82c..76ffba2aa66 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -26,8 +26,8 @@ #include #include -/* boot cpu pda */ -static struct x8664_pda _boot_cpu_pda; +/* boot cpu pda, referenced by head_64.S to initialize %gs for boot CPU */ +struct x8664_pda _boot_cpu_pda; #ifdef CONFIG_SMP /* diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 7ee0363871e..2f0ab008988 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -243,12 +243,15 @@ ENTRY(secondary_startup_64) movl %eax,%fs movl %eax,%gs - /* - * Setup up a dummy PDA. this is just for some early bootup code - * that does in_interrupt() - */ + /* Set up %gs. + * + * %gs should point to the pda. For initial boot, make %gs point + * to the _boot_cpu_pda in data section. For a secondary CPU, + * initial_gs should be set to its pda address before the CPU runs + * this code. 
+ */ movl $MSR_GS_BASE,%ecx - movq $empty_zero_page,%rax + movq initial_gs(%rip),%rax movq %rax,%rdx shrq $32,%rdx wrmsr @@ -274,6 +277,8 @@ ENTRY(secondary_startup_64) .align 8 ENTRY(initial_code) .quad x86_64_start_kernel + ENTRY(initial_gs) + .quad _boot_cpu_pda __FINITDATA ENTRY(stack_start) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 1a712da1dfa..70d846628bb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -854,6 +854,7 @@ do_rest: #else cpu_pda(cpu)->pcurrent = c_idle.idle; clear_tsk_thread_flag(c_idle.idle, TIF_FORK); + initial_gs = (unsigned long)cpu_pda(cpu); #endif early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); initial_code = (unsigned long)start_secondary; -- cgit v1.2.3 From c8f3329a0ddd751241e96b4100df7eda14b2cbc6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: use static _cpu_pda array _cpu_pda array first uses statically allocated storage in data.init and then switches to allocated bootmem to conserve space. However, after folding pda area into percpu area, _cpu_pda array will be removed completely. Drop the reallocation part to simplify the code for soon-to-follow changes. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pda.h | 3 ++- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/head64.c | 12 ------------ arch/x86/kernel/setup_percpu.c | 14 +++----------- 4 files changed, 6 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index cbd3f48a832..2d5b49c3248 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -5,6 +5,7 @@ #include #include #include +#include #include /* Per processor datastructure. %gs points to it while the kernel runs */ @@ -39,7 +40,7 @@ struct x8664_pda { unsigned irq_spurious_count; } ____cacheline_aligned_in_smp; -extern struct x8664_pda **_cpu_pda; +extern struct x8664_pda *_cpu_pda[NR_CPUS]; extern void pda_init(int); #define cpu_pda(i) (_cpu_pda[i]) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f0025846244..c116c599326 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -879,7 +879,7 @@ static __init int setup_disablecpuid(char *arg) __setup("clearcpuid=", setup_disablecpuid); #ifdef CONFIG_X86_64 -struct x8664_pda **_cpu_pda __read_mostly; +struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; EXPORT_SYMBOL(_cpu_pda); struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 76ffba2aa66..462d0beccb6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -29,20 +29,8 @@ /* boot cpu pda, referenced by head_64.S to initialize %gs for boot CPU */ struct x8664_pda _boot_cpu_pda; -#ifdef CONFIG_SMP -/* - * We install an empty cpu_pda pointer table to indicate to early users - * (numa_set_node) that the cpu_pda pointer table for cpus other than - * the boot cpu is not yet setup. 
- */ -static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata; -#else -static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly; -#endif - void __init x86_64_init_pda(void) { - _cpu_pda = __cpu_pda; cpu_pda(0) = &_boot_cpu_pda; cpu_pda(0)->data_offset = (unsigned long)(__per_cpu_load - __per_cpu_start); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 44845842e72..73ab01b297c 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -114,7 +114,6 @@ static inline void setup_cpu_pda_map(void) { } static void __init setup_cpu_pda_map(void) { char *pda; - struct x8664_pda **new_cpu_pda; unsigned long size; int cpu; @@ -122,28 +121,21 @@ static void __init setup_cpu_pda_map(void) /* allocate cpu_pda array and pointer table */ { - unsigned long tsize = nr_cpu_ids * sizeof(void *); unsigned long asize = size * (nr_cpu_ids - 1); - tsize = roundup(tsize, cache_line_size()); - new_cpu_pda = alloc_bootmem(tsize + asize); - pda = (char *)new_cpu_pda + tsize; + pda = alloc_bootmem(asize); } /* initialize pointer table to static pda's */ for_each_possible_cpu(cpu) { if (cpu == 0) { /* leave boot cpu pda in place */ - new_cpu_pda[0] = cpu_pda(0); continue; } - new_cpu_pda[cpu] = (struct x8664_pda *)pda; - new_cpu_pda[cpu]->in_bootmem = 1; + cpu_pda(cpu) = (struct x8664_pda *)pda; + cpu_pda(cpu)->in_bootmem = 1; pda += size; } - - /* point to new pointer table */ - _cpu_pda = new_cpu_pda; } #endif /* CONFIG_SMP && CONFIG_X86_64 */ -- cgit v1.2.3 From 1a51e3a0aed18767cf2762e95456ecfeb0bca5e6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: fold pda into percpu area on SMP [ Based on original patch from Christoph Lameter and Mike Travis. ] Currently pdas and percpu areas are allocated separately. %gs points to local pda and percpu area can be reached using pda->data_offset. This patch folds pda into percpu area. Due to strange gcc requirement, pda needs to be at the beginning of the percpu area so that pda->stack_canary is at %gs:40. To achieve this, a new percpu output section macro - PERCPU_VADDR_PREALLOC() - is added and used to reserve pda sized chunk at the start of the percpu area. After this change, for boot cpu, %gs first points to pda in the data.init area and later during setup_per_cpu_areas() gets updated to point to the actual pda. This means that setup_per_cpu_areas() need to reload %gs for CPU0 while clearing pda area for other cpus as cpu0 already has modified it when control reaches setup_per_cpu_areas(). This patch also removes now unnecessary get_local_pda() and its call sites. A lot of this patch is taken from Mike Travis' "x86_64: Fold pda into per cpu area" patch. 
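[ Illustration, not code from the patch: the %gs:40 requirement as a compile-time layout check. The struct below is a placeholder patterned on the pda fields shown in the hunks that follow; offsets assume the usual x86-64 type sizes. gcc's -fstack-protector hard-codes the canary load from %gs:40, so whatever object %gs points at must keep that member at byte offset 40 - which is why the pda has to sit at the very start of the per-cpu area. ]

#include <stddef.h>

struct pda_like {
	void		*pcurrent;	/*  0 */
	unsigned long	dummy;		/*  8 */
	unsigned long	kernelstack;	/* 16 */
	unsigned long	oldrsp;		/* 24 */
	int		irqcount;	/* 32 */
	unsigned int	cpunumber;	/* 36 */
	unsigned long	stack_canary;	/* 40: read by the stackprotector prologue */
};

_Static_assert(offsetof(struct pda_like, stack_canary) == 40,
	       "stack canary must live at %gs:40");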
Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/percpu.h | 8 +++ arch/x86/include/asm/smp.h | 2 - arch/x86/kernel/asm-offsets_64.c | 1 + arch/x86/kernel/cpu/common.c | 6 +-- arch/x86/kernel/head64.c | 8 ++- arch/x86/kernel/head_64.S | 15 ++++-- arch/x86/kernel/setup_percpu.c | 107 +++++++++++++++++---------------------- arch/x86/kernel/smpboot.c | 60 +--------------------- arch/x86/kernel/vmlinux_64.lds.S | 6 ++- arch/x86/xen/smp.c | 10 ---- 10 files changed, 80 insertions(+), 143 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index df644f3e53e..0ed77cf33f7 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -1,6 +1,14 @@ #ifndef _ASM_X86_PERCPU_H #define _ASM_X86_PERCPU_H +#ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_64 +extern void load_pda_offset(int cpu); +#else +static inline void load_pda_offset(int cpu) { } +#endif +#endif + #ifdef CONFIG_X86_64 #include diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a8cea7b0943..127415402ea 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -19,8 +19,6 @@ #include #include -extern int __cpuinit get_local_pda(int cpu); - extern int smp_num_siblings; extern unsigned int num_processors; diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 1d41d3f1edb..f8d1b047ef4 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -56,6 +56,7 @@ int main(void) ENTRY(cpunumber); ENTRY(irqstackptr); ENTRY(data_offset); + DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); #undef ENTRY #ifdef CONFIG_PARAVIRT diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c116c599326..7041acdf557 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -893,10 +893,8 @@ void __cpuinit pda_init(int cpu) /* Setup up data that may be needed in __get_free_pages early */ loadsegment(fs, 0); loadsegment(gs, 0); - /* Memory clobbers used to order PDA accessed */ - mb(); - wrmsrl(MSR_GS_BASE, pda); - mb(); + + load_pda_offset(cpu); pda->cpunumber = cpu; pda->irqcount = -1; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 462d0beccb6..1a311293f73 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -26,12 +26,18 @@ #include #include -/* boot cpu pda, referenced by head_64.S to initialize %gs for boot CPU */ +#ifndef CONFIG_SMP +/* boot cpu pda, referenced by head_64.S to initialize %gs on UP */ struct x8664_pda _boot_cpu_pda; +#endif void __init x86_64_init_pda(void) { +#ifdef CONFIG_SMP + cpu_pda(0) = (void *)__per_cpu_load; +#else cpu_pda(0) = &_boot_cpu_pda; +#endif cpu_pda(0)->data_offset = (unsigned long)(__per_cpu_load - __per_cpu_start); pda_init(0); diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 2f0ab008988..7a995d0e9f7 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -245,10 +245,13 @@ ENTRY(secondary_startup_64) /* Set up %gs. * - * %gs should point to the pda. For initial boot, make %gs point - * to the _boot_cpu_pda in data section. For a secondary CPU, - * initial_gs should be set to its pda address before the CPU runs - * this code. + * On SMP, %gs should point to the per-cpu area. For initial + * boot, make %gs point to the init data section. For a + * secondary CPU,initial_gs should be set to its pda address + * before the CPU runs this code. 
+ * + * On UP, initial_gs points to _boot_cpu_pda and doesn't + * change. */ movl $MSR_GS_BASE,%ecx movq initial_gs(%rip),%rax @@ -278,7 +281,11 @@ ENTRY(secondary_startup_64) ENTRY(initial_code) .quad x86_64_start_kernel ENTRY(initial_gs) +#ifdef CONFIG_SMP + .quad __per_cpu_load +#else .quad _boot_cpu_pda +#endif __FINITDATA ENTRY(stack_start) diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 73ab01b297c..63d46280227 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #ifdef CONFIG_DEBUG_PER_CPU_MAPS @@ -65,6 +66,36 @@ static void __init setup_node_to_cpumask_map(void); static inline void setup_node_to_cpumask_map(void) { } #endif +#ifdef CONFIG_X86_64 +void __cpuinit load_pda_offset(int cpu) +{ + /* Memory clobbers used to order pda/percpu accesses */ + mb(); + wrmsrl(MSR_GS_BASE, cpu_pda(cpu)); + mb(); +} + +#endif /* CONFIG_SMP && CONFIG_X86_64 */ + +#ifdef CONFIG_X86_64 + +/* correctly size the local cpu masks */ +static void setup_cpu_local_masks(void) +{ + alloc_bootmem_cpumask_var(&cpu_initialized_mask); + alloc_bootmem_cpumask_var(&cpu_callin_mask); + alloc_bootmem_cpumask_var(&cpu_callout_mask); + alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); +} + +#else /* CONFIG_X86_32 */ + +static inline void setup_cpu_local_masks(void) +{ +} + +#endif /* CONFIG_X86_32 */ + #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA /* * Copy data used in early init routines from the initial arrays to the @@ -101,63 +132,7 @@ static void __init setup_per_cpu_maps(void) */ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); -static inline void setup_cpu_pda_map(void) { } - -#elif !defined(CONFIG_SMP) -static inline void setup_cpu_pda_map(void) { } - -#else /* CONFIG_SMP && CONFIG_X86_64 */ - -/* - * Allocate cpu_pda pointer table and array via alloc_bootmem. 
- */ -static void __init setup_cpu_pda_map(void) -{ - char *pda; - unsigned long size; - int cpu; - - size = roundup(sizeof(struct x8664_pda), cache_line_size()); - - /* allocate cpu_pda array and pointer table */ - { - unsigned long asize = size * (nr_cpu_ids - 1); - - pda = alloc_bootmem(asize); - } - - /* initialize pointer table to static pda's */ - for_each_possible_cpu(cpu) { - if (cpu == 0) { - /* leave boot cpu pda in place */ - continue; - } - cpu_pda(cpu) = (struct x8664_pda *)pda; - cpu_pda(cpu)->in_bootmem = 1; - pda += size; - } -} - -#endif /* CONFIG_SMP && CONFIG_X86_64 */ - -#ifdef CONFIG_X86_64 - -/* correctly size the local cpu masks */ -static void setup_cpu_local_masks(void) -{ - alloc_bootmem_cpumask_var(&cpu_initialized_mask); - alloc_bootmem_cpumask_var(&cpu_callin_mask); - alloc_bootmem_cpumask_var(&cpu_callout_mask); - alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); -} - -#else /* CONFIG_X86_32 */ - -static inline void setup_cpu_local_masks(void) -{ -} - -#endif /* CONFIG_X86_32 */ +#endif /* * Great future plan: @@ -171,9 +146,6 @@ void __init setup_per_cpu_areas(void) int cpu; unsigned long align = 1; - /* Setup cpu_pda map */ - setup_cpu_pda_map(); - /* Copy section for each CPU (we discard the original) */ old_size = PERCPU_ENOUGH_ROOM; align = max_t(unsigned long, PAGE_SIZE, align); @@ -204,8 +176,21 @@ void __init setup_per_cpu_areas(void) cpu, node, __pa(ptr)); } #endif - per_cpu_offset(cpu) = ptr - __per_cpu_start; + memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); +#ifdef CONFIG_X86_64 + cpu_pda(cpu) = (void *)ptr; + + /* + * CPU0 modified pda in the init data area, reload pda + * offset for CPU0 and clear the area for others. + */ + if (cpu == 0) + load_pda_offset(0); + else + memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); +#endif + per_cpu_offset(cpu) = ptr - __per_cpu_start; DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 70d846628bb..f2f77ca494d 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -744,52 +744,6 @@ static void __cpuinit do_fork_idle(struct work_struct *work) complete(&c_idle->done); } -#ifdef CONFIG_X86_64 - -/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */ -static void __ref free_bootmem_pda(struct x8664_pda *oldpda) -{ - if (!after_bootmem) - free_bootmem((unsigned long)oldpda, sizeof(*oldpda)); -} - -/* - * Allocate node local memory for the AP pda. - * - * Must be called after the _cpu_pda pointer table is initialized. 
- */ -int __cpuinit get_local_pda(int cpu) -{ - struct x8664_pda *oldpda, *newpda; - unsigned long size = sizeof(struct x8664_pda); - int node = cpu_to_node(cpu); - - if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem) - return 0; - - oldpda = cpu_pda(cpu); - newpda = kmalloc_node(size, GFP_ATOMIC, node); - if (!newpda) { - printk(KERN_ERR "Could not allocate node local PDA " - "for CPU %d on node %d\n", cpu, node); - - if (oldpda) - return 0; /* have a usable pda */ - else - return -1; - } - - if (oldpda) { - memcpy(newpda, oldpda, size); - free_bootmem_pda(oldpda); - } - - newpda->in_bootmem = 0; - cpu_pda(cpu) = newpda; - return 0; -} -#endif /* CONFIG_X86_64 */ - static int __cpuinit do_boot_cpu(int apicid, int cpu) /* * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad @@ -807,16 +761,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) }; INIT_WORK(&c_idle.work, do_fork_idle); -#ifdef CONFIG_X86_64 - /* Allocate node local memory for AP pdas */ - if (cpu > 0) { - boot_error = get_local_pda(cpu); - if (boot_error) - goto restore_state; - /* if can't get pda memory, can't start cpu */ - } -#endif - alternatives_smp_switch(1); c_idle.idle = get_idle_for_cpu(cpu); @@ -931,9 +875,7 @@ do_rest: inquire_remote_apic(apicid); } } -#ifdef CONFIG_X86_64 -restore_state: -#endif + if (boot_error) { /* Try to put things back the way they were before ... */ numa_remove_cpu(cpu); /* was set by numa_add_cpu */ diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index f50280db0df..962f21f1d4d 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -5,6 +5,7 @@ #define LOAD_OFFSET __START_KERNEL_map #include +#include #include #undef i386 /* in case the preprocessor is a 32bit one */ @@ -215,10 +216,11 @@ SECTIONS /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - __data_nosave - should - * switch it back to data.init. + * switch it back to data.init. Also, pda should be at the head of + * percpu area. Preallocate it. */ . = ALIGN(PAGE_SIZE); - PERCPU_VADDR(0, :percpu) + PERCPU_VADDR_PREALLOC(0, :percpu, pda_size) #else PERCPU(PAGE_SIZE) #endif diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c44e2069c7c..83fa4236477 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -283,16 +283,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) struct task_struct *idle = idle_task(cpu); int rc; -#ifdef CONFIG_X86_64 - /* Allocate node local memory for AP pdas */ - WARN_ON(cpu == 0); - if (cpu > 0) { - rc = get_local_pda(cpu); - if (rc) - return rc; - } -#endif - #ifdef CONFIG_X86_32 init_gdt(cpu); per_cpu(current_task, cpu) = idle; -- cgit v1.2.3 From 9939ddaff52787b2a7c1adf1b2afc95421aa0884 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: merge 64 and 32 SMP percpu handling Now that pda is allocated as part of percpu, percpu doesn't need to be accessed through pda. Unify x86_64 SMP percpu access with x86_32 SMP one. Other than the segment register, operand size and the base of percpu symbols, they behave identical now. This patch replaces now unnecessary pda->data_offset with a dummy field which is necessary to keep stack_canary at its place. This patch also moves per_cpu_offset initialization out of init_gdt() into setup_per_cpu_areas(). Note that this change also necessitates explicit per_cpu_offset initializations in voyager_smp.c. 
With this change, x86_OP_percpu()'s are as efficient on x86_64 as on x86_32 and also x86_64 can use assembly PER_CPU macros. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pda.h | 3 +- arch/x86/include/asm/percpu.h | 127 +++++++++++------------------------- arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/entry_64.S | 7 +- arch/x86/kernel/head64.c | 2 - arch/x86/kernel/setup_percpu.c | 15 +++-- arch/x86/kernel/smpcommon.c | 3 +- arch/x86/mach-voyager/voyager_smp.c | 2 + 8 files changed, 55 insertions(+), 105 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 2d5b49c3248..e91558e3785 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -11,8 +11,7 @@ /* Per processor datastructure. %gs points to it while the kernel runs */ struct x8664_pda { struct task_struct *pcurrent; /* 0 Current process */ - unsigned long data_offset; /* 8 Per cpu data offset from linker - address */ + unsigned long dummy; unsigned long kernelstack; /* 16 top of kernel stack for current */ unsigned long oldrsp; /* 24 user rsp for system call */ int irqcount; /* 32 Irq nesting counter. Starts -1 */ diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0ed77cf33f7..556f84b9ea9 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -1,62 +1,13 @@ #ifndef _ASM_X86_PERCPU_H #define _ASM_X86_PERCPU_H -#ifndef __ASSEMBLY__ #ifdef CONFIG_X86_64 -extern void load_pda_offset(int cpu); +#define __percpu_seg gs +#define __percpu_mov_op movq #else -static inline void load_pda_offset(int cpu) { } -#endif -#endif - -#ifdef CONFIG_X86_64 -#include - -/* Same as asm-generic/percpu.h, except that we store the per cpu offset - in the PDA. Longer term the PDA and every per cpu variable - should be just put into a single section and referenced directly - from %gs */ - -#ifdef CONFIG_SMP -#include - -#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) -#define __my_cpu_offset read_pda(data_offset) - -#define per_cpu_offset(x) (__per_cpu_offset(x)) - +#define __percpu_seg fs +#define __percpu_mov_op movl #endif -#include - -DECLARE_PER_CPU(struct x8664_pda, pda); - -/* - * These are supposed to be implemented as a single instruction which - * operates on the per-cpu data base segment. x86-64 doesn't have - * that yet, so this is a fairly inefficient workaround for the - * meantime. The single instruction is atomic with respect to - * preemption and interrupts, so we need to explicitly disable - * interrupts here to achieve the same effect. However, because it - * can be used from within interrupt-disable/enable, we can't actually - * disable interrupts; disabling preemption is enough. - */ -#define x86_read_percpu(var) \ - ({ \ - typeof(per_cpu_var(var)) __tmp; \ - preempt_disable(); \ - __tmp = __get_cpu_var(var); \ - preempt_enable(); \ - __tmp; \ - }) - -#define x86_write_percpu(var, val) \ - do { \ - preempt_disable(); \ - __get_cpu_var(var) = (val); \ - preempt_enable(); \ - } while(0) - -#else /* CONFIG_X86_64 */ #ifdef __ASSEMBLY__ @@ -73,42 +24,26 @@ DECLARE_PER_CPU(struct x8664_pda, pda); * PER_CPU(cpu_gdt_descr, %ebx) */ #ifdef CONFIG_SMP -#define PER_CPU(var, reg) \ - movl %fs:per_cpu__##this_cpu_off, reg; \ +#define PER_CPU(var, reg) \ + __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \ lea per_cpu__##var(reg), reg -#define PER_CPU_VAR(var) %fs:per_cpu__##var +#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var #else /* ! 
SMP */ -#define PER_CPU(var, reg) \ - movl $per_cpu__##var, reg +#define PER_CPU(var, reg) \ + __percpu_mov_op $per_cpu__##var, reg #define PER_CPU_VAR(var) per_cpu__##var #endif /* SMP */ #else /* ...!ASSEMBLY */ -/* - * PER_CPU finds an address of a per-cpu variable. - * - * Args: - * var - variable name - * cpu - 32bit register containing the current CPU number - * - * The resulting address is stored in the "cpu" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ -#ifdef CONFIG_SMP - -#define __my_cpu_offset x86_read_percpu(this_cpu_off) - -/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ -#define __percpu_seg "%%fs:" +#include -#else /* !SMP */ - -#define __percpu_seg "" - -#endif /* SMP */ +#ifdef CONFIG_SMP +#define __percpu_seg_str "%%"__stringify(__percpu_seg)":" +#define __my_cpu_offset x86_read_percpu(this_cpu_off) +#else +#define __percpu_seg_str +#endif #include @@ -128,20 +63,25 @@ do { \ } \ switch (sizeof(var)) { \ case 1: \ - asm(op "b %1,"__percpu_seg"%0" \ + asm(op "b %1,"__percpu_seg_str"%0" \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ case 2: \ - asm(op "w %1,"__percpu_seg"%0" \ + asm(op "w %1,"__percpu_seg_str"%0" \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ case 4: \ - asm(op "l %1,"__percpu_seg"%0" \ + asm(op "l %1,"__percpu_seg_str"%0" \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ + case 8: \ + asm(op "q %1,"__percpu_seg_str"%0" \ + : "+m" (var) \ + : "r" ((T__)val)); \ + break; \ default: __bad_percpu_size(); \ } \ } while (0) @@ -151,17 +91,22 @@ do { \ typeof(var) ret__; \ switch (sizeof(var)) { \ case 1: \ - asm(op "b "__percpu_seg"%1,%0" \ + asm(op "b "__percpu_seg_str"%1,%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ case 2: \ - asm(op "w "__percpu_seg"%1,%0" \ + asm(op "w "__percpu_seg_str"%1,%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ case 4: \ - asm(op "l "__percpu_seg"%1,%0" \ + asm(op "l "__percpu_seg_str"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + case 8: \ + asm(op "q "__percpu_seg_str"%1,%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ @@ -175,8 +120,14 @@ do { \ #define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) + +#ifdef CONFIG_X86_64 +extern void load_pda_offset(int cpu); +#else +static inline void load_pda_offset(int cpu) { } +#endif + #endif /* !__ASSEMBLY__ */ -#endif /* !CONFIG_X86_64 */ #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index f8d1b047ef4..f4cc81bfbf8 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -55,7 +55,6 @@ int main(void) ENTRY(irqcount); ENTRY(cpunumber); ENTRY(irqstackptr); - ENTRY(data_offset); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); #undef ENTRY diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e28c7a98779..4833f3a1965 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -52,6 +52,7 @@ #include #include #include +#include /* Avoid __ASSEMBLER__'ifying just for this. 
*/ #include @@ -1072,10 +1073,10 @@ ENTRY(\sym) TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ - movq %gs:pda_data_offset, %rbp - subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) + PER_CPU(init_tss, %rbp) + subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) call \do_sym - addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) + addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC END(\sym) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 1a311293f73..e99b661a97f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -38,8 +38,6 @@ void __init x86_64_init_pda(void) #else cpu_pda(0) = &_boot_cpu_pda; #endif - cpu_pda(0)->data_offset = - (unsigned long)(__per_cpu_load - __per_cpu_start); pda_init(0); } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 63d46280227..be1ff34db11 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -125,14 +125,14 @@ static void __init setup_per_cpu_maps(void) #endif } -#ifdef CONFIG_X86_32 -/* - * Great future not-so-futuristic plan: make i386 and x86_64 do it - * the same way - */ +#ifdef CONFIG_X86_64 +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { + [0] = (unsigned long)__per_cpu_load, +}; +#else unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; -EXPORT_SYMBOL(__per_cpu_offset); #endif +EXPORT_SYMBOL(__per_cpu_offset); /* * Great future plan: @@ -178,6 +178,7 @@ void __init setup_per_cpu_areas(void) #endif memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); + per_cpu_offset(cpu) = ptr - __per_cpu_start; #ifdef CONFIG_X86_64 cpu_pda(cpu) = (void *)ptr; @@ -190,7 +191,7 @@ void __init setup_per_cpu_areas(void) else memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); #endif - per_cpu_offset(cpu) = ptr - __per_cpu_start; + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 397e309839d..84395fabc41 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -4,10 +4,10 @@ #include #include -#ifdef CONFIG_X86_32 DEFINE_PER_CPU(unsigned long, this_cpu_off); EXPORT_PER_CPU_SYMBOL(this_cpu_off); +#ifdef CONFIG_X86_32 /* * Initialize the CPU's GDT. 
This is either the boot CPU doing itself * (still using the master per-cpu area), or a CPU doing it for a @@ -24,7 +24,6 @@ __cpuinit void init_gdt(int cpu) write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); - per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; per_cpu(cpu_number, cpu) = cpu; } #endif diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 9840b7ec749..1a48368acb0 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -531,6 +531,7 @@ static void __init do_boot_cpu(__u8 cpu) stack_start.sp = (void *)idle->thread.sp; init_gdt(cpu); + per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; per_cpu(current_task, cpu) = idle; early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); irq_ctx_init(cpu); @@ -1748,6 +1749,7 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) static void __cpuinit voyager_smp_prepare_boot_cpu(void) { init_gdt(smp_processor_id()); + per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; switch_to_new_gdt(); cpu_set(smp_processor_id(), cpu_online_map); -- cgit v1.2.3 From b12d8db8fbfaed1e8222a15333a3645599636854 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: make pda a percpu variable [ Based on original patch from Christoph Lameter and Mike Travis. ] As pda is now allocated in percpu area, it can easily be made a proper percpu variable. Make it so by defining per cpu symbol from linker script and declaring it in C code for SMP and simply defining it for UP. This change cleans up code and brings SMP and UP closer a bit. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pda.h | 5 +++-- arch/x86/kernel/cpu/common.c | 3 --- arch/x86/kernel/head64.c | 10 ---------- arch/x86/kernel/head_64.S | 5 +++-- arch/x86/kernel/setup_percpu.c | 16 ++++++++++++++-- arch/x86/kernel/vmlinux_64.lds.S | 4 +++- 6 files changed, 23 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index e91558e3785..66ae1043393 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -7,6 +7,7 @@ #include #include #include +#include /* Per processor datastructure. 
%gs points to it while the kernel runs */ struct x8664_pda { @@ -39,10 +40,10 @@ struct x8664_pda { unsigned irq_spurious_count; } ____cacheline_aligned_in_smp; -extern struct x8664_pda *_cpu_pda[NR_CPUS]; +DECLARE_PER_CPU(struct x8664_pda, __pda); extern void pda_init(int); -#define cpu_pda(i) (_cpu_pda[i]) +#define cpu_pda(cpu) (&per_cpu(__pda, cpu)) /* * There is no fast way to get the base address of the PDA, all the accesses diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7041acdf557..c49498d4083 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -879,9 +879,6 @@ static __init int setup_disablecpuid(char *arg) __setup("clearcpuid=", setup_disablecpuid); #ifdef CONFIG_X86_64 -struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; -EXPORT_SYMBOL(_cpu_pda); - struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index e99b661a97f..71b6f6ec96a 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -26,18 +26,8 @@ #include #include -#ifndef CONFIG_SMP -/* boot cpu pda, referenced by head_64.S to initialize %gs on UP */ -struct x8664_pda _boot_cpu_pda; -#endif - void __init x86_64_init_pda(void) { -#ifdef CONFIG_SMP - cpu_pda(0) = (void *)__per_cpu_load; -#else - cpu_pda(0) = &_boot_cpu_pda; -#endif pda_init(0); } diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 7a995d0e9f7..c8ace880661 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_PARAVIRT #include @@ -250,7 +251,7 @@ ENTRY(secondary_startup_64) * secondary CPU,initial_gs should be set to its pda address * before the CPU runs this code. * - * On UP, initial_gs points to _boot_cpu_pda and doesn't + * On UP, initial_gs points to PER_CPU_VAR(__pda) and doesn't * change. */ movl $MSR_GS_BASE,%ecx @@ -284,7 +285,7 @@ ENTRY(secondary_startup_64) #ifdef CONFIG_SMP .quad __per_cpu_load #else - .quad _boot_cpu_pda + .quad PER_CPU_VAR(__pda) #endif __FINITDATA diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index be1ff34db11..daeedf82c15 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -66,6 +66,16 @@ static void __init setup_node_to_cpumask_map(void); static inline void setup_node_to_cpumask_map(void) { } #endif +/* + * Define load_pda_offset() and per-cpu __pda for x86_64. + * load_pda_offset() is responsible for loading the offset of pda into + * %gs. + * + * On SMP, pda offset also duals as percpu base address and thus it + * should be at the start of per-cpu area. To achieve this, it's + * preallocated in vmlinux_64.lds.S directly instead of using + * DEFINE_PER_CPU(). + */ #ifdef CONFIG_X86_64 void __cpuinit load_pda_offset(int cpu) { @@ -74,6 +84,10 @@ void __cpuinit load_pda_offset(int cpu) wrmsrl(MSR_GS_BASE, cpu_pda(cpu)); mb(); } +#ifndef CONFIG_SMP +DEFINE_PER_CPU(struct x8664_pda, __pda); +EXPORT_PER_CPU_SYMBOL(__pda); +#endif #endif /* CONFIG_SMP && CONFIG_X86_64 */ @@ -180,8 +194,6 @@ void __init setup_per_cpu_areas(void) memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); per_cpu_offset(cpu) = ptr - __per_cpu_start; #ifdef CONFIG_X86_64 - cpu_pda(cpu) = (void *)ptr; - /* * CPU0 modified pda in the init data area, reload pda * offset for CPU0 and clear the area for others. 
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 962f21f1d4d..d2a0baa87d1 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -217,10 +217,12 @@ SECTIONS * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - __data_nosave - should * switch it back to data.init. Also, pda should be at the head of - * percpu area. Preallocate it. + * percpu area. Preallocate it and define the percpu offset symbol + * so that it can be accessed as a percpu variable. */ . = ALIGN(PAGE_SIZE); PERCPU_VADDR_PREALLOC(0, :percpu, pda_size) + per_cpu____pda = __per_cpu_start; #else PERCPU(PAGE_SIZE) #endif -- cgit v1.2.3 From 49357d19e4fb31e28796eaff83499e7584c26878 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: convert pda ops to wrappers around x86 percpu accessors pda is now a percpu variable and there's no reason it can't use plain x86 percpu accessors. Add x86_test_and_clear_bit_percpu() and replace pda op implementations with wrappers around x86 percpu accessors. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pda.h | 88 +++------------------------------------- arch/x86/include/asm/percpu.h | 10 +++++ arch/x86/kernel/vmlinux_64.lds.S | 1 - arch/x86/kernel/x8664_ksyms_64.c | 2 - 4 files changed, 16 insertions(+), 85 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 66ae1043393..e3d3a081d79 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -45,91 +45,15 @@ extern void pda_init(int); #define cpu_pda(cpu) (&per_cpu(__pda, cpu)) -/* - * There is no fast way to get the base address of the PDA, all the accesses - * have to mention %fs/%gs. So it needs to be done this Torvaldian way. - */ -extern void __bad_pda_field(void) __attribute__((noreturn)); - -/* - * proxy_pda doesn't actually exist, but tell gcc it is accessed for - * all PDA accesses so it gets read/write dependencies right. 
- */ -extern struct x8664_pda _proxy_pda; - -#define pda_offset(field) offsetof(struct x8664_pda, field) - -#define pda_to_op(op, field, val) \ -do { \ - typedef typeof(_proxy_pda.field) T__; \ - if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ - switch (sizeof(_proxy_pda.field)) { \ - case 2: \ - asm(op "w %1,%%gs:%c2" : \ - "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ - "i"(pda_offset(field))); \ - break; \ - case 4: \ - asm(op "l %1,%%gs:%c2" : \ - "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ - "i" (pda_offset(field))); \ - break; \ - case 8: \ - asm(op "q %1,%%gs:%c2": \ - "+m" (_proxy_pda.field) : \ - "r" ((T__)val), \ - "i"(pda_offset(field))); \ - break; \ - default: \ - __bad_pda_field(); \ - } \ -} while (0) - -#define pda_from_op(op, field) \ -({ \ - typeof(_proxy_pda.field) ret__; \ - switch (sizeof(_proxy_pda.field)) { \ - case 2: \ - asm(op "w %%gs:%c1,%0" : \ - "=r" (ret__) : \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ - break; \ - case 4: \ - asm(op "l %%gs:%c1,%0": \ - "=r" (ret__): \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ - break; \ - case 8: \ - asm(op "q %%gs:%c1,%0": \ - "=r" (ret__) : \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ - break; \ - default: \ - __bad_pda_field(); \ - } \ - ret__; \ -}) - -#define read_pda(field) pda_from_op("mov", field) -#define write_pda(field, val) pda_to_op("mov", field, val) -#define add_pda(field, val) pda_to_op("add", field, val) -#define sub_pda(field, val) pda_to_op("sub", field, val) -#define or_pda(field, val) pda_to_op("or", field, val) +#define read_pda(field) x86_read_percpu(__pda.field) +#define write_pda(field, val) x86_write_percpu(__pda.field, val) +#define add_pda(field, val) x86_add_percpu(__pda.field, val) +#define sub_pda(field, val) x86_sub_percpu(__pda.field, val) +#define or_pda(field, val) x86_or_percpu(__pda.field, val) /* This is not atomic against other CPUs -- CPU preemption needs to be off */ #define test_and_clear_bit_pda(bit, field) \ -({ \ - int old__; \ - asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ - : "=r" (old__), "+m" (_proxy_pda.field) \ - : "dIr" (bit), "i" (pda_offset(field)) : "memory");\ - old__; \ -}) + x86_test_and_clear_bit_percpu(bit, __pda.field) #endif diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 556f84b9ea9..328b31a429d 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -121,6 +121,16 @@ do { \ #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) +/* This is not atomic against other CPUs -- CPU preemption needs to be off */ +#define x86_test_and_clear_bit_percpu(bit, var) \ +({ \ + int old__; \ + asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0" \ + : "=r" (old__) \ + : "dIr" (bit), "i" (&per_cpu__##var) : "memory"); \ + old__; \ +}) + #ifdef CONFIG_X86_64 extern void load_pda_offset(int cpu); #else diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index d2a0baa87d1..a09abb8fb97 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -14,7 +14,6 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) jiffies_64 = jiffies; -_proxy_pda = 1; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(7); /* RWE */ diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 695e426aa35..3909e3ba5ce 100644 --- 
a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(init_level4_pgt); EXPORT_SYMBOL(load_gs_index); - -EXPORT_SYMBOL(_proxy_pda); -- cgit v1.2.3 From 004aa322f855a765741d9437a98dd8fe2e4f32a6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 13 Jan 2009 20:41:35 +0900 Subject: x86: misc clean up after the percpu update Do the following cleanups: * kill x86_64_init_pda() which now is equivalent to pda_init() * use per_cpu_offset() instead of cpu_pda() when initializing initial_gs Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/setup.h | 1 - arch/x86/kernel/acpi/sleep.c | 2 +- arch/x86/kernel/head64.c | 7 +------ arch/x86/kernel/smpboot.c | 2 +- arch/x86/xen/enlighten.c | 2 +- 5 files changed, 4 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ebe858cdc8a..536949749bc 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -100,7 +100,6 @@ extern unsigned long init_pg_tables_start; extern unsigned long init_pg_tables_end; #else -void __init x86_64_init_pda(void); void __init x86_64_start_kernel(char *real_mode); void __init x86_64_start_reservations(char *real_mode_data); diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 9ff67f8dc2c..4abff454c55 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -101,7 +101,7 @@ int acpi_save_state_mem(void) stack_start.sp = temp_stack + sizeof(temp_stack); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); - initial_gs = (unsigned long)cpu_pda(smp_processor_id()); + initial_gs = per_cpu_offset(smp_processor_id()); #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 71b6f6ec96a..af67d3227ea 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -26,11 +26,6 @@ #include #include -void __init x86_64_init_pda(void) -{ - pda_init(0); -} - static void __init zap_identity_mappings(void) { pgd_t *pgd = pgd_offset_k(0UL); @@ -96,7 +91,7 @@ void __init x86_64_start_kernel(char * real_mode_data) if (console_loglevel == 10) early_printk("Kernel alive\n"); - x86_64_init_pda(); + pda_init(0); x86_64_start_reservations(real_mode_data); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f2f77ca494d..2f0e0f1090f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -798,7 +798,7 @@ do_rest: #else cpu_pda(cpu)->pcurrent = c_idle.idle; clear_tsk_thread_flag(c_idle.idle, TIF_FORK); - initial_gs = (unsigned long)cpu_pda(cpu); + initial_gs = per_cpu_offset(cpu); #endif early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); initial_code = (unsigned long)start_secondary; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 965539ec425..312414ef936 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1645,7 +1645,7 @@ asmlinkage void __init xen_start_kernel(void) #ifdef CONFIG_X86_64 /* Disable until direct per-cpu data access. 
*/ have_vcpu_info_placement = 0; - x86_64_init_pda(); + pda_init(0); #endif xen_smp_init(); -- cgit v1.2.3 From 6dbde3530850d4d8bfc1b6bd4006d92786a2787f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Jan 2009 22:15:53 +0900 Subject: percpu: add optimized generic percpu accessors It is an optimization and a cleanup, and adds the following new generic percpu methods: percpu_read() percpu_write() percpu_add() percpu_sub() percpu_and() percpu_or() percpu_xor() and implements support for them on x86. (other architectures will fall back to a default implementation) The advantage is that for example to read a local percpu variable, instead of this sequence: return __get_cpu_var(var); ffffffff8102ca2b: 48 8b 14 fd 80 09 74 mov -0x7e8bf680(,%rdi,8),%rdx ffffffff8102ca32: 81 ffffffff8102ca33: 48 c7 c0 d8 59 00 00 mov $0x59d8,%rax ffffffff8102ca3a: 48 8b 04 10 mov (%rax,%rdx,1),%rax We can get a single instruction by using the optimized variants: return percpu_read(var); ffffffff8102ca3f: 65 48 8b 05 91 8f fd mov %gs:0x7efd8f91(%rip),%rax I also cleaned up the x86-specific APIs and made the x86 code use these new generic percpu primitives. tj: * fixed generic percpu_sub() definition as Roel Kluin pointed out * added percpu_and() for completeness's sake * made generic percpu ops atomic against preemption Signed-off-by: Ingo Molnar Signed-off-by: Tejun Heo --- arch/x86/include/asm/current.h | 2 +- arch/x86/include/asm/irq_regs_32.h | 4 ++-- arch/x86/include/asm/mmu_context_32.h | 12 ++++++------ arch/x86/include/asm/pda.h | 10 +++++----- arch/x86/include/asm/percpu.h | 24 +++++++++++++----------- arch/x86/include/asm/smp.h | 2 +- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/tlb_32.c | 10 +++++----- arch/x86/mach-voyager/voyager_smp.c | 4 ++-- arch/x86/xen/enlighten.c | 14 +++++++------- arch/x86/xen/irq.c | 8 ++++---- arch/x86/xen/mmu.c | 2 +- arch/x86/xen/multicalls.h | 2 +- arch/x86/xen/smp.c | 2 +- 14 files changed, 50 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h index 0930b4f8d67..0728480f5c5 100644 --- a/arch/x86/include/asm/current.h +++ b/arch/x86/include/asm/current.h @@ -10,7 +10,7 @@ struct task_struct; DECLARE_PER_CPU(struct task_struct *, current_task); static __always_inline struct task_struct *get_current(void) { - return x86_read_percpu(current_task); + return percpu_read(current_task); } #else /* X86_32 */ diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h index 86afd747345..d7ed33ee94e 100644 --- a/arch/x86/include/asm/irq_regs_32.h +++ b/arch/x86/include/asm/irq_regs_32.h @@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs); static inline struct pt_regs *get_irq_regs(void) { - return x86_read_percpu(irq_regs); + return percpu_read(irq_regs); } static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) @@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) struct pt_regs *old_regs; old_regs = get_irq_regs(); - x86_write_percpu(irq_regs, new_regs); + percpu_write(irq_regs, new_regs); return old_regs; } diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h index 7e98ce1d2c0..08b53454f83 100644 --- a/arch/x86/include/asm/mmu_context_32.h +++ b/arch/x86/include/asm/mmu_context_32.h @@ -4,8 +4,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP - if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) - 
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY); + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); #endif } @@ -19,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev, /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP - x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK); - x86_write_percpu(cpu_tlbstate.active_mm, next); + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); #endif cpu_set(cpu, next->cpu_vm_mask); @@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, } #ifdef CONFIG_SMP else { - x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK); - BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next); + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index e3d3a081d79..47f274fe695 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -45,11 +45,11 @@ extern void pda_init(int); #define cpu_pda(cpu) (&per_cpu(__pda, cpu)) -#define read_pda(field) x86_read_percpu(__pda.field) -#define write_pda(field, val) x86_write_percpu(__pda.field, val) -#define add_pda(field, val) x86_add_percpu(__pda.field, val) -#define sub_pda(field, val) x86_sub_percpu(__pda.field, val) -#define or_pda(field, val) x86_or_percpu(__pda.field, val) +#define read_pda(field) percpu_read(__pda.field) +#define write_pda(field, val) percpu_write(__pda.field, val) +#define add_pda(field, val) percpu_add(__pda.field, val) +#define sub_pda(field, val) percpu_sub(__pda.field, val) +#define or_pda(field, val) percpu_or(__pda.field, val) /* This is not atomic against other CPUs -- CPU preemption needs to be off */ #define test_and_clear_bit_pda(bit, field) \ diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 328b31a429d..03aa4b00a1c 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -40,16 +40,11 @@ #ifdef CONFIG_SMP #define __percpu_seg_str "%%"__stringify(__percpu_seg)":" -#define __my_cpu_offset x86_read_percpu(this_cpu_off) +#define __my_cpu_offset percpu_read(this_cpu_off) #else #define __percpu_seg_str #endif -#include - -/* We can use this directly for local CPU (faster). */ -DECLARE_PER_CPU(unsigned long, this_cpu_off); - /* For arch-specific code, we can use direct single-insn ops (they * don't give an lvalue though). 
*/ extern void __bad_percpu_size(void); @@ -115,11 +110,13 @@ do { \ ret__; \ }) -#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) -#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) -#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) -#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) -#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) +#define percpu_read(var) percpu_from_op("mov", per_cpu__##var) +#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) +#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) +#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) +#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val) +#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) +#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) /* This is not atomic against other CPUs -- CPU preemption needs to be off */ #define x86_test_and_clear_bit_percpu(bit, var) \ @@ -131,6 +128,11 @@ do { \ old__; \ }) +#include + +/* We can use this directly for local CPU (faster). */ +DECLARE_PER_CPU(unsigned long, this_cpu_off); + #ifdef CONFIG_X86_64 extern void load_pda_offset(int cpu); #else diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 127415402ea..c7bbbbe65d3 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -160,7 +160,7 @@ extern unsigned disabled_cpus __cpuinitdata; * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ -#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) +#define raw_smp_processor_id() (percpu_read(cpu_number)) extern int safe_smp_processor_id(void); #elif defined(CONFIG_X86_64_SMP) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a546f55c77b..77d546817d9 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -591,7 +591,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (prev->gs | next->gs) loadsegment(gs, next->gs); - x86_write_percpu(current_task, next_p); + percpu_write(current_task, next_p); return prev_p; } diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index ec53818f4e3..e65449d0f7d 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -34,8 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock); */ void leave_mm(int cpu) { - BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK); - cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask); + BUG_ON(percpu_read(cpu_tlbstate.state) == TLBSTATE_OK); + cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); load_cr3(swapper_pg_dir); } EXPORT_SYMBOL_GPL(leave_mm); @@ -103,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs) * BUG(); */ - if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) { - if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) { + if (flush_mm == percpu_read(cpu_tlbstate.active_mm)) { + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (flush_va == TLB_FLUSH_ALL) local_flush_tlb(); else @@ -222,7 +222,7 @@ static void do_flush_tlb_all(void *info) unsigned long cpu = smp_processor_id(); __flush_tlb_all(); - if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY) + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(cpu); } diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 
1a48368acb0..96f15b09a4c 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -402,7 +402,7 @@ void __init find_smp_config(void) VOYAGER_SUS_IN_CONTROL_PORT); current_thread_info()->cpu = boot_cpu_id; - x86_write_percpu(cpu_number, boot_cpu_id); + percpu_write(cpu_number, boot_cpu_id); } /* @@ -1782,7 +1782,7 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus) void __init smp_setup_processor_id(void) { current_thread_info()->cpu = hard_smp_processor_id(); - x86_write_percpu(cpu_number, hard_smp_processor_id()); + percpu_write(cpu_number, hard_smp_processor_id()); } static void voyager_send_call_func(cpumask_t callmask) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 312414ef936..75b94139e1f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -695,17 +695,17 @@ static void xen_write_cr0(unsigned long cr0) static void xen_write_cr2(unsigned long cr2) { - x86_read_percpu(xen_vcpu)->arch.cr2 = cr2; + percpu_read(xen_vcpu)->arch.cr2 = cr2; } static unsigned long xen_read_cr2(void) { - return x86_read_percpu(xen_vcpu)->arch.cr2; + return percpu_read(xen_vcpu)->arch.cr2; } static unsigned long xen_read_cr2_direct(void) { - return x86_read_percpu(xen_vcpu_info.arch.cr2); + return percpu_read(xen_vcpu_info.arch.cr2); } static void xen_write_cr4(unsigned long cr4) @@ -718,12 +718,12 @@ static void xen_write_cr4(unsigned long cr4) static unsigned long xen_read_cr3(void) { - return x86_read_percpu(xen_cr3); + return percpu_read(xen_cr3); } static void set_current_cr3(void *v) { - x86_write_percpu(xen_current_cr3, (unsigned long)v); + percpu_write(xen_current_cr3, (unsigned long)v); } static void __xen_write_cr3(bool kernel, unsigned long cr3) @@ -748,7 +748,7 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3) MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); if (kernel) { - x86_write_percpu(xen_cr3, cr3); + percpu_write(xen_cr3, cr3); /* Update xen_current_cr3 once the batch has actually been submitted. */ @@ -764,7 +764,7 @@ static void xen_write_cr3(unsigned long cr3) /* Update while interrupts are disabled, so its atomic with respect to ipis */ - x86_write_percpu(xen_cr3, cr3); + percpu_write(xen_cr3, cr3); __xen_write_cr3(true, cr3); diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index bb042608c60..2e8271431e1 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void) struct vcpu_info *vcpu; unsigned long flags; - vcpu = x86_read_percpu(xen_vcpu); + vcpu = percpu_read(xen_vcpu); /* flag has opposite sense of mask */ flags = !vcpu->evtchn_upcall_mask; @@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags) make sure we're don't switch CPUs between getting the vcpu pointer and updating the mask. */ preempt_disable(); - vcpu = x86_read_percpu(xen_vcpu); + vcpu = percpu_read(xen_vcpu); vcpu->evtchn_upcall_mask = flags; preempt_enable_no_resched(); @@ -83,7 +83,7 @@ static void xen_irq_disable(void) make sure we're don't switch CPUs between getting the vcpu pointer and updating the mask. */ preempt_disable(); - x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1; + percpu_read(xen_vcpu)->evtchn_upcall_mask = 1; preempt_enable_no_resched(); } @@ -96,7 +96,7 @@ static void xen_irq_enable(void) the caller is confused and is trying to re-enable interrupts on an indeterminate processor. 
*/ - vcpu = x86_read_percpu(xen_vcpu); + vcpu = percpu_read(xen_vcpu); vcpu->evtchn_upcall_mask = 0; /* Doesn't matter if we get preempted here, because any diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 503c240e26c..7bc7852cc5c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1074,7 +1074,7 @@ static void drop_other_mm_ref(void *info) /* If this cpu still has a stale cr3 reference, then make sure it has been flushed. */ - if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) { + if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) { load_cr3(swapper_pg_dir); arch_flush_lazy_cpu_mode(); } diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h index 85893824161..e786fa7f261 100644 --- a/arch/x86/xen/multicalls.h +++ b/arch/x86/xen/multicalls.h @@ -39,7 +39,7 @@ static inline void xen_mc_issue(unsigned mode) xen_mc_flush(); /* restore flags saved in xen_mc_batch */ - local_irq_restore(x86_read_percpu(xen_mc_irq_flags)); + local_irq_restore(percpu_read(xen_mc_irq_flags)); } /* Set up a callback to be called when the current batch is flushed */ diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 83fa4236477..3bfd6dd0b47 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -78,7 +78,7 @@ static __cpuinit void cpu_bringup(void) xen_setup_cpu_clockevents(); cpu_set(cpu, cpu_online_map); - x86_write_percpu(cpu_state, CPU_ONLINE); + percpu_write(cpu_state, CPU_ONLINE); wmb(); /* We can take interrupts now: we're officially "up". */ -- cgit v1.2.3 From a338af2c648f5e07c582154745a6c60cd2d8bf12 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 16 Jan 2009 11:19:03 +0900 Subject: x86: fix build bug introduced during merge EXPORT_PER_CPU_SYMBOL() got misplaced during merge leading to build failure. Fix it. Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index daeedf82c15..b5c35af2011 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -86,9 +86,8 @@ void __cpuinit load_pda_offset(int cpu) } #ifndef CONFIG_SMP DEFINE_PER_CPU(struct x8664_pda, __pda); -EXPORT_PER_CPU_SYMBOL(__pda); #endif - +EXPORT_PER_CPU_SYMBOL(__pda); #endif /* CONFIG_SMP && CONFIG_X86_64 */ #ifdef CONFIG_X86_64 -- cgit v1.2.3 From cd3adf52309867955d6e2175246b526442235805 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 16 Jan 2009 12:11:43 +0900 Subject: x86_64: initialize this_cpu_off to __per_cpu_load On x86_64, if get_per_cpu_var() is used before per cpu area is setup (if lockdep is turned on, it happens), it needs this_cpu_off to point to __per_cpu_load. Initialize accordingly. 
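[Illustrative aside, not from any patch in this series: a minimal user-space model of the offset scheme this message relies on. Every per-cpu access adds this_cpu_off to the variable's address, so before setup_per_cpu_areas() has copied out the real areas the offset must resolve into the initial __per_cpu_load image -- which is what the initialization below achieves. The demo_* names are invented, the sketch uses a relative delta, and the zero-based-symbol detail of the real x86_64 layout is deliberately flattened.]

#include <stdio.h>
#include <string.h>

static struct {
	long load[4];		/* stand-in for the .data.percpu template image */
	long area[2][4];	/* stand-in for the real per-cpu copies */
} demo = { .load = { 10, 20, 30, 40 } };

/* stand-in for this_cpu_off: byte delta added to every per-cpu access;
 * zero means "still pointing at the template" */
static long demo_this_cpu_off;

static long *demo_this_cpu_ptr(long *addr)
{
	return (long *)((char *)addr + demo_this_cpu_off);
}

static void demo_setup_per_cpu_area(int cpu)
{
	memcpy(demo.area[cpu], demo.load, sizeof(demo.load));
	demo_this_cpu_off = (char *)demo.area[cpu] - (char *)demo.load;
}

int main(void)
{
	/* "early" use, before the areas exist: the offset still resolves
	 * into the template, so the read returns sane data */
	printf("early read: %ld\n", *demo_this_cpu_ptr(&demo.load[2]));

	demo_setup_per_cpu_area(1);
	*demo_this_cpu_ptr(&demo.load[2]) = 99;	/* hits cpu 1's copy only */

	printf("cpu1 copy : %ld\n", demo.area[1][2]);
	printf("template  : %ld\n", demo.load[2]);
	return 0;
}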
Signed-off-by: Tejun Heo --- arch/x86/kernel/smpcommon.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 84395fabc41..7e157810062 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -3,8 +3,13 @@ */ #include #include +#include +#ifdef CONFIG_X86_64 +DEFINE_PER_CPU(unsigned long, this_cpu_off) = (unsigned long)__per_cpu_load; +#else DEFINE_PER_CPU(unsigned long, this_cpu_off); +#endif EXPORT_PER_CPU_SYMBOL(this_cpu_off); #ifdef CONFIG_X86_32 -- cgit v1.2.3 From a7e2e735dcf98717150d3c8eaa731de8038af05a Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Thu, 8 Jan 2009 21:03:55 +0000 Subject: ASoC: machine driver for Toshiba e750 This patch adds support for the wm9705 ac97 codec as used in the Toshiba e750 PDA. It includes support for powering up / down the external headphone and speaker amplifiers on this machine. Signed-off-by: Ian Molton Signed-off-by: Mark Brown --- arch/arm/mach-pxa/e750.c | 5 +++++ arch/arm/mach-pxa/include/mach/eseries-gpio.h | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/e750.c b/arch/arm/mach-pxa/e750.c index be1ab8edb97..665066fd280 100644 --- a/arch/arm/mach-pxa/e750.c +++ b/arch/arm/mach-pxa/e750.c @@ -133,6 +133,11 @@ static unsigned long e750_pin_config[] __initdata = { /* IrDA */ GPIO38_GPIO | MFP_LPM_DRIVE_HIGH, + /* Audio power control */ + GPIO4_GPIO, /* Headphone amp power */ + GPIO7_GPIO, /* Speaker amp power */ + GPIO37_GPIO, /* Headphone detect */ + /* PC Card */ GPIO8_GPIO, /* CD0 */ GPIO44_GPIO, /* CD1 */ diff --git a/arch/arm/mach-pxa/include/mach/eseries-gpio.h b/arch/arm/mach-pxa/include/mach/eseries-gpio.h index efbd2aa9ece..02b28e0ed73 100644 --- a/arch/arm/mach-pxa/include/mach/eseries-gpio.h +++ b/arch/arm/mach-pxa/include/mach/eseries-gpio.h @@ -45,6 +45,11 @@ /* e7xx IrDA power control */ #define GPIO_E7XX_IR_OFF 38 +/* e750 audio control GPIOs */ +#define GPIO_E750_HP_AMP_OFF 4 +#define GPIO_E750_SPK_AMP_OFF 7 +#define GPIO_E750_HP_DETECT 37 + /* ASIC related GPIOs */ #define GPIO_ESERIES_TMIO_IRQ 5 #define GPIO_ESERIES_TMIO_PCLR 19 -- cgit v1.2.3 From 0465c7aa6fbab89de820442aed449ceb8d9145a6 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Thu, 8 Jan 2009 21:16:05 +0000 Subject: ASoC: machine driver for Toshiba e800 This patch adds support for the wm9712 ac97 codec as used in the Toshiba e800 PDA. It includes support for powering up / down the external headphone and speaker amplifiers on this machine. 
Signed-off-by: Ian Molton Signed-off-by: Mark Brown --- arch/arm/mach-pxa/include/mach/eseries-gpio.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/include/mach/eseries-gpio.h b/arch/arm/mach-pxa/include/mach/eseries-gpio.h index 02b28e0ed73..6d6e4d8fa4c 100644 --- a/arch/arm/mach-pxa/include/mach/eseries-gpio.h +++ b/arch/arm/mach-pxa/include/mach/eseries-gpio.h @@ -50,6 +50,11 @@ #define GPIO_E750_SPK_AMP_OFF 7 #define GPIO_E750_HP_DETECT 37 +/* e800 audio control GPIOs */ +#define GPIO_E800_HP_DETECT 81 +#define GPIO_E800_HP_AMP_OFF 82 +#define GPIO_E800_SPK_AMP_ON 83 + /* ASIC related GPIOs */ #define GPIO_ESERIES_TMIO_IRQ 5 #define GPIO_ESERIES_TMIO_PCLR 19 -- cgit v1.2.3 From 6eb714c63ed5bd663627f7dda8c4d5258f3b64ef Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 16 Jan 2009 15:31:15 -0800 Subject: cpufreq: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write Impact: use new work_on_cpu function to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for drv_read() and drv_write(). Basically converts do_drv_{read,write} into "work_on_cpu" functions that are now called by drv_read and drv_write. Note: This patch basically reverts 50c668d6 which reverted 7503bfba, now that the work_on_cpu() function is more stable. Signed-off-by: Mike Travis Acked-by: Rusty Russell Tested-by: Dieter Ries Tested-by: Maciej Rutecki Cc: Dave Jones Cc: --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 019276717a7..4b1c319d30c 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -150,8 +150,9 @@ struct drv_cmd { u32 val; }; -static void do_drv_read(struct drv_cmd *cmd) +static long do_drv_read(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd) default: break; } + return 0; } -static void do_drv_write(struct drv_cmd *cmd) +static long do_drv_write(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd) default: break; } + return 0; } static void drv_read(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, cmd->mask); - do_drv_read(cmd); - set_cpus_allowed_ptr(current, &saved_mask); + work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); } static void drv_write(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - set_cpus_allowed_ptr(current, cpumask_of(i)); - do_drv_write(cmd); + work_on_cpu(i, do_drv_write, cmd); } - - set_cpus_allowed_ptr(current, &saved_mask); - return; } static u32 get_cur_val(const struct cpumask *mask) @@ -367,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) return freq; } -static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, +static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; -- cgit v1.2.3 From cef30b3a84e1c7cbd49987d83c5863c001445842 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 16 Jan 2009 15:58:13 -0800 Subject: x86: put trigger in to detect mismatched apic versions. 
Fire off one message if two apic's discovered with different apic versions. Signed-off-by: Mike Travis --- arch/x86/kernel/apic.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 0f830e4f567..db0998641c5 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1833,6 +1833,11 @@ void __cpuinit generic_processor_info(int apicid, int version) num_processors++; cpu = cpumask_next_zero(-1, cpu_present_mask); + if (version != apic_version[boot_cpu_physical_apicid]) + WARN_ONCE(1, + "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", + apic_version[boot_cpu_physical_apicid], cpu, version); + physid_set(apicid, phys_cpu_present_map); if (apicid == boot_cpu_physical_apicid) { /* -- cgit v1.2.3 From 74e7904559a10cbb9fbf9139c5c42fc87c0f62a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 17 Jan 2009 15:26:32 +0900 Subject: linker script: add missing .data.percpu.page_aligned arm, arm/mach-integrator and powerpc were missing .data.percpu.page_aligned in their percpu output section definitions. Add it. Signed-off-by: Tejun Heo --- arch/arm/kernel/vmlinux.lds.S | 1 + arch/ia64/kernel/vmlinux.lds.S | 1 + arch/powerpc/kernel/vmlinux.lds.S | 1 + 3 files changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 00216071eaf..85598f7da40 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -65,6 +65,7 @@ SECTIONS #endif . = ALIGN(4096); __per_cpu_start = .; + *(.data.percpu.page_aligned) *(.data.percpu) *(.data.percpu.shared_aligned) __per_cpu_end = .; diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 10a7d47e851..f45e4e508ec 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -219,6 +219,7 @@ SECTIONS .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) { __per_cpu_start = .; + *(.data.percpu.page_aligned) *(.data.percpu) *(.data.percpu.shared_aligned) __per_cpu_end = .; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 47bf15cd2c9..04e8ecea9b4 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -182,6 +182,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { __per_cpu_start = .; + *(.data.percpu.page_aligned) *(.data.percpu) *(.data.percpu.shared_aligned) __per_cpu_end = .; -- cgit v1.2.3 From 1b437c8c73a36daa471dd54a63c426d72af5723d Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:57 +0900 Subject: x86-64: Move irq stats from PDA to per-cpu and consolidate with 32-bit. 
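[Illustrative aside, not part of the commit: once the statistics live in a per-cpu irq_cpustat_t, one inc_irq_stat()-style helper serves both 32- and 64-bit and needs no locking -- each CPU only ever bumps its own slot (and the real accessor compiles to a single %gs-relative add, so it is also safe against interrupts on the same CPU), while a /proc/interrupts-style reader just sums the slots. The user-space sketch below models that with one array element per "cpu"; all demo_* names are invented.]

#include <pthread.h>
#include <stdio.h>

#define DEMO_NR_CPUS 4

/* stand-in for irq_cpustat_t: a struct of event counters, one per cpu */
struct demo_irq_cpustat {
	unsigned long irq_call_count;
	unsigned long irq_resched_count;
};

static struct demo_irq_cpustat demo_irq_stat[DEMO_NR_CPUS];

/* stand-in for inc_irq_stat(member): bump this cpu's private counter */
#define demo_inc_irq_stat(cpu, member) (demo_irq_stat[cpu].member++)

static void *demo_cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	for (int i = 0; i < 100000; i++)
		demo_inc_irq_stat(cpu, irq_call_count);	/* no lock needed */
	return NULL;
}

int main(void)
{
	pthread_t tid[DEMO_NR_CPUS];
	unsigned long total = 0;

	for (long cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, demo_cpu_thread, (void *)cpu);
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);

	/* the reader side simply sums the per-cpu slots */
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		total += demo_irq_stat[cpu].irq_call_count;
	printf("irq_call_count total: %lu\n", total);
	return 0;
}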
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq_64.h | 24 +++++++++++++++++++----- arch/x86/include/asm/pda.h | 10 ---------- arch/x86/kernel/irq.c | 6 +----- arch/x86/kernel/irq_64.c | 3 +++ arch/x86/kernel/nmi.c | 10 +--------- arch/x86/xen/smp.c | 18 +++--------------- 6 files changed, 27 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h index b5a6b5d5670..a65bab20f6c 100644 --- a/arch/x86/include/asm/hardirq_64.h +++ b/arch/x86/include/asm/hardirq_64.h @@ -3,22 +3,36 @@ #include #include -#include #include +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ + unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int irq0_irqs; + unsigned int irq_resched_count; + unsigned int irq_call_count; + unsigned int irq_tlb_count; + unsigned int irq_thermal_count; + unsigned int irq_spurious_count; + unsigned int irq_threshold_count; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU(irq_cpustat_t, irq_stat); + /* We can have at most NR_VECTORS irqs routed to a cpu at a time */ #define MAX_HARDIRQS_PER_CPU NR_VECTORS #define __ARCH_IRQ_STAT 1 -#define inc_irq_stat(member) add_pda(member, 1) +#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) -#define local_softirq_pending() read_pda(__softirq_pending) +#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) #define __ARCH_SET_SOFTIRQ_PENDING 1 -#define set_softirq_pending(x) write_pda(__softirq_pending, (x)) -#define or_softirq_pending(x) or_pda(__softirq_pending, (x)) +#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) +#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) extern void ack_bad_irq(unsigned int irq); diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 47f274fe695..69a40757e21 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -25,19 +25,9 @@ struct x8664_pda { char *irqstackptr; short nodenumber; /* number of current node (32k max) */ short in_bootmem; /* pda lives in bootmem */ - unsigned int __softirq_pending; - unsigned int __nmi_count; /* number of NMI on this CPUs */ short mmu_state; short isidle; struct mm_struct *active_mm; - unsigned apic_timer_irqs; - unsigned irq0_irqs; - unsigned irq_resched_count; - unsigned irq_call_count; - unsigned irq_tlb_count; - unsigned irq_thermal_count; - unsigned irq_threshold_count; - unsigned irq_spurious_count; } ____cacheline_aligned_in_smp; DECLARE_PER_CPU(struct x8664_pda, __pda); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3973e2df7f8..8b30d0c2512 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -36,11 +36,7 @@ void ack_bad_irq(unsigned int irq) #endif } -#ifdef CONFIG_X86_32 -# define irq_stats(x) (&per_cpu(irq_stat, x)) -#else -# define irq_stats(x) cpu_pda(x) -#endif +#define irq_stats(x) (&per_cpu(irq_stat, x)) /* * /proc/interrupts printing: */ diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 0b21cb1ea11..1db05247b47 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -19,6 +19,9 @@ #include #include +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + /* * Probabilistic stack overflow check: * diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 7228979f1e7..23b6d9e6e4f 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -61,11 +61,7 @@ 
static int endflag __initdata; static inline unsigned int get_nmi_count(int cpu) { -#ifdef CONFIG_X86_64 - return cpu_pda(cpu)->__nmi_count; -#else - return nmi_count(cpu); -#endif + return per_cpu(irq_stat, cpu).__nmi_count; } static inline int mce_in_progress(void) @@ -82,12 +78,8 @@ static inline int mce_in_progress(void) */ static inline unsigned int get_timer_irqs(int cpu) { -#ifdef CONFIG_X86_64 - return read_pda(apic_timer_irqs) + read_pda(irq0_irqs); -#else return per_cpu(irq_stat, cpu).apic_timer_irqs + per_cpu(irq_stat, cpu).irq0_irqs; -#endif } #ifdef CONFIG_SMP diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 3bfd6dd0b47..9ff3b0999cf 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); */ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) { -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_resched_count++; -#else - add_pda(irq_resched_count, 1); -#endif + inc_irq_stat(irq_resched_count); return IRQ_HANDLED; } @@ -435,11 +431,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) { irq_enter(); generic_smp_call_function_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; -#else - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); irq_exit(); return IRQ_HANDLED; @@ -449,11 +441,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) { irq_enter(); generic_smp_call_function_single_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; -#else - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); irq_exit(); return IRQ_HANDLED; -- cgit v1.2.3 From 9eb912d1aa6b8106e06a73ea6702ec3dab0d6a1a Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:57 +0900 Subject: x86-64: Move TLB state from PDA to per-cpu and consolidate with 32-bit. 
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/mmu_context_64.h | 16 +++++++--------- arch/x86/include/asm/pda.h | 2 -- arch/x86/include/asm/tlbflush.h | 7 ++----- arch/x86/kernel/cpu/common.c | 2 -- arch/x86/kernel/tlb_32.c | 12 ++---------- arch/x86/kernel/tlb_64.c | 13 ++++++++----- arch/x86/xen/mmu.c | 6 +----- 7 files changed, 20 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h index 677d36e9540..c4572505ab3 100644 --- a/arch/x86/include/asm/mmu_context_64.h +++ b/arch/x86/include/asm/mmu_context_64.h @@ -1,13 +1,11 @@ #ifndef _ASM_X86_MMU_CONTEXT_64_H #define _ASM_X86_MMU_CONTEXT_64_H -#include - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP - if (read_pda(mmu_state) == TLBSTATE_OK) - write_pda(mmu_state, TLBSTATE_LAZY); + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); #endif } @@ -19,8 +17,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP - write_pda(mmu_state, TLBSTATE_OK); - write_pda(active_mm, next); + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); #endif cpu_set(cpu, next->cpu_vm_mask); load_cr3(next->pgd); @@ -30,9 +28,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, } #ifdef CONFIG_SMP else { - write_pda(mmu_state, TLBSTATE_OK); - if (read_pda(active_mm) != next) - BUG(); + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); + if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. 
We must reload CR3 diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 69a40757e21..8ee835ed10e 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -25,9 +25,7 @@ struct x8664_pda { char *irqstackptr; short nodenumber; /* number of current node (32k max) */ short in_bootmem; /* pda lives in bootmem */ - short mmu_state; short isidle; - struct mm_struct *active_mm; } ____cacheline_aligned_in_smp; DECLARE_PER_CPU(struct x8664_pda, __pda); diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 17feaa9c7e7..d3539f998f8 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -148,20 +148,17 @@ void native_flush_tlb_others(const struct cpumask *cpumask, #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 -#ifdef CONFIG_X86_32 struct tlb_state { struct mm_struct *active_mm; int state; - char __cacheline_padding[L1_CACHE_BYTES-8]; }; DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); -void reset_lazy_tlbstate(void); -#else static inline void reset_lazy_tlbstate(void) { + percpu_write(cpu_tlbstate.state, 0); + percpu_write(cpu_tlbstate.active_mm, &init_mm); } -#endif #endif /* SMP */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c49498d4083..3d0cc6f1711 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -897,8 +897,6 @@ void __cpuinit pda_init(int cpu) pda->irqcount = -1; pda->kernelstack = (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; - pda->active_mm = &init_mm; - pda->mmu_state = 0; if (cpu == 0) { /* others are initialized in smpboot.c */ diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index e65449d0f7d..abf0808d6fc 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -4,8 +4,8 @@ #include -DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) - ____cacheline_aligned = { &init_mm, 0, }; +DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) + = { &init_mm, 0, }; /* must come after the send_IPI functions above for inlining */ #include @@ -231,14 +231,6 @@ void flush_tlb_all(void) on_each_cpu(do_flush_tlb_all, NULL, 1); } -void reset_lazy_tlbstate(void) -{ - int cpu = raw_smp_processor_id(); - - per_cpu(cpu_tlbstate, cpu).state = 0; - per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; -} - static int init_flush_cpumask(void) { alloc_cpumask_var(&flush_cpumask, GFP_KERNEL); diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 7f4141d3b66..e64a32c4882 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -18,6 +18,9 @@ #include #include +DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) + = { &init_mm, 0, }; + #include /* * Smarter SMP flushing macros. 
@@ -62,9 +65,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state); */ void leave_mm(int cpu) { - if (read_pda(mmu_state) == TLBSTATE_OK) + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); - cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); + cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); load_cr3(swapper_pg_dir); } EXPORT_SYMBOL_GPL(leave_mm); @@ -142,8 +145,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) * BUG(); */ - if (f->flush_mm == read_pda(active_mm)) { - if (read_pda(mmu_state) == TLBSTATE_OK) { + if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_va == TLB_FLUSH_ALL) local_flush_tlb(); else @@ -281,7 +284,7 @@ static void do_flush_tlb_all(void *info) unsigned long cpu = smp_processor_id(); __flush_tlb_all(); - if (read_pda(mmu_state) == TLBSTATE_LAZY) + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(cpu); } diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 7bc7852cc5c..98cb9869eb2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1063,11 +1063,7 @@ static void drop_other_mm_ref(void *info) struct mm_struct *mm = info; struct mm_struct *active_mm; -#ifdef CONFIG_X86_64 - active_mm = read_pda(active_mm); -#else - active_mm = __get_cpu_var(cpu_tlbstate).active_mm; -#endif + active_mm = percpu_read(cpu_tlbstate.active_mm); if (active_mm == mm) leave_mm(smp_processor_id()); -- cgit v1.2.3 From 26f80bd6a9ab17bc8a60b6092e7c0d05c5927ce5 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Convert irqstacks to per-cpu Move the irqstackptr variable from the PDA to per-cpu. Make the stacks themselves per-cpu, removing some specific allocation code. Add a seperate flag (is_boot_cpu) to simplify the per-cpu boot adjustments. tj: * sprinkle some underbars around. * irq_stack_ptr is not used till traps_init(), no reason to initialize it early. On SMP, just leaving it NULL till proper initialization in setup_per_cpu_areas() works. Dropped is_boot_cpu and early irq_stack_ptr initialization. * do DECLARE/DEFINE_PER_CPU(char[IRQ_STACK_SIZE], irq_stack) instead of (char, irq_stack[IRQ_STACK_SIZE]). Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/page_64.h | 4 ++-- arch/x86/include/asm/pda.h | 1 - arch/x86/include/asm/processor.h | 3 +++ arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 19 +++++++------------ arch/x86/kernel/dumpstack_64.c | 33 +++++++++++++++++---------------- arch/x86/kernel/entry_64.S | 6 +++--- arch/x86/kernel/setup_percpu.c | 4 +++- 8 files changed, 35 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 5ebca29f44f..e27fdbe5f9e 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -13,8 +13,8 @@ #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) -#define IRQSTACK_ORDER 2 -#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) +#define IRQ_STACK_ORDER 2 +#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) #define STACKFAULT_STACK 1 #define DOUBLEFAULT_STACK 2 diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 8ee835ed10e..09965f7a216 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -22,7 +22,6 @@ struct x8664_pda { /* gcc-ABI: this canary MUST be at offset 40!!! 
*/ #endif - char *irqstackptr; short nodenumber; /* number of current node (32k max) */ short in_bootmem; /* pda lives in bootmem */ short isidle; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 091cd8855f2..f511246fa6c 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -378,6 +378,9 @@ union thread_xstate { #ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); + +DECLARE_PER_CPU(char[IRQ_STACK_SIZE], irq_stack); +DECLARE_PER_CPU(char *, irq_stack_ptr); #endif extern void print_cpu_info(struct cpuinfo_x86 *); diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index f4cc81bfbf8..5b821fbdaf7 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -54,7 +54,6 @@ int main(void) ENTRY(pcurrent); ENTRY(irqcount); ENTRY(cpunumber); - ENTRY(irqstackptr); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); #undef ENTRY diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3d0cc6f1711..496f0a01919 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -881,7 +881,13 @@ __setup("clearcpuid=", setup_disablecpuid); #ifdef CONFIG_X86_64 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; -static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; +DEFINE_PER_CPU_PAGE_ALIGNED(char[IRQ_STACK_SIZE], irq_stack); +#ifdef CONFIG_SMP +DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */ +#else +DEFINE_PER_CPU(char *, irq_stack_ptr) = + per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64; +#endif void __cpuinit pda_init(int cpu) { @@ -901,18 +907,7 @@ void __cpuinit pda_init(int cpu) if (cpu == 0) { /* others are initialized in smpboot.c */ pda->pcurrent = &init_task; - pda->irqstackptr = boot_cpu_stack; - pda->irqstackptr += IRQSTACKSIZE - 64; } else { - if (!pda->irqstackptr) { - pda->irqstackptr = (char *) - __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); - if (!pda->irqstackptr) - panic("cannot allocate irqstack for cpu %d", - cpu); - pda->irqstackptr += IRQSTACKSIZE - 64; - } - if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) pda->nodenumber = cpu_to_node(cpu); } diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index c302d070704..28e26a4315d 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, const struct stacktrace_ops *ops, void *data) { const unsigned cpu = get_cpu(); - unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; + unsigned long *irq_stack_end = + (unsigned long *)per_cpu(irq_stack_ptr, cpu); unsigned used = 0; struct thread_info *tinfo; int graph = 0; @@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, stack = (unsigned long *) estack_end[-2]; continue; } - if (irqstack_end) { - unsigned long *irqstack; - irqstack = irqstack_end - - (IRQSTACKSIZE - 64) / sizeof(*irqstack); + if (irq_stack_end) { + unsigned long *irq_stack; + irq_stack = irq_stack_end - + (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); - if (stack >= irqstack && stack < irqstack_end) { + if (stack >= irq_stack && stack < irq_stack_end) { if (ops->stack(data, "IRQ") < 0) break; bp = print_context_stack(tinfo, stack, bp, - ops, data, irqstack_end, &graph); + ops, data, irq_stack_end, &graph); /* * We link to the next stack (which would be * the process stack normally) the last * pointer (index 
-1 to end) in the IRQ stack: */ - stack = (unsigned long *) (irqstack_end[-1]); - irqstack_end = NULL; + stack = (unsigned long *) (irq_stack_end[-1]); + irq_stack_end = NULL; ops->stack(data, "EOI"); continue; } @@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack; int i; const int cpu = smp_processor_id(); - unsigned long *irqstack_end = - (unsigned long *) (cpu_pda(cpu)->irqstackptr); - unsigned long *irqstack = - (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); + unsigned long *irq_stack_end = + (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); + unsigned long *irq_stack = + (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); /* * debugging aid: "show_stack(NULL, NULL);" prints the @@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, stack = sp; for (i = 0; i < kstack_depth_to_print; i++) { - if (stack >= irqstack && stack <= irqstack_end) { - if (stack == irqstack_end) { - stack = (unsigned long *) (irqstack_end[-1]); + if (stack >= irq_stack && stack <= irq_stack_end) { + if (stack == irq_stack_end) { + stack = (unsigned long *) (irq_stack_end[-1]); printk(" "); } } else { diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 4833f3a1965..d22677a6643 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -345,7 +345,7 @@ ENTRY(save_args) 1: incl %gs:pda_irqcount jne 2f popq_cfi %rax /* move return address... */ - mov %gs:pda_irqstackptr,%rsp + mov PER_CPU_VAR(irq_stack_ptr),%rsp EMPTY_FRAME 0 pushq_cfi %rax /* ... to the new stack */ /* @@ -1261,7 +1261,7 @@ ENTRY(call_softirq) mov %rsp,%rbp CFI_DEF_CFA_REGISTER rbp incl %gs:pda_irqcount - cmove %gs:pda_irqstackptr,%rsp + cmove PER_CPU_VAR(irq_stack_ptr),%rsp push %rbp # backlink for old unwinder call __do_softirq leaveq @@ -1300,7 +1300,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) 11: incl %gs:pda_irqcount movq %rsp,%rbp CFI_DEF_CFA_REGISTER rbp - cmovzq %gs:pda_irqstackptr,%rsp + cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp pushq %rbp # backlink for old unwinder call xen_evtchn_do_upcall popq %rsp diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index b5c35af2011..8b53ef83c61 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -192,7 +192,10 @@ void __init setup_per_cpu_areas(void) memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); per_cpu_offset(cpu) = ptr - __per_cpu_start; + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); #ifdef CONFIG_X86_64 + per_cpu(irq_stack_ptr, cpu) = + (char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64; /* * CPU0 modified pda in the init data area, reload pda * offset for CPU0 and clear the area for others. @@ -202,7 +205,6 @@ void __init setup_per_cpu_areas(void) else memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); #endif - per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } -- cgit v1.2.3 From 92d65b2371d86d40807e1dbfdccadc4d501edcde Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Convert exception stacks to per-cpu Move the exception stacks to per-cpu, removing specific allocation code. 
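[Illustrative aside, not part of the commit: the cpu_init() loop in the hunk below carves one page-aligned per-cpu buffer into the exception-stack regions, with the debug stack sized differently, and records each region's top (x86 stacks grow down). The user-space sketch models just that carving step; the demo_* names, sizes and the debug-stack index are invented stand-ins, not the kernel's values.]

#include <stdio.h>

#define DEMO_N_STACKS	4
#define DEMO_STKSZ	4096
#define DEMO_DEBUG_STK	2			/* index of the larger "debug" stack */
#define DEMO_DEBUG_SZ	(2 * DEMO_STKSZ)

/* stand-in for the per-cpu exception_stacks[] backing store */
static char demo_stacks[(DEMO_N_STACKS - 1) * DEMO_STKSZ + DEMO_DEBUG_SZ];

int main(void)
{
	/* same construct as the kernel's sizes[] table (gcc range initializer) */
	static const unsigned int sizes[DEMO_N_STACKS] = {
		[0 ... DEMO_N_STACKS - 1] = DEMO_STKSZ,
		[DEMO_DEBUG_STK] = DEMO_DEBUG_SZ,
	};
	char *estacks = demo_stacks;
	char *ist[DEMO_N_STACKS];

	/* same shape as the cpu_init() loop: advance by each region's size
	 * and record the region's top */
	for (int v = 0; v < DEMO_N_STACKS; v++) {
		estacks += sizes[v];
		ist[v] = estacks;
	}

	for (int v = 0; v < DEMO_N_STACKS; v++)
		printf("stack %d top at offset %5ld (size %u)\n",
		       v, (long)(ist[v] - demo_stacks), sizes[v]);
	return 0;
}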
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/cpu/common.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 496f0a01919..b6d7eec0be7 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -913,8 +913,9 @@ void __cpuinit pda_init(int cpu) } } -static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + - DEBUG_STKSZ] __page_aligned_bss; +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) + __aligned(PAGE_SIZE); extern asmlinkage void ignore_sysret(void); @@ -972,15 +973,12 @@ void __cpuinit cpu_init(void) struct tss_struct *t = &per_cpu(init_tss, cpu); struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); unsigned long v; - char *estacks = NULL; struct task_struct *me; int i; /* CPU 0 is initialised in head64.c */ if (cpu != 0) pda_init(cpu); - else - estacks = boot_exception_stacks; me = current; @@ -1014,18 +1012,13 @@ void __cpuinit cpu_init(void) * set up and load the per-CPU TSS */ if (!orig_ist->ist[0]) { - static const unsigned int order[N_EXCEPTION_STACKS] = { - [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, - [DEBUG_STACK - 1] = DEBUG_STACK_ORDER + static const unsigned int sizes[N_EXCEPTION_STACKS] = { + [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, + [DEBUG_STACK - 1] = DEBUG_STKSZ }; + char *estacks = per_cpu(exception_stacks, cpu); for (v = 0; v < N_EXCEPTION_STACKS; v++) { - if (cpu) { - estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); - if (!estacks) - panic("Cannot allocate exception " - "stack %ld %d\n", v, cpu); - } - estacks += PAGE_SIZE << order[v]; + estacks += sizes[v]; orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks; } -- cgit v1.2.3 From ea9279066de44053d0c20ea855bc9f4706652d84 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Move cpu number from PDA to per-cpu and consolidate with 32-bit. tj: moved cpu_number definition out of CONFIG_HAVE_SETUP_PER_CPU_AREA for voyager. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 2 +- arch/x86/include/asm/smp.h | 4 +--- arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 1 - arch/x86/kernel/process_32.c | 3 --- arch/x86/kernel/setup_percpu.c | 10 ++++++++++ arch/x86/kernel/smpcommon.c | 2 -- 7 files changed, 12 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 09965f7a216..668d5a5b6f7 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -16,7 +16,7 @@ struct x8664_pda { unsigned long kernelstack; /* 16 top of kernel stack for current */ unsigned long oldrsp; /* 24 user rsp for system call */ int irqcount; /* 32 Irq nesting counter. 
Starts -1 */ - unsigned int cpunumber; /* 36 Logical CPU number */ + unsigned int unused6; /* 36 was cpunumber */ #ifdef CONFIG_CC_STACKPROTECTOR unsigned long stack_canary; /* 40 stack canary value */ /* gcc-ABI: this canary MUST be at diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c7bbbbe65d3..68636e767a9 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -25,9 +25,7 @@ extern unsigned int num_processors; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); -#ifdef CONFIG_X86_32 DECLARE_PER_CPU(int, cpu_number); -#endif static inline struct cpumask *cpu_sibling_mask(int cpu) { @@ -164,7 +162,7 @@ extern unsigned disabled_cpus __cpuinitdata; extern int safe_smp_processor_id(void); #elif defined(CONFIG_X86_64_SMP) -#define raw_smp_processor_id() read_pda(cpunumber) +#define raw_smp_processor_id() (percpu_read(cpu_number)) #define stack_smp_processor_id() \ ({ \ diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 5b821fbdaf7..cae6697c099 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -53,7 +53,6 @@ int main(void) ENTRY(oldrsp); ENTRY(pcurrent); ENTRY(irqcount); - ENTRY(cpunumber); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); #undef ENTRY diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b6d7eec0be7..4221e920886 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -899,7 +899,6 @@ void __cpuinit pda_init(int cpu) load_pda_offset(cpu); - pda->cpunumber = cpu; pda->irqcount = -1; pda->kernelstack = (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 77d546817d9..2c00a57ccb9 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -66,9 +66,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); -DEFINE_PER_CPU(int, cpu_number); -EXPORT_PER_CPU_SYMBOL(cpu_number); - /* * Return saved PC of a blocked thread. */ diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 8b53ef83c61..258497f93f4 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -22,6 +22,15 @@ # define DBG(x...) #endif +/* + * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but + * voyager wants cpu_number too. 
+ */ +#ifdef CONFIG_SMP +DEFINE_PER_CPU(int, cpu_number); +EXPORT_PER_CPU_SYMBOL(cpu_number); +#endif + #ifdef CONFIG_X86_LOCAL_APIC unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; @@ -193,6 +202,7 @@ void __init setup_per_cpu_areas(void) memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); per_cpu_offset(cpu) = ptr - __per_cpu_start; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); + per_cpu(cpu_number, cpu) = cpu; #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = (char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64; diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 7e157810062..add36b4e37c 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -28,7 +28,5 @@ __cpuinit void init_gdt(int cpu) write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); - - per_cpu(cpu_number, cpu) = cpu; } #endif -- cgit v1.2.3 From c6f5e0acd5d12ee23f701f15889872e67b47caa6 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Move current task from PDA to per-cpu and consolidate with 32-bit. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/current.h | 24 +++--------------------- arch/x86/include/asm/pda.h | 4 ++-- arch/x86/include/asm/system.h | 4 ++-- arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 5 +---- arch/x86/kernel/dumpstack_64.c | 2 +- arch/x86/kernel/process_64.c | 5 ++++- arch/x86/kernel/smpboot.c | 3 +-- arch/x86/xen/smp.c | 3 +-- 9 files changed, 15 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h index 0728480f5c5..c68c361697e 100644 --- a/arch/x86/include/asm/current.h +++ b/arch/x86/include/asm/current.h @@ -1,39 +1,21 @@ #ifndef _ASM_X86_CURRENT_H #define _ASM_X86_CURRENT_H -#ifdef CONFIG_X86_32 #include #include +#ifndef __ASSEMBLY__ struct task_struct; DECLARE_PER_CPU(struct task_struct *, current_task); -static __always_inline struct task_struct *get_current(void) -{ - return percpu_read(current_task); -} - -#else /* X86_32 */ - -#ifndef __ASSEMBLY__ -#include - -struct task_struct; static __always_inline struct task_struct *get_current(void) { - return read_pda(pcurrent); + return percpu_read(current_task); } -#else /* __ASSEMBLY__ */ - -#include -#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg +#define current get_current() #endif /* __ASSEMBLY__ */ -#endif /* X86_32 */ - -#define current get_current() - #endif /* _ASM_X86_CURRENT_H */ diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 668d5a5b6f7..7209302d922 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -11,8 +11,8 @@ /* Per processor datastructure. %gs points to it while the kernel runs */ struct x8664_pda { - struct task_struct *pcurrent; /* 0 Current process */ - unsigned long dummy; + unsigned long unused1; + unsigned long unused2; unsigned long kernelstack; /* 16 top of kernel stack for current */ unsigned long oldrsp; /* 24 user rsp for system call */ int irqcount; /* 32 Irq nesting counter. 
Starts -1 */ diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 8e626ea33a1..4399aac680e 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -94,7 +94,7 @@ do { \ "call __switch_to\n\t" \ ".globl thread_return\n" \ "thread_return:\n\t" \ - "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ + "movq "__percpu_seg_str"%P[current_task],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ @@ -106,7 +106,7 @@ do { \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ - [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ + [current_task] "m" (per_cpu_var(current_task)) \ : "memory", "cc" __EXTRA_CLOBBER) #endif diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index cae6697c099..4f7a210e1e5 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -51,7 +51,6 @@ int main(void) #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry)) ENTRY(kernelstack); ENTRY(oldrsp); - ENTRY(pcurrent); ENTRY(irqcount); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4221e920886..b50e38d1688 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -903,10 +903,7 @@ void __cpuinit pda_init(int cpu) pda->kernelstack = (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; - if (cpu == 0) { - /* others are initialized in smpboot.c */ - pda->pcurrent = &init_task; - } else { + if (cpu != 0) { if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) pda->nodenumber = cpu_to_node(cpu); } diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 28e26a4315d..d35db5993fd 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -242,7 +242,7 @@ void show_registers(struct pt_regs *regs) int i; unsigned long sp; const int cpu = smp_processor_id(); - struct task_struct *cur = cpu_pda(cpu)->pcurrent; + struct task_struct *cur = current; sp = regs->sp; printk("CPU %d ", cpu); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 416fb9282f4..e00c31a4b3c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -57,6 +57,9 @@ asmlinkage extern void ret_from_fork(void); +DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; +EXPORT_PER_CPU_SYMBOL(current_task); + unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; static ATOMIC_NOTIFIER_HEAD(idle_notifier); @@ -615,7 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ prev->usersp = read_pda(oldrsp); write_pda(oldrsp, next->usersp); - write_pda(pcurrent, next_p); + percpu_write(current_task, next_p); write_pda(kernelstack, (unsigned long)task_stack_page(next_p) + diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2f0e0f1090f..5854be0fb80 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -790,13 +790,12 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) set_idle_for_cpu(cpu, c_idle.idle); do_rest: -#ifdef CONFIG_X86_32 per_cpu(current_task, cpu) = c_idle.idle; +#ifdef CONFIG_X86_32 init_gdt(cpu); /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); #else - cpu_pda(cpu)->pcurrent = c_idle.idle; 
clear_tsk_thread_flag(c_idle.idle, TIF_FORK); initial_gs = per_cpu_offset(cpu); #endif diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 9ff3b0999cf..72c2eb9b64c 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -279,12 +279,11 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) struct task_struct *idle = idle_task(cpu); int rc; + per_cpu(current_task, cpu) = idle; #ifdef CONFIG_X86_32 init_gdt(cpu); - per_cpu(current_task, cpu) = idle; irq_ctx_init(cpu); #else - cpu_pda(cpu)->pcurrent = idle; clear_tsk_thread_flag(idle, TIF_FORK); #endif xen_setup_timer(cpu); -- cgit v1.2.3 From 9af45651f1f7c89942e016a1a00a7ebddfa727f8 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Move kernelstack from PDA to per-cpu. Also clean up PER_CPU_VAR usage in xen-asm_64.S tj: * remove now unused stack_thread_info() * s/kernelstack/kernel_stack/ * added FIXME comment in xen-asm_64.S Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/ia32/ia32entry.S | 8 ++++---- arch/x86/include/asm/pda.h | 4 +--- arch/x86/include/asm/thread_info.h | 20 ++++++++------------ arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 6 ++++-- arch/x86/kernel/entry_64.S | 4 ++-- arch/x86/kernel/process_64.c | 4 ++-- arch/x86/kernel/smpboot.c | 3 +++ arch/x86/xen/xen-asm_64.S | 23 +++++++++++------------ 9 files changed, 35 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 256b00b6189..9c79b247700 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target) CFI_DEF_CFA rsp,0 CFI_REGISTER rsp,rbp SWAPGS_UNSAFE_STACK - movq %gs:pda_kernelstack, %rsp - addq $(PDA_STACKOFFSET),%rsp + movq PER_CPU_VAR(kernel_stack), %rsp + addq $(KERNEL_STACK_OFFSET),%rsp /* * No need to follow this irqs on/off section: the syscall * disabled irqs, here we enable it straight after entry: @@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target) ENTRY(ia32_cstar_target) CFI_STARTPROC32 simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,PDA_STACKOFFSET + CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ SWAPGS_UNSAFE_STACK movl %esp,%r8d CFI_REGISTER rsp,r8 - movq %gs:pda_kernelstack,%rsp + movq PER_CPU_VAR(kernel_stack),%rsp /* * No need to follow this irqs on/off section: the syscall * disabled irqs and here we enable it straight after entry: diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 7209302d922..4d28ffba6e1 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -13,7 +13,7 @@ struct x8664_pda { unsigned long unused1; unsigned long unused2; - unsigned long kernelstack; /* 16 top of kernel stack for current */ + unsigned long unused3; unsigned long oldrsp; /* 24 user rsp for system call */ int irqcount; /* 32 Irq nesting counter. 
Starts -1 */ unsigned int unused6; /* 36 was cpunumber */ @@ -44,6 +44,4 @@ extern void pda_init(int); #endif -#define PDA_STACKOFFSET (5*8) - #endif /* _ASM_X86_PDA_H */ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 98789647baa..b46f8ca007b 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -194,25 +194,21 @@ static inline struct thread_info *current_thread_info(void) #else /* X86_32 */ -#include +#include +#define KERNEL_STACK_OFFSET (5*8) /* * macros/functions for gaining access to the thread information structure * preempt_count needs to be 1 initially, until the scheduler is functional. */ #ifndef __ASSEMBLY__ -static inline struct thread_info *current_thread_info(void) -{ - struct thread_info *ti; - ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); - return ti; -} +DECLARE_PER_CPU(unsigned long, kernel_stack); -/* do not use in interrupt context */ -static inline struct thread_info *stack_thread_info(void) +static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; - asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); + ti = (void *)(percpu_read(kernel_stack) + + KERNEL_STACK_OFFSET - THREAD_SIZE); return ti; } @@ -220,8 +216,8 @@ static inline struct thread_info *stack_thread_info(void) /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg) \ - movq %gs:pda_kernelstack,reg ; \ - subq $(THREAD_SIZE-PDA_STACKOFFSET),reg + movq PER_CPU_VAR(kernel_stack),reg ; \ + subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg #endif diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 4f7a210e1e5..cafff5f4a03 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -49,7 +49,6 @@ int main(void) BLANK(); #undef ENTRY #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry)) - ENTRY(kernelstack); ENTRY(oldrsp); ENTRY(irqcount); DEFINE(pda_size, sizeof(struct x8664_pda)); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b50e38d1688..06b6290088f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -889,6 +889,10 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) = per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64; #endif +DEFINE_PER_CPU(unsigned long, kernel_stack) = + (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; +EXPORT_PER_CPU_SYMBOL(kernel_stack); + void __cpuinit pda_init(int cpu) { struct x8664_pda *pda = cpu_pda(cpu); @@ -900,8 +904,6 @@ void __cpuinit pda_init(int cpu) load_pda_offset(cpu); pda->irqcount = -1; - pda->kernelstack = (unsigned long)stack_thread_info() - - PDA_STACKOFFSET + THREAD_SIZE; if (cpu != 0) { if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index d22677a6643..0dd45859a7a 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -468,7 +468,7 @@ END(ret_from_fork) ENTRY(system_call) CFI_STARTPROC simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,PDA_STACKOFFSET + CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ SWAPGS_UNSAFE_STACK @@ -480,7 +480,7 @@ ENTRY(system_call) ENTRY(system_call_after_swapgs) movq %rsp,%gs:pda_oldrsp - movq %gs:pda_kernelstack,%rsp + movq PER_CPU_VAR(kernel_stack),%rsp /* * No need to follow this irqs off/on section - it's straight * and short: diff --git a/arch/x86/kernel/process_64.c 
b/arch/x86/kernel/process_64.c index e00c31a4b3c..6c5f5760210 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -620,9 +620,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) write_pda(oldrsp, next->usersp); percpu_write(current_task, next_p); - write_pda(kernelstack, + percpu_write(kernel_stack, (unsigned long)task_stack_page(next_p) + - THREAD_SIZE - PDA_STACKOFFSET); + THREAD_SIZE - KERNEL_STACK_OFFSET); #ifdef CONFIG_CC_STACKPROTECTOR write_pda(stack_canary, next_p->stack_canary); /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5854be0fb80..869b98840fd 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -798,6 +798,9 @@ do_rest: #else clear_tsk_thread_flag(c_idle.idle, TIF_FORK); initial_gs = per_cpu_offset(cpu); + per_cpu(kernel_stack, cpu) = + (unsigned long)task_stack_page(c_idle.idle) - + KERNEL_STACK_OFFSET + THREAD_SIZE; #endif early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); initial_code = (unsigned long)start_secondary; diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 05794c566e8..5a23e899367 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -28,12 +29,10 @@ #if 1 /* - x86-64 does not yet support direct access to percpu variables - via a segment override, so we just need to make sure this code - never gets used + FIXME: x86_64 now can support direct access to percpu variables + via a segment override. Update xen accordingly. */ #define BUG ud2a -#define PER_CPU_VAR(var, off) 0xdeadbeef #endif /* @@ -45,14 +44,14 @@ ENTRY(xen_irq_enable_direct) BUG /* Unmask events */ - movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask) + movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* Preempt here doesn't matter because that will deal with any pending interrupts. The pending check may end up being run on the wrong CPU, but that doesn't hurt. */ /* Test for pending */ - testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending) + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending jz 1f 2: call check_events @@ -69,7 +68,7 @@ ENDPATCH(xen_irq_enable_direct) ENTRY(xen_irq_disable_direct) BUG - movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask) + movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask ENDPATCH(xen_irq_disable_direct) ret ENDPROC(xen_irq_disable_direct) @@ -87,7 +86,7 @@ ENDPATCH(xen_irq_disable_direct) ENTRY(xen_save_fl_direct) BUG - testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask) + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah,%ah ENDPATCH(xen_save_fl_direct) @@ -107,13 +106,13 @@ ENTRY(xen_restore_fl_direct) BUG testb $X86_EFLAGS_IF>>8, %ah - setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask) + setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* Preempt here doesn't matter because that will deal with any pending interrupts. The pending check may end up being run on the wrong CPU, but that doesn't hurt. 
*/ /* check for unmasked and pending */ - cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending) + cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending jz 1f 2: call check_events 1: @@ -196,7 +195,7 @@ ENTRY(xen_sysret64) /* We're already on the usermode stack at this point, but still with the kernel gs, so we can easily switch back */ movq %rsp, %gs:pda_oldrsp - movq %gs:pda_kernelstack,%rsp + movq PER_CPU_VAR(kernel_stack),%rsp pushq $__USER_DS pushq %gs:pda_oldrsp @@ -213,7 +212,7 @@ ENTRY(xen_sysret32) /* We're already on the usermode stack at this point, but still with the kernel gs, so we can easily switch back */ movq %rsp, %gs:pda_oldrsp - movq %gs:pda_kernelstack, %rsp + movq PER_CPU_VAR(kernel_stack), %rsp pushq $__USER32_DS pushq %gs:pda_oldrsp -- cgit v1.2.3 From 3d1e42a7cf945e289d6ba26159aa0e2b0645401b Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Move oldrsp from PDA to per-cpu. tj: * in asm-offsets_64.c, pda.h inclusion shouldn't be removed as pda is still referenced in the file * s/oldrsp/old_rsp/ Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 2 +- arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/entry_64.S | 10 +++++----- arch/x86/kernel/process_64.c | 8 +++++--- arch/x86/xen/xen-asm_64.S | 8 ++++---- 5 files changed, 15 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 4d28ffba6e1..ae23deb9955 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -14,7 +14,7 @@ struct x8664_pda { unsigned long unused1; unsigned long unused2; unsigned long unused3; - unsigned long oldrsp; /* 24 user rsp for system call */ + unsigned long unused4; int irqcount; /* 32 Irq nesting counter. 
Starts -1 */ unsigned int unused6; /* 36 was cpunumber */ #ifdef CONFIG_CC_STACKPROTECTOR diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index cafff5f4a03..afda6deb851 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -49,7 +49,6 @@ int main(void) BLANK(); #undef ENTRY #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry)) - ENTRY(oldrsp); ENTRY(irqcount); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 0dd45859a7a..7c27da407da 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -210,7 +210,7 @@ ENTRY(native_usergs_sysret64) /* %rsp:at FRAMEEND */ .macro FIXUP_TOP_OF_STACK tmp offset=0 - movq %gs:pda_oldrsp,\tmp + movq PER_CPU_VAR(old_rsp),\tmp movq \tmp,RSP+\offset(%rsp) movq $__USER_DS,SS+\offset(%rsp) movq $__USER_CS,CS+\offset(%rsp) @@ -221,7 +221,7 @@ ENTRY(native_usergs_sysret64) .macro RESTORE_TOP_OF_STACK tmp offset=0 movq RSP+\offset(%rsp),\tmp - movq \tmp,%gs:pda_oldrsp + movq \tmp,PER_CPU_VAR(old_rsp) movq EFLAGS+\offset(%rsp),\tmp movq \tmp,R11+\offset(%rsp) .endm @@ -479,7 +479,7 @@ ENTRY(system_call) */ ENTRY(system_call_after_swapgs) - movq %rsp,%gs:pda_oldrsp + movq %rsp,PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack),%rsp /* * No need to follow this irqs off/on section - it's straight @@ -523,7 +523,7 @@ sysret_check: CFI_REGISTER rip,rcx RESTORE_ARGS 0,-ARG_SKIP,1 /*CFI_REGISTER rflags,r11*/ - movq %gs:pda_oldrsp, %rsp + movq PER_CPU_VAR(old_rsp), %rsp USERGS_SYSRET64 CFI_RESTORE_STATE @@ -833,7 +833,7 @@ common_interrupt: XCPT_FRAME addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ interrupt do_IRQ - /* 0(%rsp): oldrsp-ARGOFFSET */ + /* 0(%rsp): old_rsp-ARGOFFSET */ ret_from_intr: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6c5f5760210..48012891892 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -60,6 +60,8 @@ asmlinkage extern void ret_from_fork(void); DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); +DEFINE_PER_CPU(unsigned long, old_rsp); + unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; static ATOMIC_NOTIFIER_HEAD(idle_notifier); @@ -395,7 +397,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; - write_pda(oldrsp, new_sp); + percpu_write(old_rsp, new_sp); regs->cs = __USER_CS; regs->ss = __USER_DS; regs->flags = 0x200; @@ -616,8 +618,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Switch the PDA and FPU contexts. 
*/ - prev->usersp = read_pda(oldrsp); - write_pda(oldrsp, next->usersp); + prev->usersp = percpu_read(old_rsp); + percpu_write(old_rsp, next->usersp); percpu_write(current_task, next_p); percpu_write(kernel_stack, diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 5a23e899367..d6fc51f4ce8 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -194,11 +194,11 @@ RELOC(xen_sysexit, 1b+1) ENTRY(xen_sysret64) /* We're already on the usermode stack at this point, but still with the kernel gs, so we can easily switch back */ - movq %rsp, %gs:pda_oldrsp + movq %rsp, PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack),%rsp pushq $__USER_DS - pushq %gs:pda_oldrsp + pushq PER_CPU_VAR(old_rsp) pushq %r11 pushq $__USER_CS pushq %rcx @@ -211,11 +211,11 @@ RELOC(xen_sysret64, 1b+1) ENTRY(xen_sysret32) /* We're already on the usermode stack at this point, but still with the kernel gs, so we can easily switch back */ - movq %rsp, %gs:pda_oldrsp + movq %rsp, PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack), %rsp pushq $__USER32_DS - pushq %gs:pda_oldrsp + pushq PER_CPU_VAR(old_rsp) pushq %r11 pushq $__USER32_CS pushq %rcx -- cgit v1.2.3 From 5689553076c4a67b83426b076082c63085b7567a Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:58 +0900 Subject: x86-64: Move irqcount from PDA to per-cpu. tj: s/irqcount/irq_count/ Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 2 +- arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 4 ++-- arch/x86/kernel/entry_64.S | 14 +++++++------- 4 files changed, 10 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index ae23deb9955..4527d70314d 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -15,7 +15,7 @@ struct x8664_pda { unsigned long unused2; unsigned long unused3; unsigned long unused4; - int irqcount; /* 32 Irq nesting counter. 
Starts -1 */ + int unused5; unsigned int unused6; /* 36 was cpunumber */ #ifdef CONFIG_CC_STACKPROTECTOR unsigned long stack_canary; /* 40 stack canary value */ diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index afda6deb851..64c834a39aa 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -49,7 +49,6 @@ int main(void) BLANK(); #undef ENTRY #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry)) - ENTRY(irqcount); DEFINE(pda_size, sizeof(struct x8664_pda)); BLANK(); #undef ENTRY diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 06b6290088f..e2323ecce1d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -893,6 +893,8 @@ DEFINE_PER_CPU(unsigned long, kernel_stack) = (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; EXPORT_PER_CPU_SYMBOL(kernel_stack); +DEFINE_PER_CPU(unsigned int, irq_count) = -1; + void __cpuinit pda_init(int cpu) { struct x8664_pda *pda = cpu_pda(cpu); @@ -903,8 +905,6 @@ void __cpuinit pda_init(int cpu) load_pda_offset(cpu); - pda->irqcount = -1; - if (cpu != 0) { if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) pda->nodenumber = cpu_to_node(cpu); diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 7c27da407da..c52b6091916 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -337,12 +337,12 @@ ENTRY(save_args) je 1f SWAPGS /* - * irqcount is used to check if a CPU is already on an interrupt stack + * irq_count is used to check if a CPU is already on an interrupt stack * or not. While this is essentially redundant with preempt_count it is * a little cheaper to use a separate counter in the PDA (short of * moving irq_enter into assembly, which would be too much work) */ -1: incl %gs:pda_irqcount +1: incl PER_CPU_VAR(irq_count) jne 2f popq_cfi %rax /* move return address... */ mov PER_CPU_VAR(irq_stack_ptr),%rsp @@ -837,7 +837,7 @@ common_interrupt: ret_from_intr: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - decl %gs:pda_irqcount + decl PER_CPU_VAR(irq_count) leaveq CFI_DEF_CFA_REGISTER rsp CFI_ADJUST_CFA_OFFSET -8 @@ -1260,14 +1260,14 @@ ENTRY(call_softirq) CFI_REL_OFFSET rbp,0 mov %rsp,%rbp CFI_DEF_CFA_REGISTER rbp - incl %gs:pda_irqcount + incl PER_CPU_VAR(irq_count) cmove PER_CPU_VAR(irq_stack_ptr),%rsp push %rbp # backlink for old unwinder call __do_softirq leaveq CFI_DEF_CFA_REGISTER rsp CFI_ADJUST_CFA_OFFSET -8 - decl %gs:pda_irqcount + decl PER_CPU_VAR(irq_count) ret CFI_ENDPROC END(call_softirq) @@ -1297,7 +1297,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) movq %rdi, %rsp # we don't return, adjust the stack frame CFI_ENDPROC DEFAULT_FRAME -11: incl %gs:pda_irqcount +11: incl PER_CPU_VAR(irq_count) movq %rsp,%rbp CFI_DEF_CFA_REGISTER rbp cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp @@ -1305,7 +1305,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) call xen_evtchn_do_upcall popq %rsp CFI_DEF_CFA_REGISTER rsp - decl %gs:pda_irqcount + decl PER_CPU_VAR(irq_count) jmp error_exit CFI_ENDPROC END(do_hypervisor_callback) -- cgit v1.2.3 From e7a22c1ebcc1caa8178df1819d05128bb5b45ab9 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:59 +0900 Subject: x86-64: Move nodenumber from PDA to per-cpu. 
tj: * s/nodenumber/node_number/ * removed now unused pda variable from pda_init() Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 1 - arch/x86/include/asm/topology.h | 3 ++- arch/x86/kernel/cpu/common.c | 13 ++++++------- arch/x86/kernel/setup_percpu.c | 4 +++- 4 files changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 4527d70314d..b30ef6bddc4 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -22,7 +22,6 @@ struct x8664_pda { /* gcc-ABI: this canary MUST be at offset 40!!! */ #endif - short nodenumber; /* number of current node (32k max) */ short in_bootmem; /* pda lives in bootmem */ short isidle; } ____cacheline_aligned_in_smp; diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 87ca3fd86e8..ffea1fe03a9 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -83,7 +83,8 @@ extern cpumask_t *node_to_cpumask_map; DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); /* Returns the number of the current Node. */ -#define numa_node_id() read_pda(nodenumber) +DECLARE_PER_CPU(int, node_number); +#define numa_node_id() percpu_read(node_number) #ifdef CONFIG_DEBUG_PER_CPU_MAPS extern int cpu_to_node(int cpu); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e2323ecce1d..7976a6a0f65 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -897,18 +897,11 @@ DEFINE_PER_CPU(unsigned int, irq_count) = -1; void __cpuinit pda_init(int cpu) { - struct x8664_pda *pda = cpu_pda(cpu); - /* Setup up data that may be needed in __get_free_pages early */ loadsegment(fs, 0); loadsegment(gs, 0); load_pda_offset(cpu); - - if (cpu != 0) { - if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) - pda->nodenumber = cpu_to_node(cpu); - } } static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks @@ -978,6 +971,12 @@ void __cpuinit cpu_init(void) if (cpu != 0) pda_init(cpu); +#ifdef CONFIG_NUMA + if (cpu != 0 && percpu_read(node_number) == 0 && + cpu_to_node(cpu) != NUMA_NO_NODE) + percpu_write(node_number, cpu_to_node(cpu)); +#endif + me = current; if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 258497f93f4..efbafbbff58 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -53,6 +53,8 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) #define X86_64_NUMA 1 /* (used later) */ +DEFINE_PER_CPU(int, node_number) = 0; +EXPORT_PER_CPU_SYMBOL(node_number); /* * Map cpu index to node index @@ -283,7 +285,7 @@ void __cpuinit numa_set_node(int cpu, int node) per_cpu(x86_cpu_to_node_map, cpu) = node; if (node != NUMA_NO_NODE) - cpu_pda(cpu)->nodenumber = node; + per_cpu(node_number, cpu) = node; } void __cpuinit numa_clear_node(int cpu) -- cgit v1.2.3 From c2558e0eba66b49993e619da66c95a50a97830a3 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:59 +0900 Subject: x86-64: Move isidle from PDA to per-cpu. 
tj: s/isidle/is_idle/ Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 1 - arch/x86/kernel/process_64.c | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index b30ef6bddc4..c31ca048a90 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -23,7 +23,6 @@ struct x8664_pda { offset 40!!! */ #endif short in_bootmem; /* pda lives in bootmem */ - short isidle; } ____cacheline_aligned_in_smp; DECLARE_PER_CPU(struct x8664_pda, __pda); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 48012891892..4523ff88a69 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -61,6 +61,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(unsigned long, old_rsp); +static DEFINE_PER_CPU(unsigned char, is_idle); unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; @@ -80,13 +81,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister); void enter_idle(void) { - write_pda(isidle, 1); + percpu_write(is_idle, 1); atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); } static void __exit_idle(void) { - if (test_and_clear_bit_pda(0, isidle) == 0) + if (x86_test_and_clear_bit_percpu(0, is_idle) == 0) return; atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); } -- cgit v1.2.3 From 87b264065880fa696c121dad8498a60524e0f6de Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 00:38:59 +0900 Subject: x86-64: Use absolute displacements for per-cpu accesses. Accessing memory through %gs should not use rip-relative addressing. Adding a P prefix for the argument tells gcc to not add (%rip) to the memory references. 
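A minimal, non-kernel sketch of the same trick (demo_counter and read_demo_counter() are made up for illustration; the asm mirrors what __percpu_arg() expands to on x86-64):

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_counter);

static inline unsigned long read_demo_counter(void)
{
	unsigned long val;

	/* emits e.g.: movq %gs:per_cpu__demo_counter,%rax  (absolute displacement) */
	asm("movq %%gs:%P1,%0" : "=r" (val) : "m" (per_cpu_var(demo_counter)));
	return val;
}

With a plain %1 the same operand could be printed as per_cpu__demo_counter(%rip); adding the instruction pointer into what is meant to be only an offset from the %gs base yields the wrong address, which is exactly what the P modifier prevents.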
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/percpu.h | 26 +++++++++++++------------- arch/x86/include/asm/system.h | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 03aa4b00a1c..165d5272ece 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -39,10 +39,10 @@ #include #ifdef CONFIG_SMP -#define __percpu_seg_str "%%"__stringify(__percpu_seg)":" +#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x #define __my_cpu_offset percpu_read(this_cpu_off) #else -#define __percpu_seg_str +#define __percpu_arg(x) "%" #x #endif /* For arch-specific code, we can use direct single-insn ops (they @@ -58,22 +58,22 @@ do { \ } \ switch (sizeof(var)) { \ case 1: \ - asm(op "b %1,"__percpu_seg_str"%0" \ + asm(op "b %1,"__percpu_arg(0) \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ case 2: \ - asm(op "w %1,"__percpu_seg_str"%0" \ + asm(op "w %1,"__percpu_arg(0) \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ case 4: \ - asm(op "l %1,"__percpu_seg_str"%0" \ + asm(op "l %1,"__percpu_arg(0) \ : "+m" (var) \ : "ri" ((T__)val)); \ break; \ case 8: \ - asm(op "q %1,"__percpu_seg_str"%0" \ + asm(op "q %1,"__percpu_arg(0) \ : "+m" (var) \ : "r" ((T__)val)); \ break; \ @@ -86,22 +86,22 @@ do { \ typeof(var) ret__; \ switch (sizeof(var)) { \ case 1: \ - asm(op "b "__percpu_seg_str"%1,%0" \ + asm(op "b "__percpu_arg(1)",%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ case 2: \ - asm(op "w "__percpu_seg_str"%1,%0" \ + asm(op "w "__percpu_arg(1)",%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ case 4: \ - asm(op "l "__percpu_seg_str"%1,%0" \ + asm(op "l "__percpu_arg(1)",%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ case 8: \ - asm(op "q "__percpu_seg_str"%1,%0" \ + asm(op "q "__percpu_arg(1)",%0" \ : "=r" (ret__) \ : "m" (var)); \ break; \ @@ -122,9 +122,9 @@ do { \ #define x86_test_and_clear_bit_percpu(bit, var) \ ({ \ int old__; \ - asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0" \ - : "=r" (old__) \ - : "dIr" (bit), "i" (&per_cpu__##var) : "memory"); \ + asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ + : "=r" (old__), "+m" (per_cpu__##var) \ + : "dIr" (bit)); \ old__; \ }) diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 4399aac680e..d1dc27dba36 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -94,7 +94,7 @@ do { \ "call __switch_to\n\t" \ ".globl thread_return\n" \ "thread_return:\n\t" \ - "movq "__percpu_seg_str"%P[current_task],%%rsi\n\t" \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ -- cgit v1.2.3 From 5662a2f8e7313f78d6b17ab383f3e4f04971c335 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 18 Jan 2009 19:37:21 +0100 Subject: x86, rdc321x: remove/move leftover files Impact: cleanup Move/remove leftover RDC321 files. Now that it's not a subarch anymore, arch/x86/mach-rdc321x and arch/x86/include/asm/mach-rdc321x/ are not needed. One include file was still in use: rdc321x_defs.h, move that to the generic x86 asm header directory. 
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-rdc321x/gpio.h | 60 ------- arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h | 12 -- arch/x86/include/asm/rdc321x_defs.h | 12 ++ arch/x86/mach-rdc321x/Makefile | 5 - arch/x86/mach-rdc321x/gpio.c | 194 ----------------------- arch/x86/mach-rdc321x/platform.c | 69 -------- 6 files changed, 12 insertions(+), 340 deletions(-) delete mode 100644 arch/x86/include/asm/mach-rdc321x/gpio.h delete mode 100644 arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h create mode 100644 arch/x86/include/asm/rdc321x_defs.h delete mode 100644 arch/x86/mach-rdc321x/Makefile delete mode 100644 arch/x86/mach-rdc321x/gpio.c delete mode 100644 arch/x86/mach-rdc321x/platform.c (limited to 'arch') diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h deleted file mode 100644 index c210ab5788b..00000000000 --- a/arch/x86/include/asm/mach-rdc321x/gpio.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef _ASM_X86_MACH_RDC321X_GPIO_H -#define _ASM_X86_MACH_RDC321X_GPIO_H - -#include - -extern int rdc_gpio_get_value(unsigned gpio); -extern void rdc_gpio_set_value(unsigned gpio, int value); -extern int rdc_gpio_direction_input(unsigned gpio); -extern int rdc_gpio_direction_output(unsigned gpio, int value); -extern int rdc_gpio_request(unsigned gpio, const char *label); -extern void rdc_gpio_free(unsigned gpio); -extern void __init rdc321x_gpio_setup(void); - -/* Wrappers for the arch-neutral GPIO API */ - -static inline int gpio_request(unsigned gpio, const char *label) -{ - return rdc_gpio_request(gpio, label); -} - -static inline void gpio_free(unsigned gpio) -{ - might_sleep(); - rdc_gpio_free(gpio); -} - -static inline int gpio_direction_input(unsigned gpio) -{ - return rdc_gpio_direction_input(gpio); -} - -static inline int gpio_direction_output(unsigned gpio, int value) -{ - return rdc_gpio_direction_output(gpio, value); -} - -static inline int gpio_get_value(unsigned gpio) -{ - return rdc_gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ - rdc_gpio_set_value(gpio, value); -} - -static inline int gpio_to_irq(unsigned gpio) -{ - return gpio; -} - -static inline int irq_to_gpio(unsigned irq) -{ - return irq; -} - -/* For cansleep */ -#include - -#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */ diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h deleted file mode 100644 index c8e9c8bed3d..00000000000 --- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h +++ /dev/null @@ -1,12 +0,0 @@ -#define PFX "rdc321x: " - -/* General purpose configuration and data registers */ -#define RDC3210_CFGREG_ADDR 0x0CF8 -#define RDC3210_CFGREG_DATA 0x0CFC - -#define RDC321X_GPIO_CTRL_REG1 0x48 -#define RDC321X_GPIO_CTRL_REG2 0x84 -#define RDC321X_GPIO_DATA_REG1 0x4c -#define RDC321X_GPIO_DATA_REG2 0x88 - -#define RDC321X_MAX_GPIO 58 diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h new file mode 100644 index 00000000000..c8e9c8bed3d --- /dev/null +++ b/arch/x86/include/asm/rdc321x_defs.h @@ -0,0 +1,12 @@ +#define PFX "rdc321x: " + +/* General purpose configuration and data registers */ +#define RDC3210_CFGREG_ADDR 0x0CF8 +#define RDC3210_CFGREG_DATA 0x0CFC + +#define RDC321X_GPIO_CTRL_REG1 0x48 +#define RDC321X_GPIO_CTRL_REG2 0x84 +#define RDC321X_GPIO_DATA_REG1 0x4c +#define RDC321X_GPIO_DATA_REG2 0x88 + +#define RDC321X_MAX_GPIO 58 diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile deleted file 
mode 100644 index 8325b4ca431..00000000000 --- a/arch/x86/mach-rdc321x/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the RDC321x specific parts of the kernel -# -obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o - diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c deleted file mode 100644 index 247f33d3a40..00000000000 --- a/arch/x86/mach-rdc321x/gpio.c +++ /dev/null @@ -1,194 +0,0 @@ -/* - * GPIO support for RDC SoC R3210/R8610 - * - * Copyright (C) 2007, Florian Fainelli - * Copyright (C) 2008, Volker Weiss - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - - -#include -#include -#include -#include - -#include -#include - - -/* spin lock to protect our private copy of GPIO data register plus - the access to PCI conf registers. */ -static DEFINE_SPINLOCK(gpio_lock); - -/* copy of GPIO data registers */ -static u32 gpio_data_reg1; -static u32 gpio_data_reg2; - -static u32 gpio_request_data[2]; - - -static inline void rdc321x_conf_write(unsigned addr, u32 value) -{ - outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR); - outl(value, RDC3210_CFGREG_DATA); -} - -static inline void rdc321x_conf_or(unsigned addr, u32 value) -{ - outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR); - value |= inl(RDC3210_CFGREG_DATA); - outl(value, RDC3210_CFGREG_DATA); -} - -static inline u32 rdc321x_conf_read(unsigned addr) -{ - outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR); - - return inl(RDC3210_CFGREG_DATA); -} - -/* configure pin as GPIO */ -static void rdc321x_configure_gpio(unsigned gpio) -{ - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - rdc321x_conf_or(gpio < 32 - ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2, - 1 << (gpio & 0x1f)); - spin_unlock_irqrestore(&gpio_lock, flags); -} - -/* initially setup the 2 copies of the gpio data registers. - This function must be called by the platform setup code. */ -void __init rdc321x_gpio_setup() -{ - /* this might not be, what others (BIOS, bootloader, etc.) - wrote to these registers before, but it's a good guess. Still - better than just using 0xffffffff. */ - - gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1); - gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2); -} - -/* determine, if gpio number is valid */ -static inline int rdc321x_is_gpio(unsigned gpio) -{ - return gpio <= RDC321X_MAX_GPIO; -} - -/* request GPIO */ -int rdc_gpio_request(unsigned gpio, const char *label) -{ - unsigned long flags; - - if (!rdc321x_is_gpio(gpio)) - return -EINVAL; - - spin_lock_irqsave(&gpio_lock, flags); - if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f))) - goto inuse; - gpio_request_data[(gpio & 0x20) ? 
1 : 0] |= (1 << (gpio & 0x1f)); - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -inuse: - spin_unlock_irqrestore(&gpio_lock, flags); - return -EINVAL; -} -EXPORT_SYMBOL(rdc_gpio_request); - -/* release previously-claimed GPIO */ -void rdc_gpio_free(unsigned gpio) -{ - unsigned long flags; - - if (!rdc321x_is_gpio(gpio)) - return; - - spin_lock_irqsave(&gpio_lock, flags); - gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f)); - spin_unlock_irqrestore(&gpio_lock, flags); -} -EXPORT_SYMBOL(rdc_gpio_free); - -/* read GPIO pin */ -int rdc_gpio_get_value(unsigned gpio) -{ - u32 reg; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - reg = rdc321x_conf_read(gpio < 32 - ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2); - spin_unlock_irqrestore(&gpio_lock, flags); - - return (1 << (gpio & 0x1f)) & reg ? 1 : 0; -} -EXPORT_SYMBOL(rdc_gpio_get_value); - -/* set GPIO pin to value */ -void rdc_gpio_set_value(unsigned gpio, int value) -{ - unsigned long flags; - u32 reg; - - reg = 1 << (gpio & 0x1f); - if (gpio < 32) { - spin_lock_irqsave(&gpio_lock, flags); - if (value) - gpio_data_reg1 |= reg; - else - gpio_data_reg1 &= ~reg; - rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1); - spin_unlock_irqrestore(&gpio_lock, flags); - } else { - spin_lock_irqsave(&gpio_lock, flags); - if (value) - gpio_data_reg2 |= reg; - else - gpio_data_reg2 &= ~reg; - rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2); - spin_unlock_irqrestore(&gpio_lock, flags); - } -} -EXPORT_SYMBOL(rdc_gpio_set_value); - -/* configure GPIO pin as input */ -int rdc_gpio_direction_input(unsigned gpio) -{ - if (!rdc321x_is_gpio(gpio)) - return -EINVAL; - - rdc321x_configure_gpio(gpio); - - return 0; -} -EXPORT_SYMBOL(rdc_gpio_direction_input); - -/* configure GPIO pin as output and set value */ -int rdc_gpio_direction_output(unsigned gpio, int value) -{ - if (!rdc321x_is_gpio(gpio)) - return -EINVAL; - - gpio_set_value(gpio, value); - rdc321x_configure_gpio(gpio); - - return 0; -} -EXPORT_SYMBOL(rdc_gpio_direction_output); diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c deleted file mode 100644 index 4f4e50c3ad3..00000000000 --- a/arch/x86/mach-rdc321x/platform.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Generic RDC321x platform devices - * - * Copyright (C) 2007 Florian Fainelli - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ -#include -#include -#include -#include -#include -#include - -#include - -/* LEDS */ -static struct gpio_led default_leds[] = { - { .name = "rdc:dmz", .gpio = 1, }, -}; - -static struct gpio_led_platform_data rdc321x_led_data = { - .num_leds = ARRAY_SIZE(default_leds), - .leds = default_leds, -}; - -static struct platform_device rdc321x_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &rdc321x_led_data, - } -}; - -/* Watchdog */ -static struct platform_device rdc321x_wdt = { - .name = "rdc321x-wdt", - .id = -1, - .num_resources = 0, -}; - -static struct platform_device *rdc321x_devs[] = { - &rdc321x_leds, - &rdc321x_wdt -}; - -static int __init rdc_board_setup(void) -{ - rdc321x_gpio_setup(); - - return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs)); -} - -arch_initcall(rdc_board_setup); -- cgit v1.2.3 From 422e79a8b39d9ac73e410dc3cd099aecea82afd2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 19 Jan 2009 17:06:42 +1100 Subject: x86: Remove never-called arch_setup_msi_irq() Since commit 75c46fa, "x64, x2apic/intr-remap: MSI and MSI-X support for interrupt remapping infrastructure", x86 has had an implementation of arch_setup_msi_irqs(). That implementation does not call arch_setup_msi_irq(), instead it calls setup_irq(). No other x86 code calls arch_setup_msi_irq(). That leaves only arch_setup_msi_irqs() in drivers/pci/msi.c, but that routine is overridden by the x86 version of arch_setup_msi_irqs(). So arch_setup_msi_irq() is dead code, remove it. Signed-off-by: Michael Ellerman Signed-off-by: H. Peter Anvin --- arch/x86/kernel/io_apic.c | 34 ---------------------------------- 1 file changed, 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 79b8c0c72d3..157aafa4558 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3462,40 +3462,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) return 0; } -int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc) -{ - unsigned int irq; - int ret; - unsigned int irq_want; - - irq_want = nr_irqs_gsi; - irq = create_irq_nr(irq_want); - if (irq == 0) - return -1; - -#ifdef CONFIG_INTR_REMAP - if (!intr_remapping_enabled) - goto no_ir; - - ret = msi_alloc_irte(dev, irq, 1); - if (ret < 0) - goto error; -no_ir: -#endif - ret = setup_msi_irq(dev, msidesc, irq); - if (ret < 0) { - destroy_irq(irq); - return ret; - } - return 0; - -#ifdef CONFIG_INTR_REMAP -error: - destroy_irq(irq); - return ret; -#endif -} - int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { unsigned int irq; -- cgit v1.2.3 From 28796eaf806502b9bd86cbacf8edbc14c80c14b0 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 17 Jan 2009 15:11:06 +0000 Subject: ASoC: machine support for Toshiba e740 PDA This patch provides support for the wm9705 AC97 codec on the Toshiba e740. Note: The e740 has a hard headphone switch that turns the speaker off and is not software detectable or controllable. Also both headphone and speaker amps share a common output enable. 
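As a rough illustration of how a machine driver might drive the lines defined below through the generic gpiolib API (the function and the active-low assumption for nAVDD2 are guesses for illustration, not the actual sound/soc/pxa code):

#include <linux/gpio.h>
#include <mach/eseries-gpio.h>

/* Illustrative only: claim the e740 audio power GPIOs and power things up. */
static int e740_audio_power_up(void)
{
	int ret;

	ret = gpio_request(GPIO_E740_WM9705_nAVDD2, "codec AVDD2");
	if (ret)
		return ret;
	ret = gpio_request(GPIO_E740_AMP_ON, "audio amp");
	if (ret) {
		gpio_free(GPIO_E740_WM9705_nAVDD2);
		return ret;
	}

	/* nAVDD2 appears active-low from its name: drive low to enable the supply. */
	gpio_direction_output(GPIO_E740_WM9705_nAVDD2, 0);
	/* Shared enable for the headphone and speaker amplifiers. */
	gpio_direction_output(GPIO_E740_AMP_ON, 1);

	return 0;
}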
Signed-off-by: Ian Molton Signed-off-by: Mark Brown --- arch/arm/mach-pxa/e740.c | 5 +++++ arch/arm/mach-pxa/include/mach/eseries-gpio.h | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/e740.c b/arch/arm/mach-pxa/e740.c index 6d48e00f4f0..a6fff782e7a 100644 --- a/arch/arm/mach-pxa/e740.c +++ b/arch/arm/mach-pxa/e740.c @@ -135,6 +135,11 @@ static unsigned long e740_pin_config[] __initdata = { /* IrDA */ GPIO38_GPIO | MFP_LPM_DRIVE_HIGH, + /* Audio power control */ + GPIO16_GPIO, /* AC97 codec AVDD2 supply (analogue power) */ + GPIO40_GPIO, /* Mic amp power */ + GPIO41_GPIO, /* Headphone amp power */ + /* PC Card */ GPIO8_GPIO, /* CD0 */ GPIO44_GPIO, /* CD1 */ diff --git a/arch/arm/mach-pxa/include/mach/eseries-gpio.h b/arch/arm/mach-pxa/include/mach/eseries-gpio.h index 6d6e4d8fa4c..f3e5509820d 100644 --- a/arch/arm/mach-pxa/include/mach/eseries-gpio.h +++ b/arch/arm/mach-pxa/include/mach/eseries-gpio.h @@ -45,6 +45,11 @@ /* e7xx IrDA power control */ #define GPIO_E7XX_IR_OFF 38 +/* e740 audio control GPIOs */ +#define GPIO_E740_WM9705_nAVDD2 16 +#define GPIO_E740_MIC_ON 40 +#define GPIO_E740_AMP_ON 41 + /* e750 audio control GPIOs */ #define GPIO_E750_HP_AMP_OFF 4 #define GPIO_E750_SPK_AMP_OFF 7 -- cgit v1.2.3 From 5cdc5e9e69d4dc3a3630ae1fa666401b2a8dcde6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Jan 2009 20:49:37 +0100 Subject: x86: fully honor "nolapic", fix Impact: build fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 48578795583..9ca12af6c87 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1131,7 +1131,9 @@ void __cpuinit setup_local_APIC(void) int i, j; if (disable_apic) { +#ifdef CONFIG_X86_IO_APIC disable_ioapic_setup(); +#endif return; } -- cgit v1.2.3 From c6e50f93db5bd0895ec7c7d1b6f3886c6e1f11b6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jan 2009 12:29:19 +0900 Subject: x86: cleanup stack protector Impact: cleanup Make the following cleanups. * remove duplicate comment from boot_init_stack_canary() which fits better in the other place - cpu_idle(). * move stack_canary offset check from __switch_to() to boot_init_stack_canary(). Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 2 -- arch/x86/include/asm/stackprotector.h | 13 ++++++------- arch/x86/kernel/process_64.c | 7 ------- 3 files changed, 6 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 5976cd803e9..4a8c9d382c9 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -40,6 +40,4 @@ extern void pda_init(int); #endif -#define refresh_stack_canary() write_pda(stack_canary, current->stack_canary) - #endif /* _ASM_X86_PDA_H */ diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index c7f0d10bae7..2383e5bb475 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -16,13 +16,12 @@ static __always_inline void boot_init_stack_canary(void) u64 tsc; /* - * If we're the non-boot CPU, nothing set the PDA stack - * canary up for us - and if we are the boot CPU we have - * a 0 stack canary. This is a good place for updating - * it, as we wont ever return from this function (so the - * invalid canaries already on the stack wont ever - * trigger). 
- * + * Build time only check to make sure the stack_canary is at + * offset 40 in the pda; this is a gcc ABI requirement + */ + BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40); + + /* * We both use the random pool and the current TSC as a source * of randomness. The TSC only matters for very early init, * there it already has some randomness on most systems. Later diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index aa89eabf09e..088bc9a0f82 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -638,13 +638,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) percpu_write(kernel_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE - KERNEL_STACK_OFFSET); -#ifdef CONFIG_CC_STACKPROTECTOR - /* - * Build time only check to make sure the stack_canary is at - * offset 40 in the pda; this is a gcc ABI requirement - */ - BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40); -#endif /* * Now maybe reload the debug registers and handle I/O bitmaps -- cgit v1.2.3 From b4a8f7a262e79ecb0b39beb1449af524a78887f8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jan 2009 12:29:19 +0900 Subject: x86: conditionalize stack canary handling in hot path Impact: no unnecessary stack canary swapping during context switch There's no point in moving stack_canary around during context switch if it's not enabled. Conditionalize it. Signed-off-by: Tejun Heo --- arch/x86/include/asm/system.h | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 8cadfe9b119..b77bd8bd3cc 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -86,17 +86,28 @@ do { \ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ "r12", "r13", "r14", "r15" +#ifdef CONFIG_CC_STACKPROTECTOR +#define __switch_canary \ + "movq %P[task_canary](%%rsi),%%r8\n\t" \ + "movq %%r8,%%gs:%P[pda_canary]\n\t" +#define __switch_canary_param \ + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \ + , [pda_canary] "i" (offsetof(struct x8664_pda, stack_canary)) +#else /* CC_STACKPROTECTOR */ +#define __switch_canary +#define __switch_canary_param +#endif /* CC_STACKPROTECTOR */ + /* Save restore flags to clear handle leaking NT */ #define switch_to(prev, next, last) \ - asm volatile(SAVE_CONTEXT \ + asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ "call __switch_to\n\t" \ ".globl thread_return\n" \ "thread_return:\n\t" \ "movq "__percpu_arg([current_task])",%%rsi\n\t" \ - "movq %P[task_canary](%%rsi),%%r8\n\t" \ - "movq %%r8,%%gs:%P[pda_canary]\n\t" \ + __switch_canary \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ @@ -108,9 +119,8 @@ do { \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ - [task_canary] "i" (offsetof(struct task_struct, stack_canary)),\ - [current_task] "m" (per_cpu_var(current_task)), \ - [pda_canary] "i" (offsetof(struct x8664_pda, stack_canary))\ + [current_task] "m" (per_cpu_var(current_task)) \ + __switch_canary_param \ : "memory", "cc" __EXTRA_CLOBBER) #endif -- cgit v1.2.3 From 8ce031972b40da58c268caba8c5ea3c0856d7131 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 12:21:27 +0900 Subject: x86: 
remove pda_init() Impact: cleanup Copy the code to cpu_init() to satisfy the requirement that the cpu be reinitialized. Remove all other calls, since the segments are already initialized in head_64.S. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 1 - arch/x86/kernel/cpu/common.c | 15 +++------------ arch/x86/kernel/head64.c | 2 -- arch/x86/xen/enlighten.c | 1 - 4 files changed, 3 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index 4a8c9d382c9..b473e952439 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -24,7 +24,6 @@ struct x8664_pda { } ____cacheline_aligned_in_smp; DECLARE_PER_CPU(struct x8664_pda, __pda); -extern void pda_init(int); #define cpu_pda(cpu) (&per_cpu(__pda, cpu)) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7976a6a0f65..f83a4d6160f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -895,15 +895,6 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack); DEFINE_PER_CPU(unsigned int, irq_count) = -1; -void __cpuinit pda_init(int cpu) -{ - /* Setup up data that may be needed in __get_free_pages early */ - loadsegment(fs, 0); - loadsegment(gs, 0); - - load_pda_offset(cpu); -} - static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) __aligned(PAGE_SIZE); @@ -967,9 +958,9 @@ void __cpuinit cpu_init(void) struct task_struct *me; int i; - /* CPU 0 is initialised in head64.c */ - if (cpu != 0) - pda_init(cpu); + loadsegment(fs, 0); + loadsegment(gs, 0); + load_pda_offset(cpu); #ifdef CONFIG_NUMA if (cpu != 0 && percpu_read(node_number) == 0 && diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index af67d3227ea..f5b27224769 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -91,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data) if (console_loglevel == 10) early_printk("Kernel alive\n"); - pda_init(0); - x86_64_start_reservations(real_mode_data); } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 75b94139e1f..bef941f6145 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1645,7 +1645,6 @@ asmlinkage void __init xen_start_kernel(void) #ifdef CONFIG_X86_64 /* Disable until direct per-cpu data access. */ have_vcpu_info_placement = 0; - pda_init(0); #endif xen_smp_init(); -- cgit v1.2.3 From 8c7e58e690ae60ab4215b025f433ed4af261e103 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 12:21:28 +0900 Subject: x86: rework __per_cpu_load adjustments Impact: cleanup Use cpu_number to determine if the adjustment is necessary. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/head_64.S | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index c8ace880661..98ea26a2fca 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -207,19 +207,15 @@ ENTRY(secondary_startup_64) #ifdef CONFIG_SMP /* - * early_gdt_base should point to the gdt_page in static percpu init - * data area. Computing this requires two symbols - __per_cpu_load - * and per_cpu__gdt_page. As linker can't do no such relocation, do - * it by hand. As early_gdt_descr is manipulated by C code for - * secondary CPUs, this should be done only once for the boot CPU - * when early_gdt_descr_base contains zero. 
+ * Fix up static pointers that need __per_cpu_load added. The assembler + * is unable to do this directly. This is only needed for the boot cpu. + * These values are set up with the correct base addresses by C code for + * secondary cpus. */ - movq early_gdt_descr_base(%rip), %rax - testq %rax, %rax - jnz 1f - movq $__per_cpu_load, %rax - addq $per_cpu__gdt_page, %rax - movq %rax, early_gdt_descr_base(%rip) + movq initial_gs(%rip), %rax + cmpl $0, per_cpu__cpu_number(%rax) + jne 1f + addq %rax, early_gdt_descr_base(%rip) 1: #endif /* @@ -431,12 +427,8 @@ NEXT_PAGE(level2_spare_pgt) .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 -#ifdef CONFIG_SMP early_gdt_descr_base: - .quad 0x0000000000000000 -#else .quad per_cpu__gdt_page -#endif ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ -- cgit v1.2.3 From 947e76cdc34c782fc947313d4331380686eebbad Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 19 Jan 2009 12:21:28 +0900 Subject: x86: move stack_canary into irq_stack Impact: x86_64 percpu area layout change, irq_stack now at the beginning Now that the PDA is empty except for the stack canary, it can be removed. The irqstack is moved to the start of the per-cpu section. If the stack protector is enabled, the canary overlaps the bottom 48 bytes of the irqstack. tj: * updated subject * dropped asm relocation of irq_stack_ptr * updated comments a bit * rebased on top of stack canary changes Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/pda.h | 3 --- arch/x86/include/asm/percpu.h | 6 ------ arch/x86/include/asm/processor.h | 23 ++++++++++++++++++++++- arch/x86/include/asm/stackprotector.h | 6 +++--- arch/x86/include/asm/system.h | 4 ++-- arch/x86/kernel/asm-offsets_64.c | 4 ---- arch/x86/kernel/cpu/common.c | 7 ++++--- arch/x86/kernel/head_64.S | 13 +++++-------- arch/x86/kernel/setup_percpu.c | 34 ++++------------------------------ arch/x86/kernel/vmlinux_64.lds.S | 8 ++++++-- 10 files changed, 46 insertions(+), 62 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h index b473e952439..ba46416634f 100644 --- a/arch/x86/include/asm/pda.h +++ b/arch/x86/include/asm/pda.h @@ -17,9 +17,6 @@ struct x8664_pda { unsigned long unused4; int unused5; unsigned int unused6; /* 36 was cpunumber */ - unsigned long stack_canary; /* 40 stack canary value */ - /* gcc-ABI: this canary MUST be at - offset 40!!! */ short in_bootmem; /* pda lives in bootmem */ } ____cacheline_aligned_in_smp; diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 165d5272ece..ce980db5e59 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -133,12 +133,6 @@ do { \ /* We can use this directly for local CPU (faster). */ DECLARE_PER_CPU(unsigned long, this_cpu_off); -#ifdef CONFIG_X86_64 -extern void load_pda_offset(int cpu); -#else -static inline void load_pda_offset(int cpu) { } -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index f511246fa6c..48676b943b9 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -379,8 +379,29 @@ union thread_xstate { #ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); -DECLARE_PER_CPU(char[IRQ_STACK_SIZE], irq_stack); +union irq_stack_union { + char irq_stack[IRQ_STACK_SIZE]; + /* + * GCC hardcodes the stack canary as %gs:40. 
Since the + * irq_stack is the object at %gs:0, we reserve the bottom + * 48 bytes of the irq stack for the canary. + */ + struct { + char gs_base[40]; + unsigned long stack_canary; + }; +}; + +DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); DECLARE_PER_CPU(char *, irq_stack_ptr); + +static inline void load_gs_base(int cpu) +{ + /* Memory clobbers used to order pda/percpu accesses */ + mb(); + wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); + mb(); +} #endif extern void print_cpu_info(struct cpuinfo_x86 *); diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 2383e5bb475..36a700acaf2 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -2,7 +2,7 @@ #define _ASM_STACKPROTECTOR_H 1 #include -#include +#include /* * Initialize the stackprotector canary value. @@ -19,7 +19,7 @@ static __always_inline void boot_init_stack_canary(void) * Build time only check to make sure the stack_canary is at * offset 40 in the pda; this is a gcc ABI requirement */ - BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40); + BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40); /* * We both use the random pool and the current TSC as a source @@ -32,7 +32,7 @@ static __always_inline void boot_init_stack_canary(void) canary += tsc + (tsc << 32UL); current->stack_canary = canary; - write_pda(stack_canary, canary); + percpu_write(irq_stack_union.stack_canary, canary); } #endif diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index b77bd8bd3cc..52eb748a68a 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -89,10 +89,10 @@ do { \ #ifdef CONFIG_CC_STACKPROTECTOR #define __switch_canary \ "movq %P[task_canary](%%rsi),%%r8\n\t" \ - "movq %%r8,%%gs:%P[pda_canary]\n\t" + "movq %%r8,%%gs:%P[gs_canary]\n\t" #define __switch_canary_param \ , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \ - , [pda_canary] "i" (offsetof(struct x8664_pda, stack_canary)) + , [gs_canary] "i" (offsetof(union irq_stack_union, stack_canary)) #else /* CC_STACKPROTECTOR */ #define __switch_canary #define __switch_canary_param diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 64c834a39aa..94f9c8b39d2 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -48,10 +48,6 @@ int main(void) #endif BLANK(); #undef ENTRY -#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry)) - DEFINE(pda_size, sizeof(struct x8664_pda)); - BLANK(); -#undef ENTRY #ifdef CONFIG_PARAVIRT BLANK(); OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f83a4d6160f..098934e72a1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -881,12 +881,13 @@ __setup("clearcpuid=", setup_disablecpuid); #ifdef CONFIG_X86_64 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; -DEFINE_PER_CPU_PAGE_ALIGNED(char[IRQ_STACK_SIZE], irq_stack); +DEFINE_PER_CPU_FIRST(union irq_stack_union, + irq_stack_union) __aligned(PAGE_SIZE); #ifdef CONFIG_SMP DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */ #else DEFINE_PER_CPU(char *, irq_stack_ptr) = - per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64; + per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; #endif DEFINE_PER_CPU(unsigned long, kernel_stack) = @@ -960,7 +961,7 @@ void __cpuinit 
cpu_init(void) loadsegment(fs, 0); loadsegment(gs, 0); - load_pda_offset(cpu); + load_gs_base(cpu); #ifdef CONFIG_NUMA if (cpu != 0 && percpu_read(node_number) == 0 && diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 98ea26a2fca..a0a2b5ca9b7 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -242,13 +242,10 @@ ENTRY(secondary_startup_64) /* Set up %gs. * - * On SMP, %gs should point to the per-cpu area. For initial - * boot, make %gs point to the init data section. For a - * secondary CPU,initial_gs should be set to its pda address - * before the CPU runs this code. - * - * On UP, initial_gs points to PER_CPU_VAR(__pda) and doesn't - * change. + * The base of %gs always points to the bottom of the irqstack + * union. If the stack protector canary is enabled, it is + * located at %gs:40. Note that, on SMP, the boot cpu uses + * init data section till per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movq initial_gs(%rip),%rax @@ -281,7 +278,7 @@ ENTRY(secondary_startup_64) #ifdef CONFIG_SMP .quad __per_cpu_load #else - .quad PER_CPU_VAR(__pda) + .quad PER_CPU_VAR(irq_stack_union) #endif __FINITDATA diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index efbafbbff58..90b8e154bb5 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -77,30 +77,6 @@ static void __init setup_node_to_cpumask_map(void); static inline void setup_node_to_cpumask_map(void) { } #endif -/* - * Define load_pda_offset() and per-cpu __pda for x86_64. - * load_pda_offset() is responsible for loading the offset of pda into - * %gs. - * - * On SMP, pda offset also duals as percpu base address and thus it - * should be at the start of per-cpu area. To achieve this, it's - * preallocated in vmlinux_64.lds.S directly instead of using - * DEFINE_PER_CPU(). - */ -#ifdef CONFIG_X86_64 -void __cpuinit load_pda_offset(int cpu) -{ - /* Memory clobbers used to order pda/percpu accesses */ - mb(); - wrmsrl(MSR_GS_BASE, cpu_pda(cpu)); - mb(); -} -#ifndef CONFIG_SMP -DEFINE_PER_CPU(struct x8664_pda, __pda); -#endif -EXPORT_PER_CPU_SYMBOL(__pda); -#endif /* CONFIG_SMP && CONFIG_X86_64 */ - #ifdef CONFIG_X86_64 /* correctly size the local cpu masks */ @@ -207,15 +183,13 @@ void __init setup_per_cpu_areas(void) per_cpu(cpu_number, cpu) = cpu; #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = - (char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64; + per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; /* - * CPU0 modified pda in the init data area, reload pda - * offset for CPU0 and clear the area for others. + * Up to this point, CPU0 has been using .data.init + * area. Reload %gs offset for CPU0. */ if (cpu == 0) - load_pda_offset(0); - else - memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); + load_gs_base(cpu); #endif DBG("PERCPU: cpu %4d %p\n", cpu, ptr); diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index a09abb8fb97..c9740996430 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -220,8 +220,7 @@ SECTIONS * so that it can be accessed as a percpu variable. */ . 
= ALIGN(PAGE_SIZE); - PERCPU_VADDR_PREALLOC(0, :percpu, pda_size) - per_cpu____pda = __per_cpu_start; + PERCPU_VADDR(0, :percpu) #else PERCPU(PAGE_SIZE) #endif @@ -262,3 +261,8 @@ SECTIONS */ ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE") + +#ifdef CONFIG_SMP +ASSERT((per_cpu__irq_stack_union == 0), + "irq_stack_union is not at start of per-cpu area"); +#endif -- cgit v1.2.3 From 0d974d4592708f85044751817da4b7016e1b0602 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Sun, 18 Jan 2009 19:52:25 -0500 Subject: x86: remove pda.h Impact: cleanup Signed-off-by: Brian Gerst --- arch/x86/include/asm/pda.h | 39 --------------------------------------- arch/x86/include/asm/pgtable_64.h | 1 - arch/x86/include/asm/smp.h | 1 - arch/x86/kernel/asm-offsets_64.c | 1 - arch/x86/kernel/cpu/common.c | 1 - arch/x86/kernel/process_64.c | 1 - arch/x86/kernel/traps.c | 1 - 7 files changed, 45 deletions(-) delete mode 100644 arch/x86/include/asm/pda.h (limited to 'arch') diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h deleted file mode 100644 index ba46416634f..00000000000 --- a/arch/x86/include/asm/pda.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef _ASM_X86_PDA_H -#define _ASM_X86_PDA_H - -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#include -#include - -/* Per processor datastructure. %gs points to it while the kernel runs */ -struct x8664_pda { - unsigned long unused1; - unsigned long unused2; - unsigned long unused3; - unsigned long unused4; - int unused5; - unsigned int unused6; /* 36 was cpunumber */ - short in_bootmem; /* pda lives in bootmem */ -} ____cacheline_aligned_in_smp; - -DECLARE_PER_CPU(struct x8664_pda, __pda); - -#define cpu_pda(cpu) (&per_cpu(__pda, cpu)) - -#define read_pda(field) percpu_read(__pda.field) -#define write_pda(field, val) percpu_write(__pda.field, val) -#define add_pda(field, val) percpu_add(__pda.field, val) -#define sub_pda(field, val) percpu_sub(__pda.field, val) -#define or_pda(field, val) percpu_or(__pda.field, val) - -/* This is not atomic against other CPUs -- CPU preemption needs to be off */ -#define test_and_clear_bit_pda(bit, field) \ - x86_test_and_clear_bit_percpu(bit, __pda.field) - -#endif - -#endif /* _ASM_X86_PDA_H */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ba09289acca..1df9637dfda 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -11,7 +11,6 @@ #include #include #include -#include extern pud_t level3_kernel_pgt[512]; extern pud_t level3_ident_pgt[512]; diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 68636e767a9..45ef8a1b9d7 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -15,7 +15,6 @@ # include # endif #endif -#include #include #include diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 94f9c8b39d2..8793ab33e2c 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 098934e72a1..3887fcf6e51 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -30,7 +30,6 @@ #include #endif -#include #include #include #include diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 088bc9a0f82..c422eebb0c5 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -47,7 
+47,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 98c2d055284..ed5aee5f3fc 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -59,7 +59,6 @@ #ifdef CONFIG_X86_64 #include #include -#include #else #include #include -- cgit v1.2.3 From 5766b842b23c6b40935a5f3bd435b2bcdaff2143 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 20 Jan 2009 09:13:15 +0100 Subject: x86, cpumask: fix tlb flush race Impact: fix bootup crash The cpumask is now passed in as a reference to mm->cpu_vm_mask, not on the stack - hence it is not constant anymore during the TLB flush. That way it could race and some static sanity checks would trigger: [ 238.154287] ------------[ cut here ]------------ [ 238.156039] kernel BUG at arch/x86/kernel/tlb_32.c:130! [ 238.156039] invalid opcode: 0000 [#1] SMP [ 238.156039] last sysfs file: /sys/class/net/eth2/address [ 238.156039] Modules linked in: [ 238.156039] [ 238.156039] Pid: 6493, comm: ifup-eth Not tainted (2.6.29-rc2-tip #1) P4DC6 [ 238.156039] EIP: 0060:[] EFLAGS: 00010202 CPU: 2 [ 238.156039] EIP is at native_flush_tlb_others+0x35/0x158 [ 238.156039] EAX: c0ef972c EBX: f6143301 ECX: 00000000 EDX: 00000000 [ 238.156039] ESI: f61433a8 EDI: f6143200 EBP: f34f3e00 ESP: f34f3df0 [ 238.156039] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 [ 238.156039] Process ifup-eth (pid: 6493, ti=f34f2000 task=f399ab00 task.ti=f34f2000) [ 238.156039] Stack: [ 238.156039] ffffffff f61433a8 ffffffff f6143200 f34f3e18 c0118e9c 00000000 f6143200 [ 238.156039] f61433a8 f5bec738 f34f3e28 c0119435 c2b5b830 f6143200 f34f3e34 c01c2dc3 [ 238.156039] bffd9000 f34f3e60 c01c3051 00000000 ffffffff f34f3e4c 00000000 00000071 [ 238.156039] Call Trace: [ 238.156039] [] ? flush_tlb_others+0x52/0x5b [ 238.156039] [] ? flush_tlb_mm+0x7f/0x8b [ 238.156039] [] ? tlb_finish_mmu+0x2d/0x55 [ 238.156039] [] ? exit_mmap+0x124/0x170 [ 238.156039] [] ? mmput+0x40/0xf5 [ 238.156039] [] ? flush_old_exec+0x640/0x94b [ 238.156039] [] ? fsnotify_access+0x37/0x39 [ 238.156039] [] ? kernel_read+0x39/0x4b [ 238.156039] [] ? load_elf_binary+0x4a1/0x11bb [ 238.156039] [] ? might_fault+0x51/0x9c [ 238.156039] [] ? paravirt_read_tsc+0x20/0x4f [ 238.156039] [] ? native_sched_clock+0x5d/0x60 [ 238.156039] [] ? search_binary_handler+0xab/0x2c4 [ 238.156039] [] ? load_elf_binary+0x0/0x11bb [ 238.156039] [] ? _raw_read_unlock+0x21/0x46 [ 238.156039] [] ? load_elf_binary+0x0/0x11bb [ 238.156039] [] ? search_binary_handler+0xb2/0x2c4 [ 238.156039] [] ? do_execve+0x21c/0x2ee [ 238.156039] [] ? sys_execve+0x51/0x8c [ 238.156039] [] ? sysenter_do_call+0x12/0x43 Fix it by not assuming that the cpumask is constant. Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_32.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index ec53818f4e3..d37bbfcb813 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -125,9 +125,8 @@ void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { /* - * - mask must exist :) + * mm must exist :) */ - BUG_ON(cpumask_empty(cpumask)); BUG_ON(!mm); /* @@ -138,14 +137,18 @@ void native_flush_tlb_others(const struct cpumask *cpumask, spin_lock(&tlbstate_lock); cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id())); -#ifdef CONFIG_HOTPLUG_CPU - /* If a CPU which we ran on has gone down, OK. 
*/ cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask); + + /* + * If a task whose mm mask we are looking at has descheduled and + * has cleared its presence from the mask, or if a CPU which we ran + * on has gone down then there might be no flush work left: + */ if (unlikely(cpumask_empty(flush_cpumask))) { spin_unlock(&tlbstate_lock); return; } -#endif + flush_mm = mm; flush_va = va; -- cgit v1.2.3 From 92181f190b649f7ef2b79cbf5c00f26ccc66da2a Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 20 Jan 2009 04:24:26 +0100 Subject: x86: optimise x86's do_page_fault (C entry point for the page fault path) Impact: cleanup, restructure code to improve assembly gcc isn't _all_ that smart about spilling registers to stack or reusing stack slots, even with branch annotations. do_page_fault contained a lot of functionality, so split unlikely paths into their own functions, and mark them as noinline just to be sure. I consider this actually to be somewhat of a cleanup too: the main function now contains about half the number of lines so the normal path is easier to read, while the error cases are also nicely split away. Also, ensure the order of arguments to functions is always the same: regs, addr, error_code. This can reduce code size a tiny bit, and just looks neater too. And add a couple of branch annotations. Before: do_page_fault: subq $360, %rsp #, After: do_page_fault: subq $56, %rsp #, bloat-o-meter: add/remove: 8/0 grow/shrink: 0/1 up/down: 2222/-1680 (542) function old new delta __bad_area_nosemaphore - 506 +506 no_context - 474 +474 vmalloc_fault - 424 +424 spurious_fault - 358 +358 mm_fault_error - 272 +272 bad_area_access_error - 89 +89 bad_area - 89 +89 bad_area_nosemaphore - 10 +10 do_page_fault 2464 784 -1680 Yes, the total size increases by 542 bytes, due to the extra function calls. But these will very rarely be called (except for vmalloc_fault) in a normal workload. Importantly, do_page_fault is less than 1/3rd it's original size, and touches far less stack. Existing gotos and branch hints did move a lot of the infrequently used text out of the fastpath, but that's even further improved after this patch. 
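To make the restructuring pattern concrete, here is a minimal user-space sketch of the same idea (illustrative names only, not code from the patch): keep the hot path small and push the rare, stack-hungry work into noinline helpers guarded by unlikely().

/* illustrative only -- not code from the patch */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static __attribute__((noinline)) int handle_bad_value(int value)
{
        /* rare path: the large locals and the printing live here, off the hot path */
        char buf[128];

        snprintf(buf, sizeof(buf), "bad value: %d\n", value);
        fputs(buf, stderr);
        return -1;
}

int process(int value)
{
        /* hot path: small frame, little to spill, like the slimmed do_page_fault() */
        if (unlikely(value < 0))
                return handle_bad_value(value); /* cf. bad_area(), no_context() */

        return value * 2;
}

Because the rare branch is out of line, the caller's frame no longer reserves space for the error path's locals, which is the same effect as the 360-to-56 byte stack reduction quoted above.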
Signed-off-by: Nick Piggin Acked-by: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 438 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 256 insertions(+), 182 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 90dfae511a4..033292dc9e2 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -91,8 +91,8 @@ static inline int notify_page_fault(struct pt_regs *regs) * * Opcode checker based on code by Richard Brunner */ -static int is_prefetch(struct pt_regs *regs, unsigned long addr, - unsigned long error_code) +static int is_prefetch(struct pt_regs *regs, unsigned long error_code, + unsigned long addr) { unsigned char *instr; int scan_more = 1; @@ -409,15 +409,15 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, } #ifdef CONFIG_X86_64 -static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, - unsigned long error_code) +static noinline void pgtable_bad(struct pt_regs *regs, + unsigned long error_code, unsigned long address) { unsigned long flags = oops_begin(); int sig = SIGKILL; - struct task_struct *tsk; + struct task_struct *tsk = current; printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", - current->comm, address); + tsk->comm, address); dump_pagetable(address); tsk = current; tsk->thread.cr2 = address; @@ -429,6 +429,190 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, } #endif +static noinline void no_context(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + struct task_struct *tsk = current; +#ifdef CONFIG_X86_64 + unsigned long flags; + int sig; +#endif + + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * X86_32 + * Valid to do another page fault here, because if this fault + * had been triggered by is_prefetch fixup_exception would have + * handled it. + * + * X86_64 + * Hall of shame of CPU/BIOS bugs. + */ + if (is_prefetch(regs, error_code, address)) + return; + + if (is_errata93(regs, address)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ +#ifdef CONFIG_X86_32 + bust_spinlocks(1); +#else + flags = oops_begin(); +#endif + + show_fault_oops(regs, error_code, address); + + tsk->thread.cr2 = address; + tsk->thread.trap_no = 14; + tsk->thread.error_code = error_code; + +#ifdef CONFIG_X86_32 + die("Oops", regs, error_code); + bust_spinlocks(0); + do_exit(SIGKILL); +#else + sig = SIGKILL; + if (__die("Oops", regs, error_code)) + sig = 0; + /* Executive summary in case the body of the oops scrolled away */ + printk(KERN_EMERG "CR2: %016lx\n", address); + oops_end(flags, regs, sig); +#endif +} + +static void __bad_area_nosemaphore(struct pt_regs *regs, + unsigned long error_code, unsigned long address, + int si_code) +{ + struct task_struct *tsk = current; + + /* User mode accesses just cause a SIGSEGV */ + if (error_code & PF_USER) { + /* + * It's possible to have interrupts off here. + */ + local_irq_enable(); + + /* + * Valid to do another page fault here because this one came + * from user space. + */ + if (is_prefetch(regs, error_code, address)) + return; + + if (is_errata100(regs, address)) + return; + + if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && + printk_ratelimit()) { + printk( + "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", + task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, + tsk->comm, task_pid_nr(tsk), address, + (void *) regs->ip, (void *) regs->sp, error_code); + print_vma_addr(" in ", regs->ip); + printk("\n"); + } + + tsk->thread.cr2 = address; + /* Kernel addresses are always protection faults */ + tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.trap_no = 14; + force_sig_info_fault(SIGSEGV, si_code, address, tsk); + return; + } + + if (is_f00f_bug(regs, address)) + return; + + no_context(regs, error_code, address); +} + +static noinline void bad_area_nosemaphore(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); +} + +static void __bad_area(struct pt_regs *regs, + unsigned long error_code, unsigned long address, + int si_code) +{ + struct mm_struct *mm = current->mm; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ + up_read(&mm->mmap_sem); + + __bad_area_nosemaphore(regs, error_code, address, si_code); +} + +static noinline void bad_area(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + __bad_area(regs, error_code, address, SEGV_MAPERR); +} + +static noinline void bad_area_access_error(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + __bad_area(regs, error_code, address, SEGV_ACCERR); +} + +/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ +static void out_of_memory(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + up_read(¤t->mm->mmap_sem); + pagefault_out_of_memory(); +} + +static void do_sigbus(struct pt_regs *regs, + unsigned long error_code, unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + + /* Kernel mode? Handle exceptions or die */ + if (!(error_code & PF_USER)) + no_context(regs, error_code, address); +#ifdef CONFIG_X86_32 + /* User space => ok to do another page fault */ + if (is_prefetch(regs, error_code, address)) + return; +#endif + tsk->thread.cr2 = address; + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 14; + force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); +} + +static noinline void mm_fault_error(struct pt_regs *regs, + unsigned long error_code, unsigned long address, unsigned int fault) +{ + if (fault & VM_FAULT_OOM) + out_of_memory(regs, error_code, address); + else if (fault & VM_FAULT_SIGBUS) + do_sigbus(regs, error_code, address); + else + BUG(); +} + static int spurious_fault_check(unsigned long error_code, pte_t *pte) { if ((error_code & PF_WRITE) && !pte_write(*pte)) @@ -448,8 +632,8 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. */ -static int spurious_fault(unsigned long address, - unsigned long error_code) +static noinline int spurious_fault(unsigned long error_code, + unsigned long address) { pgd_t *pgd; pud_t *pud; @@ -494,7 +678,7 @@ static int spurious_fault(unsigned long address, * * This assumes no large pages in there. 
*/ -static int vmalloc_fault(unsigned long address) +static noinline int vmalloc_fault(unsigned long address) { #ifdef CONFIG_X86_32 unsigned long pgd_paddr; @@ -573,6 +757,25 @@ static int vmalloc_fault(unsigned long address) int show_unhandled_signals = 1; +static inline int access_error(unsigned long error_code, int write, + struct vm_area_struct *vma) +{ + if (write) { + /* write, present and write, not present */ + if (unlikely(!(vma->vm_flags & VM_WRITE))) + return 1; + } else if (unlikely(error_code & PF_PROT)) { + /* read, present */ + return 1; + } else { + /* read, not present */ + if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + return 1; + } + + return 0; +} + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -583,16 +786,12 @@ asmlinkage #endif void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) { + unsigned long address; struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; - unsigned long address; - int write, si_code; + int write; int fault; -#ifdef CONFIG_X86_64 - unsigned long flags; - int sig; -#endif tsk = current; mm = tsk->mm; @@ -601,9 +800,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) /* get the address */ address = read_cr2(); - si_code = SEGV_MAPERR; - - if (notify_page_fault(regs)) + if (unlikely(notify_page_fault(regs))) return; if (unlikely(kmmio_fault(regs, address))) return; @@ -631,17 +828,17 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) return; /* Can handle a stale RO->RW TLB */ - if (spurious_fault(address, error_code)) + if (spurious_fault(error_code, address)) return; /* * Don't take the mm semaphore here. If we fixup a prefetch * fault we could otherwise deadlock. */ - goto bad_area_nosemaphore; + bad_area_nosemaphore(regs, error_code, address); + return; } - /* * It's safe to allow irq's after cr2 has been saved and the * vmalloc fault has been handled. @@ -657,15 +854,17 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) #ifdef CONFIG_X86_64 if (unlikely(error_code & PF_RSVD)) - pgtable_bad(address, regs, error_code); + pgtable_bad(regs, error_code, address); #endif /* * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ - if (unlikely(in_atomic() || !mm)) - goto bad_area_nosemaphore; + if (unlikely(in_atomic() || !mm)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } /* * When running in the kernel we expect faults to occur only to @@ -683,20 +882,26 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. 
*/ - if (!down_read_trylock(&mm->mmap_sem)) { + if (unlikely(!down_read_trylock(&mm->mmap_sem))) { if ((error_code & PF_USER) == 0 && - !search_exception_tables(regs->ip)) - goto bad_area_nosemaphore; + !search_exception_tables(regs->ip)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } down_read(&mm->mmap_sem); } vma = find_vma(mm, address); - if (!vma) - goto bad_area; - if (vma->vm_start <= address) + if (unlikely(!vma)) { + bad_area(regs, error_code, address); + return; + } + if (likely(vma->vm_start <= address)) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { + bad_area(regs, error_code, address); + return; + } if (error_code & PF_USER) { /* * Accessing the stack below %sp is always a bug. @@ -704,31 +909,25 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) * and pusha to work. ("enter $65535,$31" pushes * 32 pointers and then decrements %sp by 65535.) */ - if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp) - goto bad_area; + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { + bad_area(regs, error_code, address); + return; + } } - if (expand_stack(vma, address)) - goto bad_area; -/* - * Ok, we have a good vm_area for this memory access, so - * we can handle it.. - */ + if (unlikely(expand_stack(vma, address))) { + bad_area(regs, error_code, address); + return; + } + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ good_area: - si_code = SEGV_ACCERR; - write = 0; - switch (error_code & (PF_PROT|PF_WRITE)) { - default: /* 3: write, present */ - /* fall through */ - case PF_WRITE: /* write, not present */ - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - write++; - break; - case PF_PROT: /* read, present */ - goto bad_area; - case 0: /* read, not present */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) - goto bad_area; + write = error_code & PF_WRITE; + if (unlikely(access_error(error_code, write, vma))) { + bad_area_access_error(regs, error_code, address); + return; } /* @@ -738,11 +937,8 @@ good_area: */ fault = handle_mm_fault(mm, vma, address, write); if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) - goto out_of_memory; - else if (fault & VM_FAULT_SIGBUS) - goto do_sigbus; - BUG(); + mm_fault_error(regs, error_code, address, fault); + return; } if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; @@ -760,128 +956,6 @@ good_area: } #endif up_read(&mm->mmap_sem); - return; - -/* - * Something tried to access memory that isn't in our memory map.. - * Fix it, but check if it's kernel or user first.. - */ -bad_area: - up_read(&mm->mmap_sem); - -bad_area_nosemaphore: - /* User mode accesses just cause a SIGSEGV */ - if (error_code & PF_USER) { - /* - * It's possible to have interrupts off here. - */ - local_irq_enable(); - - /* - * Valid to do another page fault here because this one came - * from user space. - */ - if (is_prefetch(regs, address, error_code)) - return; - - if (is_errata100(regs, address)) - return; - - if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && - printk_ratelimit()) { - printk( - "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", - task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, - tsk->comm, task_pid_nr(tsk), address, - (void *) regs->ip, (void *) regs->sp, error_code); - print_vma_addr(" in ", regs->ip); - printk("\n"); - } - - tsk->thread.cr2 = address; - /* Kernel addresses are always protection faults */ - tsk->thread.error_code = error_code | (address >= TASK_SIZE); - tsk->thread.trap_no = 14; - force_sig_info_fault(SIGSEGV, si_code, address, tsk); - return; - } - - if (is_f00f_bug(regs, address)) - return; - -no_context: - /* Are we prepared to handle this kernel fault? */ - if (fixup_exception(regs)) - return; - - /* - * X86_32 - * Valid to do another page fault here, because if this fault - * had been triggered by is_prefetch fixup_exception would have - * handled it. - * - * X86_64 - * Hall of shame of CPU/BIOS bugs. - */ - if (is_prefetch(regs, address, error_code)) - return; - - if (is_errata93(regs, address)) - return; - -/* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. - */ -#ifdef CONFIG_X86_32 - bust_spinlocks(1); -#else - flags = oops_begin(); -#endif - - show_fault_oops(regs, error_code, address); - - tsk->thread.cr2 = address; - tsk->thread.trap_no = 14; - tsk->thread.error_code = error_code; - -#ifdef CONFIG_X86_32 - die("Oops", regs, error_code); - bust_spinlocks(0); - do_exit(SIGKILL); -#else - sig = SIGKILL; - if (__die("Oops", regs, error_code)) - sig = 0; - /* Executive summary in case the body of the oops scrolled away */ - printk(KERN_EMERG "CR2: %016lx\n", address); - oops_end(flags, regs, sig); -#endif - -out_of_memory: - /* - * We ran out of memory, call the OOM killer, and return the userspace - * (which will retry the fault, or kill us if we got oom-killed). - */ - up_read(&mm->mmap_sem); - pagefault_out_of_memory(); - return; - -do_sigbus: - up_read(&mm->mmap_sem); - - /* Kernel mode? Handle exceptions or die */ - if (!(error_code & PF_USER)) - goto no_context; -#ifdef CONFIG_X86_32 - /* User space => ok to do another page fault */ - if (is_prefetch(regs, address, error_code)) - return; -#endif - tsk->thread.cr2 = address; - tsk->thread.error_code = error_code; - tsk->thread.trap_no = 14; - force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); } DEFINE_SPINLOCK(pgd_lock); -- cgit v1.2.3 From afb33f8c0d7dea8c48ae1c2e3af5b437aa8dd7bb Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Mon, 12 Jan 2009 12:53:45 +0100 Subject: x86: remove byte locks Impact: cleanup Remove byte locks implementation, which was introduced by Jeremy in 8efcbab6 ("paravirt: introduce a "lock-byte" spinlock implementation"), but turned out to be dead code that is not used by any in-kernel virtualization guest (Xen uses its own variant of spinlocks implementation and KVM is not planning to move to byte locks). 
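For readers who have not seen the removed primitive, a rough portable sketch of what a zero-initialised "lock-byte" spinlock does is given below, using C11 atomics instead of the original xchgb inline assembly and omitting the 1-byte contention counter.

/* illustrative only: the gist of a lock-byte spinlock in portable C11 */
#include <stdatomic.h>

struct byte_lock {
        atomic_char lock;               /* zero-initialised: 0 = free, 1 = held */
};

static inline void byte_lock_acquire(struct byte_lock *l)
{
        /* xchg-style acquire, as the removed xchgb loop did */
        while (atomic_exchange_explicit(&l->lock, 1, memory_order_acquire))
                while (atomic_load_explicit(&l->lock, memory_order_relaxed))
                        ;               /* spin on plain reads to limit cache-line traffic */
}

static inline void byte_lock_release(struct byte_lock *l)
{
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}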
Signed-off-by: Jiri Kosina Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt.h | 2 -- arch/x86/include/asm/spinlock.h | 66 ++---------------------------------- arch/x86/kernel/paravirt-spinlocks.c | 10 ------ 3 files changed, 2 insertions(+), 76 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ba3e2ff6aed..32bc6c2c138 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -1389,8 +1389,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, void _paravirt_nop(void); #define paravirt_nop ((void *)_paravirt_nop) -void paravirt_use_bytelocks(void); - #ifdef CONFIG_SMP static inline int __raw_spin_is_locked(struct raw_spinlock *lock) diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index d17c91981da..2bd6b111a41 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1; } -#ifdef CONFIG_PARAVIRT -/* - * Define virtualization-friendly old-style lock byte lock, for use in - * pv_lock_ops if desired. - * - * This differs from the pre-2.6.24 spinlock by always using xchgb - * rather than decb to take the lock; this allows it to use a - * zero-initialized lock structure. It also maintains a 1-byte - * contention counter, so that we can implement - * __byte_spin_is_contended. - */ -struct __byte_spinlock { - s8 lock; - s8 spinners; -}; - -static inline int __byte_spin_is_locked(raw_spinlock_t *lock) -{ - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock; - return bl->lock != 0; -} - -static inline int __byte_spin_is_contended(raw_spinlock_t *lock) -{ - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock; - return bl->spinners != 0; -} - -static inline void __byte_spin_lock(raw_spinlock_t *lock) -{ - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock; - s8 val = 1; - - asm("1: xchgb %1, %0\n" - " test %1,%1\n" - " jz 3f\n" - " " LOCK_PREFIX "incb %2\n" - "2: rep;nop\n" - " cmpb $1, %0\n" - " je 2b\n" - " " LOCK_PREFIX "decb %2\n" - " jmp 1b\n" - "3:" - : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory"); -} - -static inline int __byte_spin_trylock(raw_spinlock_t *lock) -{ - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock; - u8 old = 1; - - asm("xchgb %1,%0" - : "+m" (bl->lock), "+q" (old) : : "memory"); +#ifndef CONFIG_PARAVIRT - return old == 0; -} - -static inline void __byte_spin_unlock(raw_spinlock_t *lock) -{ - struct __byte_spinlock *bl = (struct __byte_spinlock *)lock; - smp_wmb(); - bl->lock = 0; -} -#else /* !CONFIG_PARAVIRT */ static inline int __raw_spin_is_locked(raw_spinlock_t *lock) { return __ticket_spin_is_locked(lock); @@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, __raw_spin_lock(lock); } -#endif /* CONFIG_PARAVIRT */ +#endif static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) { diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 95777b0faa7..3a7c5a44082 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = { }; EXPORT_SYMBOL(pv_lock_ops); -void __init paravirt_use_bytelocks(void) -{ -#ifdef CONFIG_SMP - pv_lock_ops.spin_is_locked = __byte_spin_is_locked; - pv_lock_ops.spin_is_contended = 
__byte_spin_is_contended; - pv_lock_ops.spin_lock = __byte_spin_lock; - pv_lock_ops.spin_trylock = __byte_spin_trylock; - pv_lock_ops.spin_unlock = __byte_spin_unlock; -#endif -} -- cgit v1.2.3 From 67e68bde02fe783efc2ce2ca31bdb992f5235f8d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 21 Jan 2009 17:26:05 +0900 Subject: x86: update canary handling during switch Impact: cleanup In switch_to(), instead of taking offset to irq_stack_union.stack, make it a proper percpu access using __percpu_arg() and per_cpu_var(). Signed-off-by: Tejun Heo --- arch/x86/include/asm/system.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 52eb748a68a..2fcc70bc85f 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -89,13 +89,15 @@ do { \ #ifdef CONFIG_CC_STACKPROTECTOR #define __switch_canary \ "movq %P[task_canary](%%rsi),%%r8\n\t" \ - "movq %%r8,%%gs:%P[gs_canary]\n\t" -#define __switch_canary_param \ - , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \ - , [gs_canary] "i" (offsetof(union irq_stack_union, stack_canary)) + "movq %%r8,"__percpu_arg([gs_canary])"\n\t" +#define __switch_canary_oparam \ + , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) +#define __switch_canary_iparam \ + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) #else /* CC_STACKPROTECTOR */ #define __switch_canary -#define __switch_canary_param +#define __switch_canary_oparam +#define __switch_canary_iparam #endif /* CC_STACKPROTECTOR */ /* Save restore flags to clear handle leaking NT */ @@ -114,13 +116,14 @@ do { \ "jc ret_from_fork\n\t" \ RESTORE_CONTEXT \ : "=a" (last) \ + __switch_canary_oparam \ : [next] "S" (next), [prev] "D" (prev), \ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ [current_task] "m" (per_cpu_var(current_task)) \ - __switch_canary_param \ + __switch_canary_iparam \ : "memory", "cc" __EXTRA_CLOBBER) #endif -- cgit v1.2.3 From 06deef892c7327992434917fb6592c233430803d Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 21 Jan 2009 17:26:05 +0900 Subject: x86: clean up gdt_page definition Impact: cleanup && more compact percpu area layout with future changes Move 64-bit GDT to page-aligned section and clean up comment formatting. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/cpu/common.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3887fcf6e51..a8f0dedcb0c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -63,23 +63,23 @@ cpumask_t cpu_sibling_setup_map; static struct cpu_dev *this_cpu __cpuinitdata; +DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { #ifdef CONFIG_X86_64 -/* We need valid kernel segments for data and code in long mode too - * IRET will check the segment types kkeil 2000/10/28 - * Also sysret mandates a special GDT layout - */ -/* The TLS descriptors are currently at a different place compared to i386. - Hopefully nobody expects them at a fixed place (Wine?) 
*/ -DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { + /* + * We need valid kernel segments for data and code in long mode too + * IRET will check the segment types kkeil 2000/10/28 + * Also sysret mandates a special GDT layout + * + * The TLS descriptors are currently at a different place compared to i386. + * Hopefully nobody expects them at a fixed place (Wine?) + */ [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, -} }; #else -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, @@ -112,8 +112,8 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, -} }; #endif +} }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 299e26992a737804e13e74fdb97cdab470ed19ac Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 21 Jan 2009 17:26:05 +0900 Subject: x86: fix percpu_write with 64-bit constants Impact: slightly better code generation for percpu_to_op() The processor will sign-extend 32-bit immediate values in 64-bit operations. Use the 'e' constraint ("32-bit signed integer constant, or a symbolic reference known to fit that range") for 64-bit constants. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index ce980db5e59..0b64af4f13a 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -75,7 +75,7 @@ do { \ case 8: \ asm(op "q %1,"__percpu_arg(0) \ : "+m" (var) \ - : "r" ((T__)val)); \ + : "re" ((T__)val)); \ break; \ default: __bad_percpu_size(); \ } \ -- cgit v1.2.3 From 0dd76d736eeb3e0ef86c5b103b47ae0e15edebad Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 21 Jan 2009 17:26:05 +0900 Subject: x86: set %fs to __KERNEL_PERCPU unconditionally for x86_32 Impact: cleanup %fs is currently set to __KERNEL_DS at boot, and conditionally switched to __KERNEL_PERCPU for secondary cpus. Instead, initialize GDT_ENTRY_PERCPU to the same attributes as GDT_ENTRY_KERNEL_DS and set %fs to __KERNEL_PERCPU unconditionally. 
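The descriptor words used for GDT_ENTRY_PERCPU here, 0x0000ffff and 0x00cf9200, are the same ones GDT_ENTRY_KERNEL_DS uses. A small standalone decode (an illustration, not part of the patch) shows why: they describe a flat, present, ring-0, writable 4 GiB data segment, so loading %fs with __KERNEL_PERCPU early behaves just like the old __KERNEL_DS load until the per-cpu base is installed later.

/* decode of the 0x0000ffff / 0x00cf9200 descriptor words (illustration only) */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t a = 0x0000ffff, b = 0x00cf9200;
        uint32_t base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
        uint32_t limit = (a & 0xffff) | (b & 0x000f0000);
        int g    = (b >> 23) & 1;       /* granularity: 1 = limit counted in 4K pages */
        int p    = (b >> 15) & 1;       /* present */
        int dpl  = (b >> 13) & 3;       /* descriptor privilege level */
        int type = (b >>  8) & 0xf;     /* 0x2 = data segment, read/write */

        printf("base=%#x limit=%#x g=%d p=%d dpl=%d type=%#x\n",
               base, limit, g, p, dpl, type);
        /* prints base=0 limit=0xfffff g=1 p=1 dpl=0 type=0x2:
         * a flat 4 GiB, ring-0, writable data segment */
        return 0;
}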
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/head_32.S | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a8f0dedcb0c..fbebbcec001 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -111,7 +111,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, - [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, + [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index e835b4eea70..24c0e5cd71e 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -429,12 +429,14 @@ is386: movl $2,%ecx # set MP ljmp $(__KERNEL_CS),$1f 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. - movl %eax,%fs # gets reset once there's real percpu movl $(__USER_DS),%eax # DS/ES contains default USER segment movl %eax,%ds movl %eax,%es + movl $(__KERNEL_PERCPU), %eax + movl %eax,%fs # set this cpu's percpu + xorl %eax,%eax # Clear GS and LDT movl %eax,%gs lldt %ax @@ -446,8 +448,6 @@ is386: movl $2,%ecx # set MP movb $1, ready cmpb $0,%cl # the first CPU calls start_kernel je 1f - movl $(__KERNEL_PERCPU), %eax - movl %eax,%fs # set this cpu's percpu movl (stack_start), %esp 1: #endif /* CONFIG_SMP */ -- cgit v1.2.3 From 6826c8ff07b5f95df0473a748a9831707079b940 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: merge mmu_context.h Impact: cleanup tj: * changed cpu to unsigned as was done on mmu_context_64.h as cpu id is officially unsigned int * added missing ';' to 32bit version of deactivate_mm() Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/mmu_context.h | 63 ++++++++++++++++++++++++++++++++--- arch/x86/include/asm/mmu_context_32.h | 55 ------------------------------ arch/x86/include/asm/mmu_context_64.h | 52 ----------------------------- 3 files changed, 59 insertions(+), 111 deletions(-) delete mode 100644 arch/x86/include/asm/mmu_context_32.h delete mode 100644 arch/x86/include/asm/mmu_context_64.h (limited to 'arch') diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 8aeeb3fd73d..52948df9cd1 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); -#ifdef CONFIG_X86_32 -# include "mmu_context_32.h" -#else -# include "mmu_context_64.h" + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +#ifdef CONFIG_SMP + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); +#endif +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned cpu = smp_processor_id(); + + if (likely(prev != next)) { + /* stop flush ipis for the previous mm */ + cpu_clear(cpu, prev->cpu_vm_mask); +#ifdef CONFIG_SMP + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); #endif + cpu_set(cpu, 
next->cpu_vm_mask); + + /* Re-load page tables */ + load_cr3(next->pgd); + + /* + * load the LDT, if the LDT is different: + */ + if (unlikely(prev->context.ldt != next->context.ldt)) + load_LDT_nolock(&next->context); + } +#ifdef CONFIG_SMP + else { + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); + + if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { + /* We were in lazy tlb mode and leave_mm disabled + * tlb flush IPI delivery. We must reload CR3 + * to make sure to use no freed page tables. + */ + load_cr3(next->pgd); + load_LDT_nolock(&next->context); + } + } +#endif +} #define activate_mm(prev, next) \ do { \ @@ -33,5 +76,17 @@ do { \ switch_mm((prev), (next), NULL); \ } while (0); +#ifdef CONFIG_X86_32 +#define deactivate_mm(tsk, mm) \ +do { \ + loadsegment(gs, 0); \ +} while (0) +#else +#define deactivate_mm(tsk, mm) \ +do { \ + load_gs_index(0); \ + loadsegment(fs, 0); \ +} while (0) +#endif #endif /* _ASM_X86_MMU_CONTEXT_H */ diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h deleted file mode 100644 index 08b53454f83..00000000000 --- a/arch/x86/include/asm/mmu_context_32.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _ASM_X86_MMU_CONTEXT_32_H -#define _ASM_X86_MMU_CONTEXT_32_H - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -#ifdef CONFIG_SMP - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) - percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); -#endif -} - -static inline void switch_mm(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) -{ - int cpu = smp_processor_id(); - - if (likely(prev != next)) { - /* stop flush ipis for the previous mm */ - cpu_clear(cpu, prev->cpu_vm_mask); -#ifdef CONFIG_SMP - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - percpu_write(cpu_tlbstate.active_mm, next); -#endif - cpu_set(cpu, next->cpu_vm_mask); - - /* Re-load page tables */ - load_cr3(next->pgd); - - /* - * load the LDT, if the LDT is different: - */ - if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); - } -#ifdef CONFIG_SMP - else { - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); - - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled - * tlb flush IPI delivery. We must reload %cr3. 
- */ - load_cr3(next->pgd); - load_LDT_nolock(&next->context); - } - } -#endif -} - -#define deactivate_mm(tsk, mm) \ - asm("movl %0,%%gs": :"r" (0)); - -#endif /* _ASM_X86_MMU_CONTEXT_32_H */ diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h deleted file mode 100644 index c4572505ab3..00000000000 --- a/arch/x86/include/asm/mmu_context_64.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef _ASM_X86_MMU_CONTEXT_64_H -#define _ASM_X86_MMU_CONTEXT_64_H - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -#ifdef CONFIG_SMP - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) - percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); -#endif -} - -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) -{ - unsigned cpu = smp_processor_id(); - if (likely(prev != next)) { - /* stop flush ipis for the previous mm */ - cpu_clear(cpu, prev->cpu_vm_mask); -#ifdef CONFIG_SMP - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - percpu_write(cpu_tlbstate.active_mm, next); -#endif - cpu_set(cpu, next->cpu_vm_mask); - load_cr3(next->pgd); - - if (unlikely(next->context.ldt != prev->context.ldt)) - load_LDT_nolock(&next->context); - } -#ifdef CONFIG_SMP - else { - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); - - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled - * tlb flush IPI delivery. We must reload CR3 - * to make sure to use no freed page tables. - */ - load_cr3(next->pgd); - load_LDT_nolock(&next->context); - } - } -#endif -} - -#define deactivate_mm(tsk, mm) \ -do { \ - load_gs_index(0); \ - asm volatile("movl %0,%%fs"::"r"(0)); \ -} while (0) - -#endif /* _ASM_X86_MMU_CONTEXT_64_H */ -- cgit v1.2.3 From d650a5148593b65a3c3f9a344f46b91b7dfe7713 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: merge irq_regs.h Impact: cleanup, better irq_regs code generation for x86_64 Make 64-bit use the same optimizations as 32-bit. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/irq_regs.h | 36 +++++++++++++++++++++++++++++++----- arch/x86/include/asm/irq_regs_32.h | 31 ------------------------------- arch/x86/include/asm/irq_regs_64.h | 1 - arch/x86/kernel/irq_64.c | 3 +++ 4 files changed, 34 insertions(+), 37 deletions(-) delete mode 100644 arch/x86/include/asm/irq_regs_32.h delete mode 100644 arch/x86/include/asm/irq_regs_64.h (limited to 'arch') diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h index 89c898ab298..77843225b7e 100644 --- a/arch/x86/include/asm/irq_regs.h +++ b/arch/x86/include/asm/irq_regs.h @@ -1,5 +1,31 @@ -#ifdef CONFIG_X86_32 -# include "irq_regs_32.h" -#else -# include "irq_regs_64.h" -#endif +/* + * Per-cpu current frame pointer - the location of the last exception frame on + * the stack, stored in the per-cpu area. 
+ * + * Jeremy Fitzhardinge + */ +#ifndef _ASM_X86_IRQ_REGS_H +#define _ASM_X86_IRQ_REGS_H + +#include + +#define ARCH_HAS_OWN_IRQ_REGS + +DECLARE_PER_CPU(struct pt_regs *, irq_regs); + +static inline struct pt_regs *get_irq_regs(void) +{ + return percpu_read(irq_regs); +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = get_irq_regs(); + percpu_write(irq_regs, new_regs); + + return old_regs; +} + +#endif /* _ASM_X86_IRQ_REGS_32_H */ diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h deleted file mode 100644 index d7ed33ee94e..00000000000 --- a/arch/x86/include/asm/irq_regs_32.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Per-cpu current frame pointer - the location of the last exception frame on - * the stack, stored in the per-cpu area. - * - * Jeremy Fitzhardinge - */ -#ifndef _ASM_X86_IRQ_REGS_32_H -#define _ASM_X86_IRQ_REGS_32_H - -#include - -#define ARCH_HAS_OWN_IRQ_REGS - -DECLARE_PER_CPU(struct pt_regs *, irq_regs); - -static inline struct pt_regs *get_irq_regs(void) -{ - return percpu_read(irq_regs); -} - -static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) -{ - struct pt_regs *old_regs; - - old_regs = get_irq_regs(); - percpu_write(irq_regs, new_regs); - - return old_regs; -} - -#endif /* _ASM_X86_IRQ_REGS_32_H */ diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/arch/x86/include/asm/irq_regs_64.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 1db05247b47..0b254de8408 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -22,6 +22,9 @@ DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); +DEFINE_PER_CPU(struct pt_regs *, irq_regs); +EXPORT_PER_CPU_SYMBOL(irq_regs); + /* * Probabilistic stack overflow check: * -- cgit v1.2.3 From bdbcdd48883940bbd8d17eb01172d58a261a413a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: uv cleanup Impact: cleanup Make the following uv related cleanups. * collect visible uv related definitions and interfaces into uv/uv.h and use it. this cleans up the messy situation where on 64bit, uv is defined properly, on 32bit generic it's dummy and on the rest undefined. after this clean up, uv is defined on 64 and dummy on 32. * update uv_flush_tlb_others() such that it takes cpumask of to-be-flushed cpus as argument, instead of that minus self, and returns yet-to-be-flushed cpumask, instead of modifying the passed in parameter. this interface change will ease dummy implementation of uv_flush_tlb_others() and makes uv tlb flush related stuff defined in tlb_uv proper. 
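To make the reworked calling convention concrete, a minimal standalone C sketch (not kernel code) follows: the flush routine takes the full target mask, caller included, plus the caller's cpu, and hands back the subset that still needs the IPI fallback, or nothing when all remote flushing was done. The helper names (uv_flush_model, ipi_flush_model) and the plain unsigned long standing in for struct cpumask are illustrative assumptions only.

/* Standalone model of the reworked uv_flush_tlb_others() contract.
 * A plain unsigned long stands in for struct cpumask, and a zero
 * return stands in for the NULL "all done" return.  Illustrative only.
 */
#include <stdio.h>

#define NCPUS 8

static unsigned long uv_flush_model(unsigned long mask, unsigned int cpu)
{
	unsigned long remaining = mask & ~(1UL << cpu);	/* drop the calling CPU */

	remaining &= 0xAAUL;	/* pretend the hardware flushed the even CPUs */
	return remaining;	/* non-zero: caller must IPI these; zero: done */
}

static void ipi_flush_model(unsigned long mask)
{
	printf("fall back to IPI flush for mask 0x%lx\n", mask);
}

int main(void)
{
	unsigned long mask = (1UL << NCPUS) - 1;	/* target CPUs 0..7, self included */
	unsigned int cpu = 3;				/* the calling CPU */
	unsigned long remaining = uv_flush_model(mask, cpu);

	if (remaining)
		ipi_flush_model(remaining);
	else
		printf("all remote flushing handled\n");
	return 0;
}

Returning the leftover set instead of editing the caller's mask in place is what lets the 32-bit stub in the patch below simply hand its argument back unchanged while the caller's IPI fallback keeps working.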
Signed-off-by: Tejun Heo --- arch/x86/include/asm/genapic_32.h | 7 ---- arch/x86/include/asm/genapic_64.h | 6 ---- arch/x86/include/asm/uv/uv.h | 33 +++++++++++++++++++ arch/x86/include/asm/uv/uv_bau.h | 2 -- arch/x86/kernel/cpu/common.c | 1 + arch/x86/kernel/genx2apic_uv_x.c | 1 + arch/x86/kernel/smpboot.c | 1 + arch/x86/kernel/tlb_64.c | 18 ++++------- arch/x86/kernel/tlb_uv.c | 68 +++++++++++++++++++++++---------------- 9 files changed, 83 insertions(+), 54 deletions(-) create mode 100644 arch/x86/include/asm/uv/uv.h (limited to 'arch') diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 2c05b737ee2..4334502d366 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -138,11 +138,4 @@ struct genapic { extern struct genapic *genapic; extern void es7000_update_genapic_to_cluster(void); -enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; -#define get_uv_system_type() UV_NONE -#define is_uv_system() 0 -#define uv_wakeup_secondary(a, b) 1 -#define uv_system_init() do {} while (0) - - #endif /* _ASM_X86_GENAPIC_32_H */ diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index adf32fb56aa..7bb092c5905 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -51,15 +51,9 @@ extern struct genapic apic_x2apic_phys; extern int acpi_madt_oem_check(char *, char *); extern void apic_send_IPI_self(int vector); -enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; -extern enum uv_system_type get_uv_system_type(void); -extern int is_uv_system(void); extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); -extern void uv_cpu_init(void); -extern void uv_system_init(void); -extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); extern void setup_apic_routing(void); diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h new file mode 100644 index 00000000000..dce5fe35013 --- /dev/null +++ b/arch/x86/include/asm/uv/uv.h @@ -0,0 +1,33 @@ +#ifndef _ASM_X86_UV_UV_H +#define _ASM_X86_UV_UV_H + +enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; + +#ifdef CONFIG_X86_64 + +extern enum uv_system_type get_uv_system_type(void); +extern int is_uv_system(void); +extern void uv_cpu_init(void); +extern void uv_system_init(void); +extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); +extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, + unsigned long va, + unsigned int cpu); + +#else /* X86_64 */ + +static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } +static inline int is_uv_system(void) { return 0; } +static inline void uv_cpu_init(void) { } +static inline void uv_system_init(void) { } +static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) +{ return 1; } +static inline const struct cpumask * +uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, + unsigned long va, unsigned int cpu) +{ return cpumask; } + +#endif /* X86_64 */ + +#endif /* _ASM_X86_UV_UV_H */ diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 74e6393bfdd..9b0e61bf7a8 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -325,8 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) #define cpubit_isset(cpu, bau_local_cpumask) \ test_bit((cpu), 
(bau_local_cpumask).bits) -extern int uv_flush_tlb_others(struct cpumask *, - struct mm_struct *, unsigned long); extern void uv_bau_message_intr1(void); extern void uv_bau_timeout_intr1(void); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index fbebbcec001..99904f288d6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -28,6 +28,7 @@ #include #include #include +#include #endif #include diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index b193e082f6c..bfe36249145 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 869b98840fd..def770b57b5 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index e64a32c4882..b8bed841ad6 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -15,8 +15,7 @@ #include #include #include -#include -#include +#include DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, }; @@ -206,16 +205,13 @@ void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { if (is_uv_system()) { - /* FIXME: could be an percpu_alloc'd thing */ - static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); - struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask); + unsigned int cpu; - cpumask_andnot(after_uv_flush, cpumask, - cpumask_of(smp_processor_id())); - if (!uv_flush_tlb_others(after_uv_flush, mm, va)) - flush_tlb_others_ipi(after_uv_flush, mm, va); - - put_cpu_var(flush_tlb_uv_cpumask); + cpu = get_cpu(); + cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); + if (cpumask) + flush_tlb_others_ipi(cpumask, mm, va); + put_cpu(); return; } flush_tlb_others_ipi(cpumask, mm, va); diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 690dcf1a27d..aae15dd7260 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -209,14 +210,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc, * * Send a broadcast and wait for a broadcast message to complete. * - * The cpumaskp mask contains the cpus the broadcast was sent to. + * The flush_mask contains the cpus the broadcast was sent to. * - * Returns 1 if all remote flushing was done. The mask is zeroed. - * Returns 0 if some remote flushing remains to be done. The mask will have - * some bits still set. + * Returns NULL if all remote flushing was done. The mask is zeroed. + * Returns @flush_mask if some remote flushing remains to be done. The + * mask will have some bits still set. */ -int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, - struct cpumask *cpumaskp) +const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, + struct bau_desc *bau_desc, + struct cpumask *flush_mask) { int completion_status = 0; int right_shift; @@ -263,59 +265,69 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, * Success, so clear the remote cpu's from the mask so we don't * use the IPI method of shootdown on them. 
*/ - for_each_cpu(bit, cpumaskp) { + for_each_cpu(bit, flush_mask) { blade = uv_cpu_to_blade_id(bit); if (blade == this_blade) continue; - cpumask_clear_cpu(bit, cpumaskp); + cpumask_clear_cpu(bit, flush_mask); } - if (!cpumask_empty(cpumaskp)) - return 0; - return 1; + if (!cpumask_empty(flush_mask)) + return flush_mask; + return NULL; } /** * uv_flush_tlb_others - globally purge translation cache of a virtual * address or all TLB's - * @cpumaskp: mask of all cpu's in which the address is to be removed + * @cpumask: mask of all cpu's in which the address is to be removed * @mm: mm_struct containing virtual address range * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) + * @cpu: the current cpu * * This is the entry point for initiating any UV global TLB shootdown. * * Purges the translation caches of all specified processors of the given * virtual address, or purges all TLB's on specified processors. * - * The caller has derived the cpumaskp from the mm_struct and has subtracted - * the local cpu from the mask. This function is called only if there - * are bits set in the mask. (e.g. flush_tlb_page()) + * The caller has derived the cpumask from the mm_struct. This function + * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) * - * The cpumaskp is converted into a nodemask of the nodes containing + * The cpumask is converted into a nodemask of the nodes containing * the cpus. * - * Returns 1 if all remote flushing was done. - * Returns 0 if some remote flushing remains to be done. + * Note that this function should be called with preemption disabled. + * + * Returns NULL if all remote flushing was done. + * Returns pointer to cpumask if some remote flushing remains to be + * done. The returned pointer is valid till preemption is re-enabled. 
*/ -int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm, - unsigned long va) +const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, + unsigned long va, unsigned int cpu) { + static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask); int i; int bit; int blade; - int cpu; + int uv_cpu; int this_blade; int locals = 0; struct bau_desc *bau_desc; - cpu = uv_blade_processor_id(); + WARN_ON(!in_atomic()); + + cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); + + uv_cpu = uv_blade_processor_id(); this_blade = uv_numa_blade_id(); bau_desc = __get_cpu_var(bau_control).descriptor_base; - bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu; + bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); i = 0; - for_each_cpu(bit, cpumaskp) { + for_each_cpu(bit, flush_mask) { blade = uv_cpu_to_blade_id(bit); BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); if (blade == this_blade) { @@ -330,17 +342,17 @@ int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm, * no off_node flushing; return status for local node */ if (locals) - return 0; + return flush_mask; else - return 1; + return NULL; } __get_cpu_var(ptcstats).requestor++; __get_cpu_var(ptcstats).ntargeted += i; bau_desc->payload.address = va; - bau_desc->payload.sending_cpu = smp_processor_id(); + bau_desc->payload.sending_cpu = cpu; - return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp); + return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask); } /* -- cgit v1.2.3 From 6dd01bedee6c3191643db303a1dc530bad56ec55 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: prepare for tlb merge Impact: clean up, ipi vector number reordering for x86_32 Make the following changes to prepare for tlb merge. * reorder x86_32 ip vectors * adjust tlb_32.c and tlb_64.c such that their logics coincide exactly - on spurious invalidate ipi, tlb_32 acks the irq - tlb_64 now has proper memory barriers around clearing flush_cpumask (no change in generated code) * unexport flush_tlb_page from tlb_32.c, there's no user * use unsigned int for cpu id * drop unnecessary includes from tlb_64.c Signed-off-by: Tejun Heo --- arch/x86/include/asm/irq_vectors.h | 33 ++++++++++++++++----------------- arch/x86/kernel/tlb_32.c | 10 +++++----- arch/x86/kernel/tlb_64.c | 18 +++++++----------- 3 files changed, 28 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index a16a2ab2b42..4ee8f800504 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -49,31 +49,30 @@ * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. - * - * Vectors 0xf0-0xfa are free (reserved for future Linux use). 
*/ #ifdef CONFIG_X86_32 # define SPURIOUS_APIC_VECTOR 0xff # define ERROR_APIC_VECTOR 0xfe -# define INVALIDATE_TLB_VECTOR 0xfd -# define RESCHEDULE_VECTOR 0xfc -# define CALL_FUNCTION_VECTOR 0xfb -# define CALL_FUNCTION_SINGLE_VECTOR 0xfa -# define THERMAL_APIC_VECTOR 0xf0 +# define RESCHEDULE_VECTOR 0xfd +# define CALL_FUNCTION_VECTOR 0xfc +# define CALL_FUNCTION_SINGLE_VECTOR 0xfb +# define THERMAL_APIC_VECTOR 0xfa +/* 0xf1 - 0xf9 : free */ +# define INVALIDATE_TLB_VECTOR 0xf0 #else -#define SPURIOUS_APIC_VECTOR 0xff -#define ERROR_APIC_VECTOR 0xfe -#define RESCHEDULE_VECTOR 0xfd -#define CALL_FUNCTION_VECTOR 0xfc -#define CALL_FUNCTION_SINGLE_VECTOR 0xfb -#define THERMAL_APIC_VECTOR 0xfa -#define THRESHOLD_APIC_VECTOR 0xf9 -#define UV_BAU_MESSAGE 0xf8 -#define INVALIDATE_TLB_VECTOR_END 0xf7 -#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ +# define SPURIOUS_APIC_VECTOR 0xff +# define ERROR_APIC_VECTOR 0xfe +# define RESCHEDULE_VECTOR 0xfd +# define CALL_FUNCTION_VECTOR 0xfc +# define CALL_FUNCTION_SINGLE_VECTOR 0xfb +# define THERMAL_APIC_VECTOR 0xfa +# define THRESHOLD_APIC_VECTOR 0xf9 +# define UV_BAU_MESSAGE 0xf8 +# define INVALIDATE_TLB_VECTOR_END 0xf7 +# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ #define NUM_INVALIDATE_TLB_VECTORS 8 diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index abf0808d6fc..93fcb05c7d4 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -84,13 +84,15 @@ EXPORT_SYMBOL_GPL(leave_mm); * * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. + * + * Interrupts are disabled. */ void smp_invalidate_interrupt(struct pt_regs *regs) { - unsigned long cpu; + unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); if (!cpumask_test_cpu(cpu, flush_cpumask)) goto out; @@ -112,12 +114,11 @@ void smp_invalidate_interrupt(struct pt_regs *regs) } else leave_mm(cpu); } +out: ack_APIC_irq(); smp_mb__before_clear_bit(); cpumask_clear_cpu(cpu, flush_cpumask); smp_mb__after_clear_bit(); -out: - put_cpu_no_resched(); inc_irq_stat(irq_tlb_count); } @@ -215,7 +216,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) flush_tlb_others(&mm->cpu_vm_mask, mm, va); preempt_enable(); } -EXPORT_SYMBOL(flush_tlb_page); static void do_flush_tlb_all(void *info) { diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index b8bed841ad6..19ac661422f 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -1,20 +1,14 @@ #include #include -#include #include #include -#include -#include #include +#include -#include -#include #include #include -#include -#include -#include +#include #include DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) @@ -121,8 +115,8 @@ EXPORT_SYMBOL_GPL(leave_mm); asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) { - int cpu; - int sender; + unsigned int cpu; + unsigned int sender; union smp_flush_state *f; cpu = smp_processor_id(); @@ -155,14 +149,16 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) } out: ack_APIC_irq(); + smp_mb__before_clear_bit(); cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); + smp_mb__after_clear_bit(); inc_irq_stat(irq_tlb_count); } static void flush_tlb_others_ipi(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { - int sender; + unsigned int sender; union smp_flush_state *f; /* Caller has disabled preemption */ -- cgit v1.2.3 From 02cf94c370e0dc9bf408fe45eb86fe9ad58eaf7f 
Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: make x86_32 use tlb_64.c Impact: less contention when issuing invalidate IPI, cleanup Make x86_32 use the same tlb code as 64bit. The 64bit code uses multiple IPI vectors for tlb shootdown to reduce contention. This patch makes x86_32 allocate the same 8 IPIs as x86_64 and share the code paths. Note that the usage of asmlinkage is inconsistent for x86_32 and 64 and calls for further cleanup. This has been noted with a FIXME comment in tlb_64.c. Signed-off-by: Tejun Heo --- arch/x86/include/asm/irq_vectors.h | 7 +- arch/x86/include/asm/mach-default/entry_arch.h | 18 +- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/entry_32.S | 6 +- arch/x86/kernel/irqinit_32.c | 11 +- arch/x86/kernel/tlb_32.c | 239 ------------------------- arch/x86/kernel/tlb_64.c | 12 +- 7 files changed, 47 insertions(+), 248 deletions(-) delete mode 100644 arch/x86/kernel/tlb_32.c (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 4ee8f800504..9a83a10a5d5 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -58,8 +58,11 @@ # define CALL_FUNCTION_VECTOR 0xfc # define CALL_FUNCTION_SINGLE_VECTOR 0xfb # define THERMAL_APIC_VECTOR 0xfa -/* 0xf1 - 0xf9 : free */ -# define INVALIDATE_TLB_VECTOR 0xf0 +/* 0xf8 - 0xf9 : free */ +# define INVALIDATE_TLB_VECTOR_END 0xf7 +# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ + +# define NUM_INVALIDATE_TLB_VECTORS 8 #else diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/mach-default/entry_arch.h index 6b1add8e31d..6fa399ad1de 100644 --- a/arch/x86/include/asm/mach-default/entry_arch.h +++ b/arch/x86/include/asm/mach-default/entry_arch.h @@ -11,10 +11,26 @@ */ #ifdef CONFIG_X86_SMP BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) -BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) + +BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7, + smp_invalidate_interrupt) #endif /* diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index eb074530c7d..a62a15c2222 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o +obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_64.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += 
trampoline_$(BITS).o diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 46469029e9d..a0b91aac72a 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -672,7 +672,7 @@ common_interrupt: ENDPROC(common_interrupt) CFI_ENDPROC -#define BUILD_INTERRUPT(name, nr) \ +#define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ pushl $~(nr); \ @@ -680,11 +680,13 @@ ENTRY(name) \ SAVE_ALL; \ TRACE_IRQS_OFF \ movl %esp,%eax; \ - call smp_##name; \ + call fn; \ jmp ret_from_intr; \ CFI_ENDPROC; \ ENDPROC(name) +#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name) + /* The include is where all of the SMP etc. interrupts come from */ #include "entry_arch.h" diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 1507ad4e674..bf629cadec1 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -149,8 +149,15 @@ void __init native_init_IRQ(void) */ alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); - /* IPI for invalidation */ - alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); + /* IPIs for invalidation */ + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c deleted file mode 100644 index 93fcb05c7d4..00000000000 --- a/arch/x86/kernel/tlb_32.c +++ /dev/null @@ -1,239 +0,0 @@ -#include -#include -#include - -#include - -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) - = { &init_mm, 0, }; - -/* must come after the send_IPI functions above for inlining */ -#include - -/* - * Smarter SMP flushing macros. - * c/o Linus Torvalds. - * - * These mean you can really definitely utterly forget about - * writing to user space from interrupts. (Its not allowed anyway). - * - * Optimizations Manfred Spraul - */ - -static cpumask_var_t flush_cpumask; -static struct mm_struct *flush_mm; -static unsigned long flush_va; -static DEFINE_SPINLOCK(tlbstate_lock); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. - * - * We need to reload %cr3 since the page tables may be going - * away from under us.. - */ -void leave_mm(int cpu) -{ - BUG_ON(percpu_read(cpu_tlbstate.state) == TLBSTATE_OK); - cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} -EXPORT_SYMBOL_GPL(leave_mm); - -/* - * - * The flush IPI assumes that a thread switch happens in this order: - * [cpu0: the cpu that switches] - * 1) switch_mm() either 1a) or 1b) - * 1a) thread switch to a different mm - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. 
- * 1a2) set cpu_tlbstate to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 - * was in lazy tlb mode. - * 1a3) update cpu_tlbstate[].active_mm - * Now cpu0 accepts tlb flushes for the new mm. - * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); - * Now the other cpus will send tlb flush ipis. - * 1a4) change cr3. - * 1b) thread switch without mm change - * cpu_tlbstate[].active_mm is correct, cpu0 already handles - * flush ipis. - * 1b1) set cpu_tlbstate to TLBSTATE_OK - * 1b2) test_and_set the cpu bit in cpu_vm_mask. - * Atomically set the bit [other cpus will start sending flush ipis], - * and test the bit. - * 1b3) if the bit was 0: leave_mm was called, flush the tlb. - * 2) switch %%esp, ie current - * - * The interrupt must handle 2 special cases: - * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. - * - the cpu performs speculative tlb reads, i.e. even if the cpu only - * runs in kernel space, the cpu could load tlb entries for user space - * pages. - * - * The good news is that cpu_tlbstate is local to each cpu, no - * write/read ordering problems. - */ - -/* - * TLB flush IPI: - * - * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. - * 2) Leave the mm if we are in the lazy tlb mode. - * - * Interrupts are disabled. - */ - -void smp_invalidate_interrupt(struct pt_regs *regs) -{ - unsigned int cpu; - - cpu = smp_processor_id(); - - if (!cpumask_test_cpu(cpu, flush_cpumask)) - goto out; - /* - * This was a BUG() but until someone can quote me the - * line from the intel manual that guarantees an IPI to - * multiple CPUs is retried _only_ on the erroring CPUs - * its staying as a return - * - * BUG(); - */ - - if (flush_mm == percpu_read(cpu_tlbstate.active_mm)) { - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { - if (flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(flush_va); - } else - leave_mm(cpu); - } -out: - ack_APIC_irq(); - smp_mb__before_clear_bit(); - cpumask_clear_cpu(cpu, flush_cpumask); - smp_mb__after_clear_bit(); - inc_irq_stat(irq_tlb_count); -} - -void native_flush_tlb_others(const struct cpumask *cpumask, - struct mm_struct *mm, unsigned long va) -{ - /* - * - mask must exist :) - */ - BUG_ON(cpumask_empty(cpumask)); - BUG_ON(!mm); - - /* - * i'm not happy about this global shared spinlock in the - * MM hot path, but we'll see how contended it is. - * AK: x86-64 has a faster method that could be ported. - */ - spin_lock(&tlbstate_lock); - - cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id())); -#ifdef CONFIG_HOTPLUG_CPU - /* If a CPU which we ran on has gone down, OK. */ - cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask); - if (unlikely(cpumask_empty(flush_cpumask))) { - spin_unlock(&tlbstate_lock); - return; - } -#endif - flush_mm = mm; - flush_va = va; - - /* - * Make the above memory operations globally visible before - * sending the IPI. - */ - smp_mb(); - /* - * We have to send the IPI only to - * CPUs affected. - */ - send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR); - - while (!cpumask_empty(flush_cpumask)) - /* nothing. 
lockup detection does not belong here */ - cpu_relax(); - - flush_mm = NULL; - flush_va = 0; - spin_unlock(&tlbstate_lock); -} - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - - preempt_disable(); - - local_flush_tlb(); - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - preempt_enable(); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - leave_mm(smp_processor_id()); - } - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - __flush_tlb_one(va); - else - leave_mm(smp_processor_id()); - } - - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, va); - preempt_enable(); -} - -static void do_flush_tlb_all(void *info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) - leave_mm(cpu); -} - -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, NULL, 1); -} - -static int init_flush_cpumask(void) -{ - alloc_cpumask_var(&flush_cpumask, GFP_KERNEL); - return 0; -} -early_initcall(init_flush_cpumask); diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 19ac661422f..b3ca1b94065 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -113,7 +113,17 @@ EXPORT_SYMBOL_GPL(leave_mm); * Interrupts are disabled. */ -asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) +/* + * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop + * but still used for documentation purpose but the usage is slightly + * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt + * entry calls in with the first parameter in %eax. Maybe define + * intrlinkage? + */ +#ifdef CONFIG_X86_64 +asmlinkage +#endif +void smp_invalidate_interrupt(struct pt_regs *regs) { unsigned int cpu; unsigned int sender; -- cgit v1.2.3 From 16c2d3f895a3bc8d8e4c76c2646a6b750c181299 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 21 Jan 2009 17:26:06 +0900 Subject: x86: rename tlb_64.c to tlb.c Impact: file rename tlb_64.c is now the tlb code for both 32 and 64. Rename it to tlb.c. 
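As background for the now-shared flush code: the sender hashes onto one of the eight invalidate vectors (smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS) and the handler recovers that slot from the negated vector number pushed by the entry stub (~regs->orig_ax - INVALIDATE_TLB_VECTOR_START). The standalone sketch below models only that arithmetic round trip, borrowing the x86_64 vector constants for illustration; it is not kernel code.

/* Model of the per-sender TLB-flush vector hashing used by the shared
 * tlb code: the sender picks a vector slot, the entry stub pushes the
 * bitwise NOT of the vector, and the handler recovers the slot.
 * Constants mirror the 0xf0..0xf7 layout; illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS  8

int main(void)
{
	for (unsigned int cpu = 0; cpu < 20; cpu++) {
		/* sender side: hash the CPU onto one of the 8 slots */
		unsigned int sender = cpu % NUM_INVALIDATE_TLB_VECTORS;
		unsigned int vector = INVALIDATE_TLB_VECTOR_START + sender;

		/* entry stub side: the vector is pushed negated (cf. pushl $~(nr)) */
		long orig_ax = ~(long)vector;

		/* handler side: recover the sender's slot from the negated vector */
		unsigned int recovered = ~orig_ax - INVALIDATE_TLB_VECTOR_START;

		assert(recovered == sender);
		printf("cpu %2u -> slot %u (vector 0x%x)\n", cpu, sender, vector);
	}
	return 0;
}

With more than eight CPUs several senders share a slot, which is exactly the contention trade-off the comment block in the shared tlb code describes.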
Signed-off-by: Tejun Heo --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/tlb.c | 296 +++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/tlb_64.c | 296 ----------------------------------------------- 3 files changed, 297 insertions(+), 297 deletions(-) create mode 100644 arch/x86/kernel/tlb.c delete mode 100644 arch/x86/kernel/tlb_64.c (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index a62a15c2222..0626a88fbb4 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_64.o +obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/tlb.c b/arch/x86/kernel/tlb.c new file mode 100644 index 00000000000..b3ca1b94065 --- /dev/null +++ b/arch/x86/kernel/tlb.c @@ -0,0 +1,296 @@ +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) + = { &init_mm, 0, }; + +#include +/* + * Smarter SMP flushing macros. + * c/o Linus Torvalds. + * + * These mean you can really definitely utterly forget about + * writing to user space from interrupts. (Its not allowed anyway). + * + * Optimizations Manfred Spraul + * + * More scalable flush, from Andi Kleen + * + * To avoid global state use 8 different call vectors. + * Each CPU uses a specific vector to trigger flushes on other + * CPUs. Depending on the received vector the target CPUs look into + * the right per cpu variable for the flush data. + * + * With more than 8 CPUs they are hashed to the 8 available + * vectors. The limited global vector space forces us to this right now. + * In future when interrupts are split into per CPU domains this could be + * fixed, at the cost of triggering multiple IPIs in some cases. + */ + +union smp_flush_state { + struct { + struct mm_struct *flush_mm; + unsigned long flush_va; + spinlock_t tlbstate_lock; + DECLARE_BITMAP(flush_cpumask, NR_CPUS); + }; + char pad[SMP_CACHE_BYTES]; +} ____cacheline_aligned; + +/* State is put into the per CPU data section, but padded + to a full cache line because other CPUs can access it and we don't + want false sharing in the per cpu data segment. */ +static DEFINE_PER_CPU(union smp_flush_state, flush_state); + +/* + * We cannot call mmdrop() because we are in interrupt context, + * instead update mm->cpu_vm_mask. + */ +void leave_mm(int cpu) +{ + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + BUG(); + cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); + load_cr3(swapper_pg_dir); +} +EXPORT_SYMBOL_GPL(leave_mm); + +/* + * + * The flush IPI assumes that a thread switch happens in this order: + * [cpu0: the cpu that switches] + * 1) switch_mm() either 1a) or 1b) + * 1a) thread switch to a different mm + * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); + * Stop ipi delivery for the old mm. This is not synchronized with + * the other cpus, but smp_invalidate_interrupt ignore flush ipis + * for the wrong mm, and in the worst case we perform a superfluous + * tlb flush. + * 1a2) set cpu mmu_state to TLBSTATE_OK + * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 + * was in lazy tlb mode. 
+ * 1a3) update cpu active_mm + * Now cpu0 accepts tlb flushes for the new mm. + * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); + * Now the other cpus will send tlb flush ipis. + * 1a4) change cr3. + * 1b) thread switch without mm change + * cpu active_mm is correct, cpu0 already handles + * flush ipis. + * 1b1) set cpu mmu_state to TLBSTATE_OK + * 1b2) test_and_set the cpu bit in cpu_vm_mask. + * Atomically set the bit [other cpus will start sending flush ipis], + * and test the bit. + * 1b3) if the bit was 0: leave_mm was called, flush the tlb. + * 2) switch %%esp, ie current + * + * The interrupt must handle 2 special cases: + * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. + * - the cpu performs speculative tlb reads, i.e. even if the cpu only + * runs in kernel space, the cpu could load tlb entries for user space + * pages. + * + * The good news is that cpu mmu_state is local to each cpu, no + * write/read ordering problems. + */ + +/* + * TLB flush IPI: + * + * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. + * 2) Leave the mm if we are in the lazy tlb mode. + * + * Interrupts are disabled. + */ + +/* + * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop + * but still used for documentation purpose but the usage is slightly + * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt + * entry calls in with the first parameter in %eax. Maybe define + * intrlinkage? + */ +#ifdef CONFIG_X86_64 +asmlinkage +#endif +void smp_invalidate_interrupt(struct pt_regs *regs) +{ + unsigned int cpu; + unsigned int sender; + union smp_flush_state *f; + + cpu = smp_processor_id(); + /* + * orig_rax contains the negated interrupt vector. + * Use that to determine where the sender put the data. + */ + sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; + f = &per_cpu(flush_state, sender); + + if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) + goto out; + /* + * This was a BUG() but until someone can quote me the + * line from the intel manual that guarantees an IPI to + * multiple CPUs is retried _only_ on the erroring CPUs + * its staying as a return + * + * BUG(); + */ + + if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { + if (f->flush_va == TLB_FLUSH_ALL) + local_flush_tlb(); + else + __flush_tlb_one(f->flush_va); + } else + leave_mm(cpu); + } +out: + ack_APIC_irq(); + smp_mb__before_clear_bit(); + cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); + smp_mb__after_clear_bit(); + inc_irq_stat(irq_tlb_count); +} + +static void flush_tlb_others_ipi(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) +{ + unsigned int sender; + union smp_flush_state *f; + + /* Caller has disabled preemption */ + sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; + f = &per_cpu(flush_state, sender); + + /* + * Could avoid this lock when + * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is + * probably not worth checking this for a cache-hot lock. + */ + spin_lock(&f->tlbstate_lock); + + f->flush_mm = mm; + f->flush_va = va; + cpumask_andnot(to_cpumask(f->flush_cpumask), + cpumask, cpumask_of(smp_processor_id())); + + /* + * Make the above memory operations globally visible before + * sending the IPI. + */ + smp_mb(); + /* + * We have to send the IPI only to + * CPUs affected. 
+ */ + send_IPI_mask(to_cpumask(f->flush_cpumask), + INVALIDATE_TLB_VECTOR_START + sender); + + while (!cpumask_empty(to_cpumask(f->flush_cpumask))) + cpu_relax(); + + f->flush_mm = NULL; + f->flush_va = 0; + spin_unlock(&f->tlbstate_lock); +} + +void native_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) +{ + if (is_uv_system()) { + unsigned int cpu; + + cpu = get_cpu(); + cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); + if (cpumask) + flush_tlb_others_ipi(cpumask, mm, va); + put_cpu(); + return; + } + flush_tlb_others_ipi(cpumask, mm, va); +} + +static int __cpuinit init_smp_flush(void) +{ + int i; + + for_each_possible_cpu(i) + spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); + + return 0; +} +core_initcall(init_smp_flush); + +void flush_tlb_current_task(void) +{ + struct mm_struct *mm = current->mm; + + preempt_disable(); + + local_flush_tlb(); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); + preempt_enable(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if (current->active_mm == mm) { + if (current->mm) + local_flush_tlb(); + else + leave_mm(smp_processor_id()); + } + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); + + preempt_enable(); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (current->active_mm == mm) { + if (current->mm) + __flush_tlb_one(va); + else + leave_mm(smp_processor_id()); + } + + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, va); + + preempt_enable(); +} + +static void do_flush_tlb_all(void *info) +{ + unsigned long cpu = smp_processor_id(); + + __flush_tlb_all(); + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) + leave_mm(cpu); +} + +void flush_tlb_all(void) +{ + on_each_cpu(do_flush_tlb_all, NULL, 1); +} diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c deleted file mode 100644 index b3ca1b94065..00000000000 --- a/arch/x86/kernel/tlb_64.c +++ /dev/null @@ -1,296 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) - = { &init_mm, 0, }; - -#include -/* - * Smarter SMP flushing macros. - * c/o Linus Torvalds. - * - * These mean you can really definitely utterly forget about - * writing to user space from interrupts. (Its not allowed anyway). - * - * Optimizations Manfred Spraul - * - * More scalable flush, from Andi Kleen - * - * To avoid global state use 8 different call vectors. - * Each CPU uses a specific vector to trigger flushes on other - * CPUs. Depending on the received vector the target CPUs look into - * the right per cpu variable for the flush data. - * - * With more than 8 CPUs they are hashed to the 8 available - * vectors. The limited global vector space forces us to this right now. - * In future when interrupts are split into per CPU domains this could be - * fixed, at the cost of triggering multiple IPIs in some cases. 
- */ - -union smp_flush_state { - struct { - struct mm_struct *flush_mm; - unsigned long flush_va; - spinlock_t tlbstate_lock; - DECLARE_BITMAP(flush_cpumask, NR_CPUS); - }; - char pad[SMP_CACHE_BYTES]; -} ____cacheline_aligned; - -/* State is put into the per CPU data section, but padded - to a full cache line because other CPUs can access it and we don't - want false sharing in the per cpu data segment. */ -static DEFINE_PER_CPU(union smp_flush_state, flush_state); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. - */ -void leave_mm(int cpu) -{ - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} -EXPORT_SYMBOL_GPL(leave_mm); - -/* - * - * The flush IPI assumes that a thread switch happens in this order: - * [cpu0: the cpu that switches] - * 1) switch_mm() either 1a) or 1b) - * 1a) thread switch to a different mm - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. - * 1a2) set cpu mmu_state to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 - * was in lazy tlb mode. - * 1a3) update cpu active_mm - * Now cpu0 accepts tlb flushes for the new mm. - * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); - * Now the other cpus will send tlb flush ipis. - * 1a4) change cr3. - * 1b) thread switch without mm change - * cpu active_mm is correct, cpu0 already handles - * flush ipis. - * 1b1) set cpu mmu_state to TLBSTATE_OK - * 1b2) test_and_set the cpu bit in cpu_vm_mask. - * Atomically set the bit [other cpus will start sending flush ipis], - * and test the bit. - * 1b3) if the bit was 0: leave_mm was called, flush the tlb. - * 2) switch %%esp, ie current - * - * The interrupt must handle 2 special cases: - * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. - * - the cpu performs speculative tlb reads, i.e. even if the cpu only - * runs in kernel space, the cpu could load tlb entries for user space - * pages. - * - * The good news is that cpu mmu_state is local to each cpu, no - * write/read ordering problems. - */ - -/* - * TLB flush IPI: - * - * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. - * 2) Leave the mm if we are in the lazy tlb mode. - * - * Interrupts are disabled. - */ - -/* - * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop - * but still used for documentation purpose but the usage is slightly - * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt - * entry calls in with the first parameter in %eax. Maybe define - * intrlinkage? - */ -#ifdef CONFIG_X86_64 -asmlinkage -#endif -void smp_invalidate_interrupt(struct pt_regs *regs) -{ - unsigned int cpu; - unsigned int sender; - union smp_flush_state *f; - - cpu = smp_processor_id(); - /* - * orig_rax contains the negated interrupt vector. - * Use that to determine where the sender put the data. 
- */ - sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; - f = &per_cpu(flush_state, sender); - - if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) - goto out; - /* - * This was a BUG() but until someone can quote me the - * line from the intel manual that guarantees an IPI to - * multiple CPUs is retried _only_ on the erroring CPUs - * its staying as a return - * - * BUG(); - */ - - if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { - if (f->flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(f->flush_va); - } else - leave_mm(cpu); - } -out: - ack_APIC_irq(); - smp_mb__before_clear_bit(); - cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); - smp_mb__after_clear_bit(); - inc_irq_stat(irq_tlb_count); -} - -static void flush_tlb_others_ipi(const struct cpumask *cpumask, - struct mm_struct *mm, unsigned long va) -{ - unsigned int sender; - union smp_flush_state *f; - - /* Caller has disabled preemption */ - sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; - f = &per_cpu(flush_state, sender); - - /* - * Could avoid this lock when - * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is - * probably not worth checking this for a cache-hot lock. - */ - spin_lock(&f->tlbstate_lock); - - f->flush_mm = mm; - f->flush_va = va; - cpumask_andnot(to_cpumask(f->flush_cpumask), - cpumask, cpumask_of(smp_processor_id())); - - /* - * Make the above memory operations globally visible before - * sending the IPI. - */ - smp_mb(); - /* - * We have to send the IPI only to - * CPUs affected. - */ - send_IPI_mask(to_cpumask(f->flush_cpumask), - INVALIDATE_TLB_VECTOR_START + sender); - - while (!cpumask_empty(to_cpumask(f->flush_cpumask))) - cpu_relax(); - - f->flush_mm = NULL; - f->flush_va = 0; - spin_unlock(&f->tlbstate_lock); -} - -void native_flush_tlb_others(const struct cpumask *cpumask, - struct mm_struct *mm, unsigned long va) -{ - if (is_uv_system()) { - unsigned int cpu; - - cpu = get_cpu(); - cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); - if (cpumask) - flush_tlb_others_ipi(cpumask, mm, va); - put_cpu(); - return; - } - flush_tlb_others_ipi(cpumask, mm, va); -} - -static int __cpuinit init_smp_flush(void) -{ - int i; - - for_each_possible_cpu(i) - spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); - - return 0; -} -core_initcall(init_smp_flush); - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - - preempt_disable(); - - local_flush_tlb(); - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - preempt_enable(); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - leave_mm(smp_processor_id()); - } - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - __flush_tlb_one(va); - else - leave_mm(smp_processor_id()); - } - - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, va); - - preempt_enable(); -} - -static void do_flush_tlb_all(void *info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if 
(percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) - leave_mm(cpu); -} - -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, NULL, 1); -} -- cgit v1.2.3 From 55f4949f5765e7a29863b6d17a774601810732f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 21 Jan 2009 10:08:53 +0100 Subject: x86, mm: move tlb.c to arch/x86/mm/ Impact: cleanup Now that it's unified, move the (SMP) TLB flushing code from arch/x86/kernel/ to arch/x86/mm/, where it belongs logically. Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/tlb.c | 296 ----------------------------------------------- arch/x86/mm/Makefile | 2 + arch/x86/mm/tlb.c | 296 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 299 insertions(+), 297 deletions(-) delete mode 100644 arch/x86/kernel/tlb.c create mode 100644 arch/x86/mm/tlb.c (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0626a88fbb4..0b3272f58bd 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb.o +obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/tlb.c b/arch/x86/kernel/tlb.c deleted file mode 100644 index b3ca1b94065..00000000000 --- a/arch/x86/kernel/tlb.c +++ /dev/null @@ -1,296 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) - = { &init_mm, 0, }; - -#include -/* - * Smarter SMP flushing macros. - * c/o Linus Torvalds. - * - * These mean you can really definitely utterly forget about - * writing to user space from interrupts. (Its not allowed anyway). - * - * Optimizations Manfred Spraul - * - * More scalable flush, from Andi Kleen - * - * To avoid global state use 8 different call vectors. - * Each CPU uses a specific vector to trigger flushes on other - * CPUs. Depending on the received vector the target CPUs look into - * the right per cpu variable for the flush data. - * - * With more than 8 CPUs they are hashed to the 8 available - * vectors. The limited global vector space forces us to this right now. - * In future when interrupts are split into per CPU domains this could be - * fixed, at the cost of triggering multiple IPIs in some cases. - */ - -union smp_flush_state { - struct { - struct mm_struct *flush_mm; - unsigned long flush_va; - spinlock_t tlbstate_lock; - DECLARE_BITMAP(flush_cpumask, NR_CPUS); - }; - char pad[SMP_CACHE_BYTES]; -} ____cacheline_aligned; - -/* State is put into the per CPU data section, but padded - to a full cache line because other CPUs can access it and we don't - want false sharing in the per cpu data segment. */ -static DEFINE_PER_CPU(union smp_flush_state, flush_state); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. 
- */ -void leave_mm(int cpu) -{ - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} -EXPORT_SYMBOL_GPL(leave_mm); - -/* - * - * The flush IPI assumes that a thread switch happens in this order: - * [cpu0: the cpu that switches] - * 1) switch_mm() either 1a) or 1b) - * 1a) thread switch to a different mm - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. - * 1a2) set cpu mmu_state to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 - * was in lazy tlb mode. - * 1a3) update cpu active_mm - * Now cpu0 accepts tlb flushes for the new mm. - * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); - * Now the other cpus will send tlb flush ipis. - * 1a4) change cr3. - * 1b) thread switch without mm change - * cpu active_mm is correct, cpu0 already handles - * flush ipis. - * 1b1) set cpu mmu_state to TLBSTATE_OK - * 1b2) test_and_set the cpu bit in cpu_vm_mask. - * Atomically set the bit [other cpus will start sending flush ipis], - * and test the bit. - * 1b3) if the bit was 0: leave_mm was called, flush the tlb. - * 2) switch %%esp, ie current - * - * The interrupt must handle 2 special cases: - * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. - * - the cpu performs speculative tlb reads, i.e. even if the cpu only - * runs in kernel space, the cpu could load tlb entries for user space - * pages. - * - * The good news is that cpu mmu_state is local to each cpu, no - * write/read ordering problems. - */ - -/* - * TLB flush IPI: - * - * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. - * 2) Leave the mm if we are in the lazy tlb mode. - * - * Interrupts are disabled. - */ - -/* - * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop - * but still used for documentation purpose but the usage is slightly - * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt - * entry calls in with the first parameter in %eax. Maybe define - * intrlinkage? - */ -#ifdef CONFIG_X86_64 -asmlinkage -#endif -void smp_invalidate_interrupt(struct pt_regs *regs) -{ - unsigned int cpu; - unsigned int sender; - union smp_flush_state *f; - - cpu = smp_processor_id(); - /* - * orig_rax contains the negated interrupt vector. - * Use that to determine where the sender put the data. 
- */ - sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; - f = &per_cpu(flush_state, sender); - - if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) - goto out; - /* - * This was a BUG() but until someone can quote me the - * line from the intel manual that guarantees an IPI to - * multiple CPUs is retried _only_ on the erroring CPUs - * its staying as a return - * - * BUG(); - */ - - if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { - if (f->flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(f->flush_va); - } else - leave_mm(cpu); - } -out: - ack_APIC_irq(); - smp_mb__before_clear_bit(); - cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); - smp_mb__after_clear_bit(); - inc_irq_stat(irq_tlb_count); -} - -static void flush_tlb_others_ipi(const struct cpumask *cpumask, - struct mm_struct *mm, unsigned long va) -{ - unsigned int sender; - union smp_flush_state *f; - - /* Caller has disabled preemption */ - sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; - f = &per_cpu(flush_state, sender); - - /* - * Could avoid this lock when - * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is - * probably not worth checking this for a cache-hot lock. - */ - spin_lock(&f->tlbstate_lock); - - f->flush_mm = mm; - f->flush_va = va; - cpumask_andnot(to_cpumask(f->flush_cpumask), - cpumask, cpumask_of(smp_processor_id())); - - /* - * Make the above memory operations globally visible before - * sending the IPI. - */ - smp_mb(); - /* - * We have to send the IPI only to - * CPUs affected. - */ - send_IPI_mask(to_cpumask(f->flush_cpumask), - INVALIDATE_TLB_VECTOR_START + sender); - - while (!cpumask_empty(to_cpumask(f->flush_cpumask))) - cpu_relax(); - - f->flush_mm = NULL; - f->flush_va = 0; - spin_unlock(&f->tlbstate_lock); -} - -void native_flush_tlb_others(const struct cpumask *cpumask, - struct mm_struct *mm, unsigned long va) -{ - if (is_uv_system()) { - unsigned int cpu; - - cpu = get_cpu(); - cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); - if (cpumask) - flush_tlb_others_ipi(cpumask, mm, va); - put_cpu(); - return; - } - flush_tlb_others_ipi(cpumask, mm, va); -} - -static int __cpuinit init_smp_flush(void) -{ - int i; - - for_each_possible_cpu(i) - spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); - - return 0; -} -core_initcall(init_smp_flush); - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - - preempt_disable(); - - local_flush_tlb(); - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - preempt_enable(); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - leave_mm(smp_processor_id()); - } - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - - preempt_disable(); - - if (current->active_mm == mm) { - if (current->mm) - __flush_tlb_one(va); - else - leave_mm(smp_processor_id()); - } - - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(&mm->cpu_vm_mask, mm, va); - - preempt_enable(); -} - -static void do_flush_tlb_all(void *info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if 
(percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) - leave_mm(cpu); -} - -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, NULL, 1); -} diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index d8cc96a2738..9f05157220f 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,6 +1,8 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ pat.o pgtable.o gup.o +obj-$(CONFIG_X86_SMP) += tlb.o + obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c new file mode 100644 index 00000000000..b3ca1b94065 --- /dev/null +++ b/arch/x86/mm/tlb.c @@ -0,0 +1,296 @@ +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) + = { &init_mm, 0, }; + +#include +/* + * Smarter SMP flushing macros. + * c/o Linus Torvalds. + * + * These mean you can really definitely utterly forget about + * writing to user space from interrupts. (Its not allowed anyway). + * + * Optimizations Manfred Spraul + * + * More scalable flush, from Andi Kleen + * + * To avoid global state use 8 different call vectors. + * Each CPU uses a specific vector to trigger flushes on other + * CPUs. Depending on the received vector the target CPUs look into + * the right per cpu variable for the flush data. + * + * With more than 8 CPUs they are hashed to the 8 available + * vectors. The limited global vector space forces us to this right now. + * In future when interrupts are split into per CPU domains this could be + * fixed, at the cost of triggering multiple IPIs in some cases. + */ + +union smp_flush_state { + struct { + struct mm_struct *flush_mm; + unsigned long flush_va; + spinlock_t tlbstate_lock; + DECLARE_BITMAP(flush_cpumask, NR_CPUS); + }; + char pad[SMP_CACHE_BYTES]; +} ____cacheline_aligned; + +/* State is put into the per CPU data section, but padded + to a full cache line because other CPUs can access it and we don't + want false sharing in the per cpu data segment. */ +static DEFINE_PER_CPU(union smp_flush_state, flush_state); + +/* + * We cannot call mmdrop() because we are in interrupt context, + * instead update mm->cpu_vm_mask. + */ +void leave_mm(int cpu) +{ + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + BUG(); + cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); + load_cr3(swapper_pg_dir); +} +EXPORT_SYMBOL_GPL(leave_mm); + +/* + * + * The flush IPI assumes that a thread switch happens in this order: + * [cpu0: the cpu that switches] + * 1) switch_mm() either 1a) or 1b) + * 1a) thread switch to a different mm + * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); + * Stop ipi delivery for the old mm. This is not synchronized with + * the other cpus, but smp_invalidate_interrupt ignore flush ipis + * for the wrong mm, and in the worst case we perform a superfluous + * tlb flush. + * 1a2) set cpu mmu_state to TLBSTATE_OK + * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 + * was in lazy tlb mode. + * 1a3) update cpu active_mm + * Now cpu0 accepts tlb flushes for the new mm. + * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); + * Now the other cpus will send tlb flush ipis. + * 1a4) change cr3. + * 1b) thread switch without mm change + * cpu active_mm is correct, cpu0 already handles + * flush ipis. + * 1b1) set cpu mmu_state to TLBSTATE_OK + * 1b2) test_and_set the cpu bit in cpu_vm_mask. 
+ * Atomically set the bit [other cpus will start sending flush ipis], + * and test the bit. + * 1b3) if the bit was 0: leave_mm was called, flush the tlb. + * 2) switch %%esp, ie current + * + * The interrupt must handle 2 special cases: + * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. + * - the cpu performs speculative tlb reads, i.e. even if the cpu only + * runs in kernel space, the cpu could load tlb entries for user space + * pages. + * + * The good news is that cpu mmu_state is local to each cpu, no + * write/read ordering problems. + */ + +/* + * TLB flush IPI: + * + * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. + * 2) Leave the mm if we are in the lazy tlb mode. + * + * Interrupts are disabled. + */ + +/* + * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop + * but still used for documentation purpose but the usage is slightly + * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt + * entry calls in with the first parameter in %eax. Maybe define + * intrlinkage? + */ +#ifdef CONFIG_X86_64 +asmlinkage +#endif +void smp_invalidate_interrupt(struct pt_regs *regs) +{ + unsigned int cpu; + unsigned int sender; + union smp_flush_state *f; + + cpu = smp_processor_id(); + /* + * orig_rax contains the negated interrupt vector. + * Use that to determine where the sender put the data. + */ + sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; + f = &per_cpu(flush_state, sender); + + if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) + goto out; + /* + * This was a BUG() but until someone can quote me the + * line from the intel manual that guarantees an IPI to + * multiple CPUs is retried _only_ on the erroring CPUs + * its staying as a return + * + * BUG(); + */ + + if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { + if (f->flush_va == TLB_FLUSH_ALL) + local_flush_tlb(); + else + __flush_tlb_one(f->flush_va); + } else + leave_mm(cpu); + } +out: + ack_APIC_irq(); + smp_mb__before_clear_bit(); + cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); + smp_mb__after_clear_bit(); + inc_irq_stat(irq_tlb_count); +} + +static void flush_tlb_others_ipi(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) +{ + unsigned int sender; + union smp_flush_state *f; + + /* Caller has disabled preemption */ + sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; + f = &per_cpu(flush_state, sender); + + /* + * Could avoid this lock when + * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is + * probably not worth checking this for a cache-hot lock. + */ + spin_lock(&f->tlbstate_lock); + + f->flush_mm = mm; + f->flush_va = va; + cpumask_andnot(to_cpumask(f->flush_cpumask), + cpumask, cpumask_of(smp_processor_id())); + + /* + * Make the above memory operations globally visible before + * sending the IPI. + */ + smp_mb(); + /* + * We have to send the IPI only to + * CPUs affected. 
+ */ + send_IPI_mask(to_cpumask(f->flush_cpumask), + INVALIDATE_TLB_VECTOR_START + sender); + + while (!cpumask_empty(to_cpumask(f->flush_cpumask))) + cpu_relax(); + + f->flush_mm = NULL; + f->flush_va = 0; + spin_unlock(&f->tlbstate_lock); +} + +void native_flush_tlb_others(const struct cpumask *cpumask, + struct mm_struct *mm, unsigned long va) +{ + if (is_uv_system()) { + unsigned int cpu; + + cpu = get_cpu(); + cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); + if (cpumask) + flush_tlb_others_ipi(cpumask, mm, va); + put_cpu(); + return; + } + flush_tlb_others_ipi(cpumask, mm, va); +} + +static int __cpuinit init_smp_flush(void) +{ + int i; + + for_each_possible_cpu(i) + spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); + + return 0; +} +core_initcall(init_smp_flush); + +void flush_tlb_current_task(void) +{ + struct mm_struct *mm = current->mm; + + preempt_disable(); + + local_flush_tlb(); + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); + preempt_enable(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if (current->active_mm == mm) { + if (current->mm) + local_flush_tlb(); + else + leave_mm(smp_processor_id()); + } + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); + + preempt_enable(); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (current->active_mm == mm) { + if (current->mm) + __flush_tlb_one(va); + else + leave_mm(smp_processor_id()); + } + + if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) + flush_tlb_others(&mm->cpu_vm_mask, mm, va); + + preempt_enable(); +} + +static void do_flush_tlb_all(void *info) +{ + unsigned long cpu = smp_processor_id(); + + __flush_tlb_all(); + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) + leave_mm(cpu); +} + +void flush_tlb_all(void) +{ + on_each_cpu(do_flush_tlb_all, NULL, 1); +} -- cgit v1.2.3 From 4ec71fa2d2c3f1040348f2604f4b8ccc833d1c2e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 21 Jan 2009 10:24:27 +0100 Subject: x86: uv cleanup, build fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: arch/x86/mm/srat_64.c: In function ‘acpi_numa_processor_affinity_init’: arch/x86/mm/srat_64.c:141: error: implicit declaration of function ‘get_uv_system_type’ arch/x86/mm/srat_64.c:141: error: ‘UV_X2APIC’ undeclared (first use in this function) arch/x86/mm/srat_64.c:141: error: (Each undeclared identifier is reported only once arch/x86/mm/srat_64.c:141: error: for each function it appears in.) A couple of UV definitions were moved to asm/uv/uv.h, but srat_64.c did not include that header. Add it. 
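Editorial illustration (not part of the original commit message): the problems are whitespace-only checkpatch issues in the inline-asm constraint lists, of the kind sketched in this made-up fragment; the real hunks follow below.

	: "=a" (ret),"=d" (x)		/* before: missing space after ',' */
	: "=a" (ret), "=d" (x)		/* after */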
Signed-off-by: Ingo Molnar --- arch/x86/mm/srat_64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 09737c8af07..15df1baee10 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -21,6 +21,7 @@ #include #include #include +#include int acpi_numa __initdata; -- cgit v1.2.3 From ace6c6c840878342f698f0da6588dd5ded755369 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 21 Jan 2009 10:32:44 +0100 Subject: x86: make x86_32 use tlb_64.c, build fix, clean up X86_L1_CACHE_BYTES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: arch/x86/mm/tlb.c:47: error: ‘CONFIG_X86_INTERNODE_CACHE_BYTES’ undeclared here (not in a function) The CONFIG_X86_INTERNODE_CACHE_BYTES symbol is only defined on 64-bit, because vsmp support is 64-bit only. Define it on 32-bit too - where it will always be equal to X86_L1_CACHE_BYTES. Also move the default of X86_L1_CACHE_BYTES (which is separate from the more commonly used L1_CACHE_SHIFT kconfig symbol) from 128 bytes to 64 bytes. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index cdf4a962323..8eb50ba9161 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -292,15 +292,13 @@ config X86_CPU # Define implied options from the CPU selection here config X86_L1_CACHE_BYTES int - default "128" if GENERIC_CPU || MPSC - default "64" if MK8 || MCORE2 - depends on X86_64 + default "128" if MPSC + default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32 config X86_INTERNODE_CACHE_BYTES int default "4096" if X86_VSMP default X86_L1_CACHE_BYTES if !X86_VSMP - depends on X86_64 config X86_CMPXCHG def_bool X86_64 || (X86_32 && !M386) -- cgit v1.2.3 From 4d5d783896fc8c37be88ee5837ca9b3c13fcd55b Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Mon, 19 Jan 2009 16:34:26 -0800 Subject: x86: uaccess: fix style problems Impact: cleanup Fix coding style problems in arch/x86/include/asm/uaccess.h. 
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 4340055b755..aeb3c1b074c 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -121,7 +121,7 @@ extern int __get_user_bad(void); #define __get_user_x(size, ret, x, ptr) \ asm volatile("call __get_user_" #size \ - : "=a" (ret),"=d" (x) \ + : "=a" (ret), "=d" (x) \ : "0" (ptr)) \ /* Careful: we have to cast the result to the type of the pointer @@ -181,7 +181,7 @@ extern int __get_user_bad(void); #define __put_user_x(size, x, ptr, __ret_pu) \ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ - :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") @@ -276,7 +276,7 @@ do { \ __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ break; \ case 4: \ - __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ + __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ break; \ case 8: \ __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ -- cgit v1.2.3 From cc86c9e0dc1a41451240b948bb39d46bb2536ae8 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Mon, 19 Jan 2009 16:37:41 -0800 Subject: x86: uaccess: rename __put_user_u64() to __put_user_asm_u64() Impact: cleanup rename __put_user_u64() to __put_user_asm_u64() like __get_user_asm_u64(). Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index aeb3c1b074c..69d2757cca9 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -186,7 +186,7 @@ extern int __get_user_bad(void); #ifdef CONFIG_X86_32 -#define __put_user_u64(x, addr, err) \ +#define __put_user_asm_u64(x, addr, err) \ asm volatile("1: movl %%eax,0(%2)\n" \ "2: movl %%edx,4(%2)\n" \ "3:\n" \ @@ -203,7 +203,7 @@ extern int __get_user_bad(void); asm volatile("call __put_user_8" : "=a" (__ret_pu) \ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else -#define __put_user_u64(x, ptr, retval) \ +#define __put_user_asm_u64(x, ptr, retval) \ __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #endif @@ -279,7 +279,7 @@ do { \ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ break; \ case 8: \ - __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ + __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \ break; \ default: \ __put_user_bad(); \ -- cgit v1.2.3 From 03b486322e994dde49e67aedb391867b7cf28822 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 20 Jan 2009 04:36:04 +0100 Subject: x86: make UV support configurable Make X86 SGI Ultraviolet support configurable. Saves about 13K of text size on my modest config. 
text data bss dec hex filename 6770537 1158680 694356 8623573 8395d5 vmlinux 6757492 1157664 694228 8609384 835e68 vmlinux.nouv Signed-off-by: Nick Piggin Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 7 +++++++ arch/x86/include/asm/uv/uv.h | 6 +++--- arch/x86/kernel/Makefile | 5 +++-- arch/x86/kernel/efi.c | 2 ++ arch/x86/kernel/entry_64.S | 2 ++ arch/x86/kernel/genapic_64.c | 2 ++ arch/x86/kernel/io_apic.c | 2 +- 7 files changed, 20 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ef27aed6ff7..5a29b792cb8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -391,6 +391,13 @@ config X86_RDC321X as R-8610-(G). If you don't have one of these chips, you should say N here. +config X86_UV + bool "SGI Ultraviolet" + depends on X86_64 + help + This option is needed in order to support SGI Ultraviolet systems. + If you don't have one of these, you should say N here. + config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index dce5fe35013..8ac1d7e312f 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -3,7 +3,7 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_UV extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); @@ -15,7 +15,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, unsigned long va, unsigned int cpu); -#else /* X86_64 */ +#else /* X86_UV */ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } static inline int is_uv_system(void) { return 0; } @@ -28,6 +28,6 @@ uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { return cpumask; } -#endif /* X86_64 */ +#endif /* X86_UV */ #endif /* _ASM_X86_UV_UV_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0b3272f58bd..a99437c965c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -115,10 +115,11 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o - obj-y += bios_uv.o uv_irq.o uv_sysfs.o + obj-y += genapic_64.o genapic_flat_64.o obj-y += genx2apic_cluster.o obj-y += genx2apic_phys.o + obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o + obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o obj-$(CONFIG_AUDIT) += audit_64.o diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 1119d247fe1..b205272ad39 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c @@ -366,10 +366,12 @@ void __init efi_init(void) SMBIOS_TABLE_GUID)) { efi.smbios = config_tables[i].table; printk(" SMBIOS=0x%lx ", config_tables[i].table); +#ifdef CONFIG_X86_UV } else if (!efi_guidcmp(config_tables[i].guid, UV_SYSTEM_TABLE_GUID)) { efi.uv_systab = config_tables[i].table; printk(" UVsystab=0x%lx ", config_tables[i].table); +#endif } else if (!efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID)) { efi.hcdp = config_tables[i].table; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c52b6091916..a52703864a1 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -982,8 +982,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt #endif +#ifdef 
CONFIG_X86_UV apicinterrupt UV_BAU_MESSAGE \ uv_bau_message_intr1 uv_bau_message_interrupt +#endif apicinterrupt LOCAL_TIMER_VECTOR \ apic_timer_interrupt smp_apic_timer_interrupt diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 2bced78b0b8..e656c272115 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -32,7 +32,9 @@ extern struct genapic apic_x2apic_cluster; struct genapic __read_mostly *genapic = &apic_flat; static struct genapic *apic_probe[] __initdata = { +#ifdef CONFIG_X86_UV &apic_x2apic_uv_x, +#endif &apic_x2apic_phys, &apic_x2apic_cluster, &apic_physflat, diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index f7966039072..e4d36bd56b6 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3765,7 +3765,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) } #endif /* CONFIG_HT_IRQ */ -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_UV /* * Re-target the irq to the specified CPU and enable the specified MMR located * on the specified blade to allow the sending of MSIs to the specified CPU. -- cgit v1.2.3 From fb746d0e1365b7472ccc4c3d5b0672b34a092d0b Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 21 Jan 2009 20:45:41 +0100 Subject: x86: optimise page fault entry, cleanup tsk is already assigned to current, drop the redundant second assignment. Signed-off-by: Johannes Weiner Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 033292dc9e2..8f4b859a04b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -419,7 +419,6 @@ static noinline void pgtable_bad(struct pt_regs *regs, printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", tsk->comm, address); dump_pagetable(address); - tsk = current; tsk->thread.cr2 = address; tsk->thread.trap_no = 14; tsk->thread.error_code = error_code; -- cgit v1.2.3 From 623d3f0c619f3576dae69709ca8aa93ac76d8c63 Mon Sep 17 00:00:00 2001 From: David Miller Date: Wed, 21 Jan 2009 17:24:51 -0800 Subject: sparc64: Fix build by including linux/irq.h into time_64.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changeset d7e51e66899f95dabc89b4d4c6674a6e50fa37fc ("sparseirq: make some func to be used with genirq") broke the build on sparc64: arch/sparc/kernel/time_64.c: In function ‘timer_interrupt’: arch/sparc/kernel/time_64.c:732: error: implicit declaration of function ‘kstat_incr_irqs_this_cpu’ make[1]: *** [arch/sparc/kernel/time_64.o] Error 1 Signed-off-by: David S. 
Miller Signed-off-by: Ingo Molnar --- arch/sparc/kernel/time_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 54405d36214..a55279939b6 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -36,10 +36,10 @@ #include #include #include +#include #include #include -#include #include #include #include -- cgit v1.2.3 From e81838d2555e77c893f720c25bfb0c0e5782ef57 Mon Sep 17 00:00:00 2001 From: David Miller Date: Wed, 21 Jan 2009 17:15:53 -0800 Subject: sparc64: Fix build by using kstat_irqs_cpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changeset d7e51e66899f95dabc89b4d4c6674a6e50fa37fc ("sparseirq: make some func to be used with genirq") broke the build on sparc64: arch/sparc/kernel/irq_64.c: In function ‘show_interrupts’: arch/sparc/kernel/irq_64.c:188: error: ‘struct kernel_stat’ has no member named ‘irqs’ make[1]: *** [arch/sparc/kernel/irq_64.o] Error 1 Fix by using the kstat_irqs_cpu() interface. Signed-off-by: David S. Miller Signed-off-by: Ingo Molnar --- arch/sparc/kernel/irq_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index cab8e028687..2e98bef5078 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -185,7 +185,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %9s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); -- cgit v1.2.3 From d639bab8da86d330493487e8c0fea8ca31f53427 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:13 -0800 Subject: x86 PAT: ioremap_wc should take resource_size_t parameter Impact: fix/extend ioremap_wc() beyond 4GB aperture on 32-bit ioremap_wc() was taking in unsigned long parameter, where as it should take 64-bit resource_size_t parameter like other ioremap variants. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 2 +- arch/x86/mm/ioremap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 05cfed4485f..bdbb4b96160 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -91,7 +91,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); -extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); +extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); /* * early_ioremap() and early_iounmap() are for temporary early boot-time diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index bd85d42819e..2ddb1e79a19 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache); * * Must be freed with iounmap. 
*/ -void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) +void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) { if (pat_enabled) return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, -- cgit v1.2.3 From ab897d2013128f470240a541b31cf5e636984e71 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 22 Jan 2009 14:24:16 -0800 Subject: x86/pvops: remove pte_flags pvop pte_flags() was introduced as a new pvop in order to extract just the flags portion of a pte, which is a potentially cheaper operation than extracting the page number as well. It turns out this operation is not needed, because simply using a mask to extract the flags from a pte is sufficient for all current users. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page.h | 3 +-- arch/x86/include/asm/paravirt.h | 18 ------------------ arch/x86/kernel/paravirt.c | 1 - arch/x86/xen/enlighten.c | 1 - 4 files changed, 1 insertion(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index e9873a2e869..6b9810859da 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -147,7 +147,7 @@ static inline pteval_t native_pte_val(pte_t pte) return pte.pte; } -static inline pteval_t native_pte_flags(pte_t pte) +static inline pteval_t pte_flags(pte_t pte) { return native_pte_val(pte) & PTE_FLAGS_MASK; } @@ -173,7 +173,6 @@ static inline pteval_t native_pte_flags(pte_t pte) #endif #define pte_val(x) native_pte_val(x) -#define pte_flags(x) native_pte_flags(x) #define __pte(x) native_make_pte(x) #endif /* CONFIG_PARAVIRT */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ba3e2ff6aed..e25c410f3d8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -279,7 +279,6 @@ struct pv_mmu_ops { pte_t *ptep, pte_t pte); pteval_t (*pte_val)(pte_t); - pteval_t (*pte_flags)(pte_t); pte_t (*make_pte)(pteval_t pte); pgdval_t (*pgd_val)(pgd_t); @@ -1084,23 +1083,6 @@ static inline pteval_t pte_val(pte_t pte) return ret; } -static inline pteval_t pte_flags(pte_t pte) -{ - pteval_t ret; - - if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags, - pte.pte, (u64)pte.pte >> 32); - else - ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, - pte.pte); - -#ifdef CONFIG_PARAVIRT_DEBUG - BUG_ON(ret & PTE_PFN_MASK); -#endif - return ret; -} - static inline pgd_t __pgd(pgdval_t val) { pgdval_t ret; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e4c8fb60887..202514be592 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -435,7 +435,6 @@ struct pv_mmu_ops pv_mmu_ops = { #endif /* PAGETABLE_LEVELS >= 3 */ .pte_val = native_pte_val, - .pte_flags = native_pte_flags, .pgd_val = native_pgd_val, .make_pte = native_make_pte, diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bea215230b2..6f1bb71aa13 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1314,7 +1314,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { .ptep_modify_prot_commit = __ptep_modify_prot_commit, .pte_val = xen_pte_val, - .pte_flags = native_pte_flags, .pgd_val = xen_pgd_val, .make_pte = xen_make_pte, -- cgit v1.2.3 From 6522869c34664dd5f05a0a327e93915b1281c90d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 22 Jan 2009 14:24:22 -0800 Subject: x86: add pte_set_flags/clear_flags for pte flag manipulation It's not necessary to deconstruct 
and reconstruct a pte every time its flags are being updated. Introduce pte_set_flags and pte_clear_flags to set and clear flags in a pte. This allows the flag manipulation code to be inlined, and avoids calls via paravirt-ops. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 06bbcbd66e9..6ceaef08486 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -240,64 +240,78 @@ static inline int pmd_large(pmd_t pte) (_PAGE_PSE | _PAGE_PRESENT); } +static inline pte_t pte_set_flags(pte_t pte, pteval_t set) +{ + pteval_t v = native_pte_val(pte); + + return native_make_pte(v | set); +} + +static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear) +{ + pteval_t v = native_pte_val(pte); + + return native_make_pte(v & ~clear); +} + static inline pte_t pte_mkclean(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_DIRTY); + return pte_clear_flags(pte, _PAGE_DIRTY); } static inline pte_t pte_mkold(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_ACCESSED); + return pte_clear_flags(pte, _PAGE_ACCESSED); } static inline pte_t pte_wrprotect(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_RW); + return pte_clear_flags(pte, _PAGE_RW); } static inline pte_t pte_mkexec(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_NX); + return pte_clear_flags(pte, _PAGE_NX); } static inline pte_t pte_mkdirty(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_DIRTY); + return pte_set_flags(pte, _PAGE_DIRTY); } static inline pte_t pte_mkyoung(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_ACCESSED); + return pte_set_flags(pte, _PAGE_ACCESSED); } static inline pte_t pte_mkwrite(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_RW); + return pte_set_flags(pte, _PAGE_RW); } static inline pte_t pte_mkhuge(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_PSE); + return pte_set_flags(pte, _PAGE_PSE); } static inline pte_t pte_clrhuge(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_PSE); + return pte_clear_flags(pte, _PAGE_PSE); } static inline pte_t pte_mkglobal(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_GLOBAL); + return pte_set_flags(pte, _PAGE_GLOBAL); } static inline pte_t pte_clrglobal(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_GLOBAL); + return pte_clear_flags(pte, _PAGE_GLOBAL); } static inline pte_t pte_mkspecial(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SPECIAL); + return pte_set_flags(pte, _PAGE_SPECIAL); } extern pteval_t __supported_pte_mask; -- cgit v1.2.3 From 03d2989df9c1c7df5b33c7a87e0790465461836a Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 23 Jan 2009 11:03:28 +0900 Subject: x86: remove idle_timestamp from 32bit irq_cpustat_t Impact: bogus irq_cpustat field removed idle_timestamp is left over from the removed irqbalance code. 
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq_32.h | 1 - arch/x86/kernel/process_32.c | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h index d4b5d731073..a70ed050fde 100644 --- a/arch/x86/include/asm/hardirq_32.h +++ b/arch/x86/include/asm/hardirq_32.h @@ -6,7 +6,6 @@ typedef struct { unsigned int __softirq_pending; - unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ unsigned int apic_timer_irqs; /* arch dependent */ unsigned int irq0_irqs; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2c00a57ccb9..1a1ae8edc40 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -108,7 +108,6 @@ void cpu_idle(void) play_dead(); local_irq_disable(); - __get_cpu_var(irq_stat).idle_timestamp = jiffies; /* Don't trace irqs off for idle */ stop_critical_timings(); pm_idle(); -- cgit v1.2.3 From 3819cd489ec5d18a4cbd2f05acdc516473caa105 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 23 Jan 2009 11:03:29 +0900 Subject: x86: remove include of apic.h from hardirq_64.h Impact: cleanup APIC definitions aren't needed here. Remove the include and fix up the fallout. tj: added include to mce_intel_64.c. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq_64.h | 1 - arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 1 + arch/x86/kernel/efi_64.c | 1 + arch/x86/kernel/irq_64.c | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h index a65bab20f6c..873c3c7bffc 100644 --- a/arch/x86/include/asm/hardirq_64.h +++ b/arch/x86/include/asm/hardirq_64.h @@ -3,7 +3,6 @@ #include #include -#include typedef struct { unsigned int __softirq_pending; diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 4b48f251fd3..5e8c79e748a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c index 652c5287215..a4ee29127fd 100644 --- a/arch/x86/kernel/efi_64.c +++ b/arch/x86/kernel/efi_64.c @@ -36,6 +36,7 @@ #include #include #include +#include static pgd_t save_pgd __initdata; static unsigned long efi_flags __initdata; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 0b254de8408..018963aa6ee 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -18,6 +18,7 @@ #include #include #include +#include DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); -- cgit v1.2.3 From 658a9a2c34914e8114eea9c4d85d4ecd3ee33b98 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 23 Jan 2009 11:03:31 +0900 Subject: x86: sync hardirq_{32,64}.h Impact: better code generation and removal of unused field for 32bit In general, use the 64-bit version. 
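Editorial sketch (not part of the original commit message): the "better code generation" comes from the percpu accessors the 64-bit header already uses. As the hunks below show, inc_irq_stat() moves from __get_cpu_var() pointer arithmetic to percpu_add(), which the compiler can emit as a single segment-relative instruction. Roughly, assuming the usual x86 per-cpu segment prefix (%fs on 32-bit, %gs on 64-bit) and a schematic OFFSET:

	/* old 32-bit style: compute this CPU's irq_stat address, then add */
	__get_cpu_var(irq_stat).irq_call_count++;

	/* shared style after the sync: expands to percpu_add(), i.e.
	 * approximately  addl $1, %fs:per_cpu__irq_stat+OFFSET  on 32-bit */
	inc_irq_stat(irq_call_count);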
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq_32.h | 14 ++++++++++---- arch/x86/include/asm/hardirq_64.h | 10 +++++----- 2 files changed, 15 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h index a70ed050fde..e5a332c28c9 100644 --- a/arch/x86/include/asm/hardirq_32.h +++ b/arch/x86/include/asm/hardirq_32.h @@ -14,6 +14,7 @@ typedef struct { unsigned int irq_tlb_count; unsigned int irq_thermal_count; unsigned int irq_spurious_count; + unsigned int irq_threshold_count; } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU(irq_cpustat_t, irq_stat); @@ -22,11 +23,16 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); #define MAX_HARDIRQS_PER_CPU NR_VECTORS #define __ARCH_IRQ_STAT -#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) -#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++) +#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) -void ack_bad_irq(unsigned int irq); -#include +#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) + +#define __ARCH_SET_SOFTIRQ_PENDING + +#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) +#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) + +extern void ack_bad_irq(unsigned int irq); #endif /* _ASM_X86_HARDIRQ_32_H */ diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h index 873c3c7bffc..392e7d61457 100644 --- a/arch/x86/include/asm/hardirq_64.h +++ b/arch/x86/include/asm/hardirq_64.h @@ -22,16 +22,16 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); /* We can have at most NR_VECTORS irqs routed to a cpu at a time */ #define MAX_HARDIRQS_PER_CPU NR_VECTORS -#define __ARCH_IRQ_STAT 1 +#define __ARCH_IRQ_STAT #define inc_irq_stat(member) percpu_add(irq_stat.member, 1) -#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) +#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) -#define __ARCH_SET_SOFTIRQ_PENDING 1 +#define __ARCH_SET_SOFTIRQ_PENDING -#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) +#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) +#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) extern void ack_bad_irq(unsigned int irq); -- cgit v1.2.3 From 22da7b3df3a2e26a87a8581575dbf26e465a6ac7 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 23 Jan 2009 11:03:31 +0900 Subject: x86: merge hardirq_{32,64}.h into hardirq.h Impact: cleanup Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq.h | 43 ++++++++++++++++++++++++++++++++++----- arch/x86/include/asm/hardirq_32.h | 38 ---------------------------------- arch/x86/include/asm/hardirq_64.h | 38 ---------------------------------- 3 files changed, 38 insertions(+), 81 deletions(-) delete mode 100644 arch/x86/include/asm/hardirq_32.h delete mode 100644 arch/x86/include/asm/hardirq_64.h (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 000787df66e..f4a95f20f8e 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -1,11 +1,44 @@ -#ifdef CONFIG_X86_32 -# include "hardirq_32.h" -#else -# include "hardirq_64.h" -#endif +#ifndef _ASM_X86_HARDIRQ_H +#define _ASM_X86_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int 
__nmi_count; /* arch dependent */ + unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int irq0_irqs; + unsigned int irq_resched_count; + unsigned int irq_call_count; + unsigned int irq_tlb_count; + unsigned int irq_thermal_count; + unsigned int irq_spurious_count; + unsigned int irq_threshold_count; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU(irq_cpustat_t, irq_stat); + +/* We can have at most NR_VECTORS irqs routed to a cpu at a time */ +#define MAX_HARDIRQS_PER_CPU NR_VECTORS + +#define __ARCH_IRQ_STAT + +#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) + +#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) + +#define __ARCH_SET_SOFTIRQ_PENDING + +#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) +#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) + +extern void ack_bad_irq(unsigned int irq); extern u64 arch_irq_stat_cpu(unsigned int cpu); #define arch_irq_stat_cpu arch_irq_stat_cpu extern u64 arch_irq_stat(void); #define arch_irq_stat arch_irq_stat + +#endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h deleted file mode 100644 index e5a332c28c9..00000000000 --- a/arch/x86/include/asm/hardirq_32.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_X86_HARDIRQ_32_H -#define _ASM_X86_HARDIRQ_32_H - -#include -#include - -typedef struct { - unsigned int __softirq_pending; - unsigned int __nmi_count; /* arch dependent */ - unsigned int apic_timer_irqs; /* arch dependent */ - unsigned int irq0_irqs; - unsigned int irq_resched_count; - unsigned int irq_call_count; - unsigned int irq_tlb_count; - unsigned int irq_thermal_count; - unsigned int irq_spurious_count; - unsigned int irq_threshold_count; -} ____cacheline_aligned irq_cpustat_t; - -DECLARE_PER_CPU(irq_cpustat_t, irq_stat); - -/* We can have at most NR_VECTORS irqs routed to a cpu at a time */ -#define MAX_HARDIRQS_PER_CPU NR_VECTORS - -#define __ARCH_IRQ_STAT - -#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) - -#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) - -#define __ARCH_SET_SOFTIRQ_PENDING - -#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) - -extern void ack_bad_irq(unsigned int irq); - -#endif /* _ASM_X86_HARDIRQ_32_H */ diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h deleted file mode 100644 index 392e7d61457..00000000000 --- a/arch/x86/include/asm/hardirq_64.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_X86_HARDIRQ_64_H -#define _ASM_X86_HARDIRQ_64_H - -#include -#include - -typedef struct { - unsigned int __softirq_pending; - unsigned int __nmi_count; /* arch dependent */ - unsigned int apic_timer_irqs; /* arch dependent */ - unsigned int irq0_irqs; - unsigned int irq_resched_count; - unsigned int irq_call_count; - unsigned int irq_tlb_count; - unsigned int irq_thermal_count; - unsigned int irq_spurious_count; - unsigned int irq_threshold_count; -} ____cacheline_aligned irq_cpustat_t; - -DECLARE_PER_CPU(irq_cpustat_t, irq_stat); - -/* We can have at most NR_VECTORS irqs routed to a cpu at a time */ -#define MAX_HARDIRQS_PER_CPU NR_VECTORS - -#define __ARCH_IRQ_STAT - -#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) - -#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) - -#define __ARCH_SET_SOFTIRQ_PENDING - -#define set_softirq_pending(x) 
percpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) - -extern void ack_bad_irq(unsigned int irq); - -#endif /* _ASM_X86_HARDIRQ_64_H */ -- cgit v1.2.3 From 2de3a5f7956eb81447feea3aec68193ddd8534bb Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 23 Jan 2009 11:03:32 +0900 Subject: x86: make irq_cpustat_t fields conditional Impact: shrink size of irq_cpustat_t when possible Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/hardirq.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index f4a95f20f8e..176f058e715 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -7,14 +7,22 @@ typedef struct { unsigned int __softirq_pending; unsigned int __nmi_count; /* arch dependent */ - unsigned int apic_timer_irqs; /* arch dependent */ unsigned int irq0_irqs; +#ifdef CONFIG_X86_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int irq_spurious_count; +#endif +#ifdef CONFIG_SMP unsigned int irq_resched_count; unsigned int irq_call_count; unsigned int irq_tlb_count; +#endif +#ifdef CONFIG_X86_MCE unsigned int irq_thermal_count; - unsigned int irq_spurious_count; +# ifdef CONFIG_X86_64 unsigned int irq_threshold_count; +# endif +#endif } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU(irq_cpustat_t, irq_stat); -- cgit v1.2.3 From 99d0000f710f3432182761f65f9658f1cf0bf455 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 23 Jan 2009 11:09:15 +0100 Subject: x86, xen: fix hardirq.h merge fallout Impact: build fix This build error: arch/x86/xen/suspend.c:22: error: implicit declaration of function 'fix_to_virt' arch/x86/xen/suspend.c:22: error: 'FIX_PARAVIRT_BOOTMAP' undeclared (first use in this function) arch/x86/xen/suspend.c:22: error: (Each undeclared identifier is reported only once arch/x86/xen/suspend.c:22: error: for each function it appears in.) triggers because the hardirq.h unification removed an implicit fixmap.h include - on which arch/x86/xen/suspend.c depended. Add the fixmap.h include explicitly. Signed-off-by: Ingo Molnar --- arch/x86/xen/suspend.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 212ffe012b7..95be7b43472 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -6,6 +6,7 @@ #include #include +#include #include "xen-ops.h" #include "mmu.h" -- cgit v1.2.3 From fe40c0af3cff3ea461cf25bddb979abc7279d4df Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 23 Jan 2009 15:49:41 -0800 Subject: x86: uaccess: introduce try and catch framework Impact: introduce new uaccess exception handling framework Introduce {get|put}_user_try and {get|put}_user_catch as new uaccess exception handling framework. {get|put}_user_try begins exception block and {get|put}_user_catch(err) ends the block and gets err if an exception occured in {get|put}_user_ex() in the block. The exception is stored thread_info->uaccess_err. The example usage of this framework is below; int func() { int err = 0; get_user_try { get_user_ex(...); get_user_ex(...); : } get_user_catch(err); return err; } Note: get_user_ex() is not clear the value when an exception occurs, it's different from the behavior of __get_user(), but I think it doesn't matter. Signed-off-by: Hiroshi Shimamoto Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/thread_info.h | 1 + arch/x86/include/asm/uaccess.h | 103 +++++++++++++++++++++++++++++++++++++ arch/x86/mm/extable.c | 6 +++ 3 files changed, 110 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 98789647baa..3f90aeb456b 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -40,6 +40,7 @@ struct thread_info { */ __u8 supervisor_stack[0]; #endif + int uaccess_err; }; #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 69d2757cca9..0ec6de4bcb0 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -199,12 +199,22 @@ extern int __get_user_bad(void); : "=r" (err) \ : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) +#define __put_user_asm_ex_u64(x, addr) \ + asm volatile("1: movl %%eax,0(%1)\n" \ + "2: movl %%edx,4(%1)\n" \ + "3:\n" \ + _ASM_EXTABLE(1b, 2b - 1b) \ + _ASM_EXTABLE(2b, 3b - 2b) \ + : : "A" (x), "r" (addr)) + #define __put_user_x8(x, ptr, __ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else #define __put_user_asm_u64(x, ptr, retval) \ __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) +#define __put_user_asm_ex_u64(x, addr) \ + __put_user_asm_ex(x, addr, "q", "", "Zr") #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #endif @@ -286,6 +296,27 @@ do { \ } \ } while (0) +#define __put_user_size_ex(x, ptr, size) \ +do { \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __put_user_asm_ex(x, ptr, "b", "b", "iq"); \ + break; \ + case 2: \ + __put_user_asm_ex(x, ptr, "w", "w", "ir"); \ + break; \ + case 4: \ + __put_user_asm_ex(x, ptr, "l", "k", "ir"); \ + break; \ + case 8: \ + __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \ + break; \ + default: \ + __put_user_bad(); \ + } \ +} while (0) + #else #define __put_user_size(x, ptr, size, retval, errret) \ @@ -311,9 +342,12 @@ do { \ #ifdef CONFIG_X86_32 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() +#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad() #else #define __get_user_asm_u64(x, ptr, retval, errret) \ __get_user_asm(x, ptr, retval, "q", "", "=r", errret) +#define __get_user_asm_ex_u64(x, ptr) \ + __get_user_asm_ex(x, ptr, "q", "", "=r") #endif #define __get_user_size(x, ptr, size, retval, errret) \ @@ -350,6 +384,33 @@ do { \ : "=r" (err), ltype(x) \ : "m" (__m(addr)), "i" (errret), "0" (err)) +#define __get_user_size_ex(x, ptr, size) \ +do { \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __get_user_asm_ex(x, ptr, "b", "b", "=q"); \ + break; \ + case 2: \ + __get_user_asm_ex(x, ptr, "w", "w", "=r"); \ + break; \ + case 4: \ + __get_user_asm_ex(x, ptr, "l", "k", "=r"); \ + break; \ + case 8: \ + __get_user_asm_ex_u64(x, ptr); \ + break; \ + default: \ + (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ + asm volatile("1: mov"itype" %1,%"rtype"0\n" \ + "2:\n" \ + _ASM_EXTABLE(1b, 2b - 1b) \ + : ltype(x) : "m" (__m(addr))) + #define __put_user_nocheck(x, ptr, size) \ ({ \ int __pu_err; \ @@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; }; _ASM_EXTABLE(1b, 3b) \ : "=r"(err) \ : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) + +#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ + asm volatile("1: mov"itype" %"rtype"0,%1\n" \ + "2:\n" \ + _ASM_EXTABLE(1b, 2b - 
1b) \ + : : ltype(x), "m" (__m(addr))) + +/* + * uaccess_try and catch + */ +#define uaccess_try do { \ + int prev_err = current_thread_info()->uaccess_err; \ + current_thread_info()->uaccess_err = 0; \ + barrier(); + +#define uaccess_catch(err) \ + (err) |= current_thread_info()->uaccess_err; \ + current_thread_info()->uaccess_err = prev_err; \ +} while (0) + /** * __get_user: - Get a simple variable from user space, with less checking. * @x: Variable to store result. @@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user(x, ptr) \ __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + /** * __put_user: - Write a simple value into user space, with less checking. * @x: Value to copy to user space. @@ -434,6 +516,27 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user +/* + * {get|put}_user_try and catch + * + * get_user_try { + * get_user_ex(...); + * } get_user_catch(err) + */ +#define get_user_try uaccess_try +#define get_user_catch(err) uaccess_catch(err) +#define put_user_try uaccess_try +#define put_user_catch(err) uaccess_catch(err) + +#define get_user_ex(x, ptr) do { \ + unsigned long __gue_val; \ + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ + (x) = (__force __typeof__(*(ptr)))__gue_val; \ +} while (0) + +#define put_user_ex(x, ptr) \ + __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + /* * movsl can be slow when source and dest are not both 8-byte aligned */ diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 7e8db53528a..61b41ca3b5a 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs) fixup = search_exception_tables(regs->ip); if (fixup) { + /* If fixup is less than 16, it means uaccess error */ + if (fixup->fixup < 16) { + current_thread_info()->uaccess_err = -EFAULT; + regs->ip += fixup->fixup; + return 1; + } regs->ip = fixup->fixup; return 1; } -- cgit v1.2.3 From 98e3d45edad207b4358948d6e2cac4e482c3bb5d Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 23 Jan 2009 15:50:10 -0800 Subject: x86: signal: use {get|put}_user_try and catch Impact: use new framework Use {get|put}_user_try, catch, and _ex in arch/x86/kernel/signal.c. Note: this patch contains "WARNING: line over 80 characters", because when introducing new block I insert an indent to avoid mistakes by edit. Signed-off-by: Hiroshi Shimamoto Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/signal.c | 291 +++++++++++++++++++++++++---------------------- 1 file changed, 154 insertions(+), 137 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 89bb7668041..cf34eb37fbe 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -51,24 +51,24 @@ #endif #define COPY(x) { \ - err |= __get_user(regs->x, &sc->x); \ + get_user_ex(regs->x, &sc->x); \ } #define COPY_SEG(seg) { \ unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + get_user_ex(tmp, &sc->seg); \ regs->seg = tmp; \ } #define COPY_SEG_CPL3(seg) { \ unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + get_user_ex(tmp, &sc->seg); \ regs->seg = tmp | 3; \ } #define GET_SEG(seg) { \ unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + get_user_ex(tmp, &sc->seg); \ loadsegment(seg, tmp); \ } @@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; + get_user_try { + #ifdef CONFIG_X86_32 - GET_SEG(gs); - COPY_SEG(fs); - COPY_SEG(es); - COPY_SEG(ds); + GET_SEG(gs); + COPY_SEG(fs); + COPY_SEG(es); + COPY_SEG(ds); #endif /* CONFIG_X86_32 */ - COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); - COPY(dx); COPY(cx); COPY(ip); + COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); + COPY(dx); COPY(cx); COPY(ip); #ifdef CONFIG_X86_64 - COPY(r8); - COPY(r9); - COPY(r10); - COPY(r11); - COPY(r12); - COPY(r13); - COPY(r14); - COPY(r15); + COPY(r8); + COPY(r9); + COPY(r10); + COPY(r11); + COPY(r12); + COPY(r13); + COPY(r14); + COPY(r15); #endif /* CONFIG_X86_64 */ #ifdef CONFIG_X86_32 - COPY_SEG_CPL3(cs); - COPY_SEG_CPL3(ss); + COPY_SEG_CPL3(cs); + COPY_SEG_CPL3(ss); #else /* !CONFIG_X86_32 */ - /* Kernel saves and restores only the CS segment register on signals, - * which is the bare minimum needed to allow mixed 32/64-bit code. - * App's signal handler can save/restore other segments if needed. */ - COPY_SEG_CPL3(cs); + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. 
*/ + COPY_SEG_CPL3(cs); #endif /* CONFIG_X86_32 */ - err |= __get_user(tmpflags, &sc->flags); - regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); - regs->orig_ax = -1; /* disable syscall checks */ + get_user_ex(tmpflags, &sc->flags); + regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); + regs->orig_ax = -1; /* disable syscall checks */ + + get_user_ex(buf, &sc->fpstate); + err |= restore_i387_xstate(buf); - err |= __get_user(buf, &sc->fpstate); - err |= restore_i387_xstate(buf); + get_user_ex(*pax, &sc->ax); + } get_user_catch(err); - err |= __get_user(*pax, &sc->ax); return err; } @@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, { int err = 0; + put_user_try { + #ifdef CONFIG_X86_32 - { - unsigned int tmp; + { + unsigned int tmp; - savesegment(gs, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->gs); - } - err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); - err |= __put_user(regs->es, (unsigned int __user *)&sc->es); - err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds); + savesegment(gs, tmp); + put_user_ex(tmp, (unsigned int __user *)&sc->gs); + } + put_user_ex(regs->fs, (unsigned int __user *)&sc->fs); + put_user_ex(regs->es, (unsigned int __user *)&sc->es); + put_user_ex(regs->ds, (unsigned int __user *)&sc->ds); #endif /* CONFIG_X86_32 */ - err |= __put_user(regs->di, &sc->di); - err |= __put_user(regs->si, &sc->si); - err |= __put_user(regs->bp, &sc->bp); - err |= __put_user(regs->sp, &sc->sp); - err |= __put_user(regs->bx, &sc->bx); - err |= __put_user(regs->dx, &sc->dx); - err |= __put_user(regs->cx, &sc->cx); - err |= __put_user(regs->ax, &sc->ax); + put_user_ex(regs->di, &sc->di); + put_user_ex(regs->si, &sc->si); + put_user_ex(regs->bp, &sc->bp); + put_user_ex(regs->sp, &sc->sp); + put_user_ex(regs->bx, &sc->bx); + put_user_ex(regs->dx, &sc->dx); + put_user_ex(regs->cx, &sc->cx); + put_user_ex(regs->ax, &sc->ax); #ifdef CONFIG_X86_64 - err |= __put_user(regs->r8, &sc->r8); - err |= __put_user(regs->r9, &sc->r9); - err |= __put_user(regs->r10, &sc->r10); - err |= __put_user(regs->r11, &sc->r11); - err |= __put_user(regs->r12, &sc->r12); - err |= __put_user(regs->r13, &sc->r13); - err |= __put_user(regs->r14, &sc->r14); - err |= __put_user(regs->r15, &sc->r15); + put_user_ex(regs->r8, &sc->r8); + put_user_ex(regs->r9, &sc->r9); + put_user_ex(regs->r10, &sc->r10); + put_user_ex(regs->r11, &sc->r11); + put_user_ex(regs->r12, &sc->r12); + put_user_ex(regs->r13, &sc->r13); + put_user_ex(regs->r14, &sc->r14); + put_user_ex(regs->r15, &sc->r15); #endif /* CONFIG_X86_64 */ - err |= __put_user(current->thread.trap_no, &sc->trapno); - err |= __put_user(current->thread.error_code, &sc->err); - err |= __put_user(regs->ip, &sc->ip); + put_user_ex(current->thread.trap_no, &sc->trapno); + put_user_ex(current->thread.error_code, &sc->err); + put_user_ex(regs->ip, &sc->ip); #ifdef CONFIG_X86_32 - err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); - err |= __put_user(regs->flags, &sc->flags); - err |= __put_user(regs->sp, &sc->sp_at_signal); - err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); + put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); + put_user_ex(regs->flags, &sc->flags); + put_user_ex(regs->sp, &sc->sp_at_signal); + put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); #else /* !CONFIG_X86_32 */ - err |= __put_user(regs->flags, &sc->flags); - err |= __put_user(regs->cs, &sc->cs); - err |= __put_user(0, &sc->gs); - err |= __put_user(0, &sc->fs); + 
put_user_ex(regs->flags, &sc->flags); + put_user_ex(regs->cs, &sc->cs); + put_user_ex(0, &sc->gs); + put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ - err |= __put_user(fpstate, &sc->fpstate); + put_user_ex(fpstate, &sc->fpstate); - /* non-iBCS2 extensions.. */ - err |= __put_user(mask, &sc->oldmask); - err |= __put_user(current->thread.cr2, &sc->cr2); + /* non-iBCS2 extensions.. */ + put_user_ex(mask, &sc->oldmask); + put_user_ex(current->thread.cr2, &sc->cr2); + } put_user_catch(err); return err; } @@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; - err |= __put_user(sig, &frame->sig); - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); - err |= copy_siginfo_to_user(&frame->info, info); - if (err) - return -EFAULT; - - /* Create the ucontext. */ - if (cpu_has_xsave) - err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); - else - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, - regs, set->sig[0]); - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - return -EFAULT; + put_user_try { + put_user_ex(sig, &frame->sig); + put_user_ex(&frame->info, &frame->pinfo); + put_user_ex(&frame->uc, &frame->puc); + err |= copy_siginfo_to_user(&frame->info, info); - /* Set up to return from userspace. */ - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); - if (ka->sa.sa_flags & SA_RESTORER) - restorer = ka->sa.sa_restorer; - err |= __put_user(restorer, &frame->pretcode); + /* Create the ucontext. */ + if (cpu_has_xsave) + put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); + else + put_user_ex(0, &frame->uc.uc_flags); + put_user_ex(0, &frame->uc.uc_link); + put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + put_user_ex(sas_ss_flags(regs->sp), + &frame->uc.uc_stack.ss_flags); + put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. */ + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + put_user_ex(restorer, &frame->pretcode); - /* - * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 - * - * WE DO NOT USE IT ANY MORE! It's only left here for historical - * reasons and because gdb uses it as a signature to notice - * signal handler stack frames. - */ - err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); + /* + * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 + * + * WE DO NOT USE IT ANY MORE! It's only left here for historical + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ + put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); + } put_user_catch(err); if (err) return -EFAULT; @@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, return -EFAULT; } - /* Create the ucontext. 
*/ - if (cpu_has_xsave) - err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); - else - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - - /* Set up to return from userspace. If provided, use a stub - already in userspace. */ - /* x86-64 should always use SA_RESTORER. */ - if (ka->sa.sa_flags & SA_RESTORER) { - err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); - } else { - /* could use a vstub here */ - return -EFAULT; - } + put_user_try { + /* Create the ucontext. */ + if (cpu_has_xsave) + put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); + else + put_user_ex(0, &frame->uc.uc_flags); + put_user_ex(0, &frame->uc.uc_link); + put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + put_user_ex(sas_ss_flags(regs->sp), + &frame->uc.uc_stack.ss_flags); + put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + /* x86-64 should always use SA_RESTORER. */ + if (ka->sa.sa_flags & SA_RESTORER) { + put_user_ex(ka->sa.sa_restorer, &frame->pretcode); + } else { + /* could use a vstub here */ + err |= -EFAULT; + } + } put_user_catch(err); if (err) return -EFAULT; @@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; - int ret; + int ret = 0; if (act) { old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + if (!access_ok(VERIFY_READ, act, sizeof(*act))) return -EFAULT; - __get_user(new_ka.sa.sa_flags, &act->sa_flags); - __get_user(mask, &act->sa_mask); + get_user_try { + get_user_ex(new_ka.sa.sa_handler, &act->sa_handler); + get_user_ex(new_ka.sa.sa_flags, &act->sa_flags); + get_user_ex(mask, &act->sa_mask); + get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer); + } get_user_catch(ret); + + if (ret) + return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) return -EFAULT; - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + put_user_try { + put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler); + put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags); + put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer); + } put_user_catch(ret); + + if (ret) + return -EFAULT; } return ret; -- cgit v1.2.3 From 3b4b75700a245d0d48fc52a4d2f67d3155812aba Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 23 Jan 2009 15:50:38 -0800 Subject: x86: ia32_signal: use {get|put}_user_try and catch Impact: use new framework Use {get|put}_user_try, catch, and _ex in arch/x86/ia32/ia32_signal.c. Note: this patch contains "WARNING: line over 80 characters", because when introducing new block I insert an indent to avoid mistakes by edit. Signed-off-by: Hiroshi Shimamoto Signed-off-by: H. Peter Anvin --- arch/x86/ia32/ia32_signal.c | 365 +++++++++++++++++++++++--------------------- 1 file changed, 195 insertions(+), 170 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 9dabd00e980..dd77ac0cac4 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where); int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { - int err; + int err = 0; if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) return -EFAULT; - /* If you change siginfo_t structure, please make sure that - this code is fixed accordingly. - It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. */ - err = __put_user(from->si_signo, &to->si_signo); - err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); - - if (from->si_code < 0) { - err |= __put_user(from->si_pid, &to->si_pid); - err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); - } else { - /* - * First 32bits of unions are always present: - * si_pid === si_band === si_tid === si_addr(LS half) - */ - err |= __put_user(from->_sifields._pad[0], - &to->_sifields._pad[0]); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: - break; - case __SI_CHLD >> 16: - err |= __put_user(from->si_utime, &to->si_utime); - err |= __put_user(from->si_stime, &to->si_stime); - err |= __put_user(from->si_status, &to->si_status); - /* FALL THROUGH */ - default: - case __SI_KILL >> 16: - err |= __put_user(from->si_uid, &to->si_uid); - break; - case __SI_POLL >> 16: - err |= __put_user(from->si_fd, &to->si_fd); - break; - case __SI_TIMER >> 16: - err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user(ptr_to_compat(from->si_ptr), - &to->si_ptr); - break; - /* This is not generated by the kernel as of now. */ - case __SI_RT >> 16: - case __SI_MESGQ >> 16: - err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user(from->si_int, &to->si_int); - break; + put_user_try { + /* If you change siginfo_t structure, please make sure that + this code is fixed accordingly. 
+ It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. */ + put_user_ex(from->si_signo, &to->si_signo); + put_user_ex(from->si_errno, &to->si_errno); + put_user_ex((short)from->si_code, &to->si_code); + + if (from->si_code < 0) { + put_user_ex(from->si_pid, &to->si_pid); + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); + } else { + /* + * First 32bits of unions are always present: + * si_pid === si_band === si_tid === si_addr(LS half) + */ + put_user_ex(from->_sifields._pad[0], + &to->_sifields._pad[0]); + switch (from->si_code >> 16) { + case __SI_FAULT >> 16: + break; + case __SI_CHLD >> 16: + put_user_ex(from->si_utime, &to->si_utime); + put_user_ex(from->si_stime, &to->si_stime); + put_user_ex(from->si_status, &to->si_status); + /* FALL THROUGH */ + default: + case __SI_KILL >> 16: + put_user_ex(from->si_uid, &to->si_uid); + break; + case __SI_POLL >> 16: + put_user_ex(from->si_fd, &to->si_fd); + break; + case __SI_TIMER >> 16: + put_user_ex(from->si_overrun, &to->si_overrun); + put_user_ex(ptr_to_compat(from->si_ptr), + &to->si_ptr); + break; + /* This is not generated by the kernel as of now. */ + case __SI_RT >> 16: + case __SI_MESGQ >> 16: + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(from->si_int, &to->si_int); + break; + } } - } + } put_user_catch(err); + return err; } int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - int err; + int err = 0; u32 ptr32; if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) return -EFAULT; - err = __get_user(to->si_signo, &from->si_signo); - err |= __get_user(to->si_errno, &from->si_errno); - err |= __get_user(to->si_code, &from->si_code); + get_user_try { + get_user_ex(to->si_signo, &from->si_signo); + get_user_ex(to->si_errno, &from->si_errno); + get_user_ex(to->si_code, &from->si_code); - err |= __get_user(to->si_pid, &from->si_pid); - err |= __get_user(to->si_uid, &from->si_uid); - err |= __get_user(ptr32, &from->si_ptr); - to->si_ptr = compat_ptr(ptr32); + get_user_ex(to->si_pid, &from->si_pid); + get_user_ex(to->si_uid, &from->si_uid); + get_user_ex(ptr32, &from->si_ptr); + to->si_ptr = compat_ptr(ptr32); + } get_user_catch(err); return err; } @@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, struct pt_regs *regs) { stack_t uss, uoss; - int ret; + int ret, err = 0; mm_segment_t seg; if (uss_ptr) { u32 ptr; memset(&uss, 0, sizeof(stack_t)); - if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) || - __get_user(ptr, &uss_ptr->ss_sp) || - __get_user(uss.ss_flags, &uss_ptr->ss_flags) || - __get_user(uss.ss_size, &uss_ptr->ss_size)) + if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t))) + return -EFAULT; + + get_user_try { + get_user_ex(ptr, &uss_ptr->ss_sp); + get_user_ex(uss.ss_flags, &uss_ptr->ss_flags); + get_user_ex(uss.ss_size, &uss_ptr->ss_size); + } get_user_catch(err); + + if (err) return -EFAULT; uss.ss_sp = compat_ptr(ptr); } @@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, ret = do_sigaltstack(uss_ptr ? 
&uss : NULL, &uoss, regs->sp); set_fs(seg); if (ret >= 0 && uoss_ptr) { - if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) || - __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || - __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || - __put_user(uoss.ss_size, &uoss_ptr->ss_size)) + if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) + return -EFAULT; + + put_user_try { + put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp); + put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags); + put_user_ex(uoss.ss_size, &uoss_ptr->ss_size); + } put_user_catch(err); + + if (err) ret = -EFAULT; } return ret; @@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, * Do a signal return; undo the signal stack. */ #define COPY(x) { \ - err |= __get_user(regs->x, &sc->x); \ + get_user_ex(regs->x, &sc->x); \ } #define COPY_SEG_CPL3(seg) { \ unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ + get_user_ex(tmp, &sc->seg); \ regs->seg = tmp | 3; \ } #define RELOAD_SEG(seg) { \ unsigned int cur, pre; \ - err |= __get_user(pre, &sc->seg); \ + get_user_ex(pre, &sc->seg); \ savesegment(seg, cur); \ pre |= 3; \ if (pre != cur) \ @@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, sc, sc->err, sc->ip, sc->cs, sc->flags); #endif - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ - err |= __get_user(gs, &sc->gs); - gs |= 3; - savesegment(gs, oldgs); - if (gs != oldgs) - load_gs_index(gs); - - RELOAD_SEG(fs); - RELOAD_SEG(ds); - RELOAD_SEG(es); - - COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); - COPY(dx); COPY(cx); COPY(ip); - /* Don't touch extended registers */ - - COPY_SEG_CPL3(cs); - COPY_SEG_CPL3(ss); - - err |= __get_user(tmpflags, &sc->flags); - regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); - /* disable syscall checks */ - regs->orig_ax = -1; - - err |= __get_user(tmp, &sc->fpstate); - buf = compat_ptr(tmp); - err |= restore_i387_xstate_ia32(buf); - - err |= __get_user(*pax, &sc->ax); + get_user_try { + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. 
+ */ + get_user_ex(gs, &sc->gs); + gs |= 3; + savesegment(gs, oldgs); + if (gs != oldgs) + load_gs_index(gs); + + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); + + COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); + COPY(dx); COPY(cx); COPY(ip); + /* Don't touch extended registers */ + + COPY_SEG_CPL3(cs); + COPY_SEG_CPL3(ss); + + get_user_ex(tmpflags, &sc->flags); + regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); + /* disable syscall checks */ + regs->orig_ax = -1; + + get_user_ex(tmp, &sc->fpstate); + buf = compat_ptr(tmp); + err |= restore_i387_xstate_ia32(buf); + + get_user_ex(*pax, &sc->ax); + } get_user_catch(err); + return err; } @@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, { int tmp, err = 0; - savesegment(gs, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->gs); - savesegment(fs, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->fs); - savesegment(ds, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->ds); - savesegment(es, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->es); - - err |= __put_user(regs->di, &sc->di); - err |= __put_user(regs->si, &sc->si); - err |= __put_user(regs->bp, &sc->bp); - err |= __put_user(regs->sp, &sc->sp); - err |= __put_user(regs->bx, &sc->bx); - err |= __put_user(regs->dx, &sc->dx); - err |= __put_user(regs->cx, &sc->cx); - err |= __put_user(regs->ax, &sc->ax); - err |= __put_user(current->thread.trap_no, &sc->trapno); - err |= __put_user(current->thread.error_code, &sc->err); - err |= __put_user(regs->ip, &sc->ip); - err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); - err |= __put_user(regs->flags, &sc->flags); - err |= __put_user(regs->sp, &sc->sp_at_signal); - err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); - - err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate); - - /* non-iBCS2 extensions.. */ - err |= __put_user(mask, &sc->oldmask); - err |= __put_user(current->thread.cr2, &sc->cr2); + put_user_try { + savesegment(gs, tmp); + put_user_ex(tmp, (unsigned int __user *)&sc->gs); + savesegment(fs, tmp); + put_user_ex(tmp, (unsigned int __user *)&sc->fs); + savesegment(ds, tmp); + put_user_ex(tmp, (unsigned int __user *)&sc->ds); + savesegment(es, tmp); + put_user_ex(tmp, (unsigned int __user *)&sc->es); + + put_user_ex(regs->di, &sc->di); + put_user_ex(regs->si, &sc->si); + put_user_ex(regs->bp, &sc->bp); + put_user_ex(regs->sp, &sc->sp); + put_user_ex(regs->bx, &sc->bx); + put_user_ex(regs->dx, &sc->dx); + put_user_ex(regs->cx, &sc->cx); + put_user_ex(regs->ax, &sc->ax); + put_user_ex(current->thread.trap_no, &sc->trapno); + put_user_ex(current->thread.error_code, &sc->err); + put_user_ex(regs->ip, &sc->ip); + put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); + put_user_ex(regs->flags, &sc->flags); + put_user_ex(regs->sp, &sc->sp_at_signal); + put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); + + put_user_ex(ptr_to_compat(fpstate), &sc->fpstate); + + /* non-iBCS2 extensions.. */ + put_user_ex(mask, &sc->oldmask); + put_user_ex(current->thread.cr2, &sc->cr2); + } put_user_catch(err); return err; } @@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, else restorer = &frame->retcode; } - err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); - /* - * These are actually not used anymore, but left because some - * gdb versions depend on them as a marker. 
- */ - err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); + put_user_try { + put_user_ex(ptr_to_compat(restorer), &frame->pretcode); + + /* + * These are actually not used anymore, but left because some + * gdb versions depend on them as a marker. + */ + put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + } put_user_catch(err); + if (err) return -EFAULT; @@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; - err |= __put_user(sig, &frame->sig); - err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); - err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); - err |= copy_siginfo_to_user32(&frame->info, info); - if (err) - return -EFAULT; + put_user_try { + put_user_ex(sig, &frame->sig); + put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo); + put_user_ex(ptr_to_compat(&frame->uc), &frame->puc); + err |= copy_siginfo_to_user32(&frame->info, info); - /* Create the ucontext. */ - if (cpu_has_xsave) - err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); - else - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, - regs, set->sig[0]); - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - return -EFAULT; + /* Create the ucontext. */ + if (cpu_has_xsave) + put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); + else + put_user_ex(0, &frame->uc.uc_flags); + put_user_ex(0, &frame->uc.uc_link); + put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + put_user_ex(sas_ss_flags(regs->sp), + &frame->uc.uc_stack.ss_flags); + put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + else + restorer = VDSO32_SYMBOL(current->mm->context.vdso, + rt_sigreturn); + put_user_ex(ptr_to_compat(restorer), &frame->pretcode); + + /* + * Not actually used anymore, but left because some gdb + * versions need it. + */ + put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + } put_user_catch(err); - if (ka->sa.sa_flags & SA_RESTORER) - restorer = ka->sa.sa_restorer; - else - restorer = VDSO32_SYMBOL(current->mm->context.vdso, - rt_sigreturn); - err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); - - /* - * Not actually used anymore, but left because some gdb - * versions need it. - */ - err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); if (err) return -EFAULT; -- cgit v1.2.3 From b1882e68d17a93b523dce09c3a181319aace2f0e Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 23 Jan 2009 17:18:52 -0800 Subject: x86: clean up stray space in Impact: Whitespace cleanup only Clean up a stray space character in arch/x86/include/asm/processor.h. Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 091cd8855f2..ac8fab3b868 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -73,7 +73,7 @@ struct cpuinfo_x86 { char pad0; #else /* Number of 4K pages in DTLB/ITLB combined(in pages): */ - int x86_tlbsize; + int x86_tlbsize; __u8 x86_virt_bits; __u8 x86_phys_bits; #endif -- cgit v1.2.3 From 75a048119e76540d73132cfc8e0fa0c0a8bb6c83 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 22 Jan 2009 16:17:05 -0800 Subject: x86: handle PAT more like other CPU features Impact: Cleanup When PAT was originally introduced, it was handled specially for a few reasons: - PAT bugs are hard to track down, so we wanted to maintain a whitelist of CPUs. - The i386 and x86-64 CPUID code was not yet unified. Both of these are now obsolete, so handle PAT like any other features, including ordinary feature blacklisting due to known bugs. Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/pat.h | 4 ---- arch/x86/kernel/cpu/addon_cpuid_features.c | 34 ------------------------------ arch/x86/kernel/cpu/common.c | 2 -- arch/x86/kernel/cpu/intel.c | 12 +++++++++++ arch/x86/mm/pat.c | 31 +++++++++++++++++---------- 5 files changed, 32 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index b8493b3b989..9709fdff661 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -5,10 +5,8 @@ #ifdef CONFIG_X86_PAT extern int pat_enabled; -extern void validate_pat_support(struct cpuinfo_x86 *c); #else static const int pat_enabled; -static inline void validate_pat_support(struct cpuinfo_x86 *c) { } #endif extern void pat_init(void); @@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end, unsigned long req_type, unsigned long *ret_type); extern int free_memtype(u64 start, u64 end); -extern void pat_disable(char *reason); - #endif /* _ASM_X86_PAT_H */ diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 2cf23634b6d..4e581fdc0a5 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) return; #endif } - -#ifdef CONFIG_X86_PAT -void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) -{ - if (!cpu_has_pat) - pat_disable("PAT not supported by CPU."); - - switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - /* - * There is a known erratum on Pentium III and Core Solo - * and Core Duo CPUs. - * " Page with PAT set to WC while associated MTRR is UC - * may consolidate to UC " - * Because of this erratum, it is better to stick with - * setting WC in MTRR rather than using PAT on these CPUs. - * - * Enable PAT WC only on P4, Core 2 or later CPUs. - */ - if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15)) - return; - - pat_disable("PAT WC disabled due to known CPU erratum."); - return; - - case X86_VENDOR_AMD: - case X86_VENDOR_CENTAUR: - case X86_VENDOR_TRANSMETA: - return; - } - - pat_disable("PAT disabled. 
Not yet verified on this CPU type."); -} -#endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 83492b1f93b..0f8656361e0 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -570,8 +570,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_early_init) this_cpu->c_early_init(c); - validate_pat_support(c); - #ifdef CONFIG_SMP c->cpu_index = boot_cpu_id; #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8ea6929e974..20ce03acf04 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -50,6 +50,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + /* + * There is a known erratum on Pentium III and Core Solo + * and Core Duo CPUs. + * " Page with PAT set to WC while associated MTRR is UC + * may consolidate to UC " + * Because of this erratum, it is better to stick with + * setting WC in MTRR rather than using PAT on these CPUs. + * + * Enable PAT WC only on P4, Core 2 or later CPUs. + */ + if (c->x86 == 6 && c->x86_model < 15) + clear_cpu_cap(c, X86_FEATURE_PAT); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 8b08fb95527..430cb44dd3f 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -30,7 +30,7 @@ #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; -void __cpuinit pat_disable(char *reason) +void __cpuinit pat_disable(const char *reason) { pat_enabled = 0; printk(KERN_INFO "%s\n", reason); @@ -42,6 +42,11 @@ static int __init nopat(char *str) return 0; } early_param("nopat", nopat); +#else +static inline void pat_disable(const char *reason) +{ + (void)reason; +} #endif @@ -78,16 +83,20 @@ void pat_init(void) if (!pat_enabled) return; - /* Paranoia check. */ - if (!cpu_has_pat && boot_pat_state) { - /* - * If this happens we are on a secondary CPU, but - * switched to PAT on the boot CPU. We have no way to - * undo PAT. - */ - printk(KERN_ERR "PAT enabled, " - "but not supported by secondary CPU\n"); - BUG(); + if (!cpu_has_pat) { + if (!boot_pat_state) { + pat_disable("PAT not supported by CPU."); + return; + } else { + /* + * If this happens we are on a secondary CPU, but + * switched to PAT on the boot CPU. We have no way to + * undo PAT. + */ + printk(KERN_ERR "PAT enabled, " + "but not supported by secondary CPU\n"); + BUG(); + } } /* Set PWT to Write-Combining. All other bits stay the same */ -- cgit v1.2.3 From b38b0665905538e76e26f2a4c686179abb1f69f6 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 23 Jan 2009 17:20:50 -0800 Subject: x86: filter CPU features dependent on unavailable CPUID levels Impact: Fixes potential crashes on misconfigured systems. Some CPU features require specific CPUID levels to be available in order to function, as they contain information about the operation of a specific feature. However, some BIOSes and virtualization software provide the ability to mask CPUID levels in order to support legacy operating systems. We try to enable such CPUID levels when we know how to do it, but for the remaining cases, filter out such CPU features when there is no way for us to support them. Do this in one place, in the CPUID code, with a table-driven approach. Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/common.c | 47 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0f8656361e0..21f086b4c1a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -212,6 +212,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) } #endif +/* + * Some CPU features depend on higher CPUID levels, which may not always + * be available due to CPUID level capping or broken virtualization + * software. Add those features to this table to auto-disable them. + */ +struct cpuid_dependent_feature { + u32 feature; + u32 level; +}; +static const struct cpuid_dependent_feature __cpuinitconst +cpuid_dependent_features[] = { + { X86_FEATURE_MWAIT, 0x00000005 }, + { X86_FEATURE_DCA, 0x00000009 }, + { X86_FEATURE_XSAVE, 0x0000000d }, + { 0, 0 } +}; + +static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) +{ + const struct cpuid_dependent_feature *df; + for (df = cpuid_dependent_features; df->feature; df++) { + /* + * Note: cpuid_level is set to -1 if unavailable, but + * extended_extended_level is set to 0 if unavailable + * and the legitimate extended levels are all negative + * when signed; hence the weird messing around with + * signs here... + */ + if (cpu_has(c, df->feature) && + ((s32)df->feature < 0 ? + (u32)df->feature > (u32)c->extended_cpuid_level : + (s32)df->feature > (s32)c->cpuid_level)) { + clear_cpu_cap(c, df->feature); + if (warn) + printk(KERN_WARNING + "CPU: CPU feature %s disabled " + "due to lack of CPUID level 0x%x\n", + x86_cap_flags[df->feature], + df->level); + } + } +} + /* * Naming convention should be: [()] * This table only is used unless init_() below doesn't set it; @@ -573,6 +616,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP c->cpu_index = boot_cpu_id; #endif + filter_cpuid_features(c, false); } void __init early_cpu_init(void) @@ -706,6 +750,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) * we do "generic changes." */ + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + /* If the model name is still unset, do table lookup. */ if (!c->x86_model_id[0]) { char *p; -- cgit v1.2.3 From 2d4d57db692ea790e185656516e6ebe8791f1788 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Jan 2009 12:50:13 -0800 Subject: x86: micro-optimize __raw_read_trylock() The current version of __raw_read_trylock starts with decrementing the lock and read its new value as a separate operation after that. That makes 3 dereferences (read, write (after sub), read) whereas a single atomic_dec_return does only two pointers dereferences (read, write). 
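For clarity, the before/after fast path can be sketched as follows. This is a simplified illustration using the generic atomic API, not the actual spinlock.h hunk (which follows below); the function names exist only for the comparison.

static inline int read_trylock_before(atomic_t *count)
{
	atomic_dec(count);			/* read + write (after sub) */
	if (atomic_read(count) >= 0)		/* third access: separate read */
		return 1;
	atomic_inc(count);			/* lost the race, undo */
	return 0;
}

static inline int read_trylock_after(atomic_t *count)
{
	if (atomic_dec_return(count) >= 0)	/* read + write only */
		return 1;
	atomic_inc(count);
	return 0;
}
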
Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- arch/x86/include/asm/spinlock.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index d17c91981da..4d3dcc51cac 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -329,8 +329,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; - atomic_dec(count); - if (atomic_read(count) >= 0) + if (atomic_dec_return(count) >= 0) return 1; atomic_inc(count); return 0; -- cgit v1.2.3 From 34707bcd0452aba644396767bc9fb61585bdab4f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jan 2009 14:18:05 +0100 Subject: x86, debug: remove early_printk() #ifdefs from head_32.S Impact: cleanup Remove such constructs: #ifdef CONFIG_EARLY_PRINTK call early_printk #else call printk #endif Not only are they ugly, they are also pointless: a call to printk() maps to early_printk during early bootup anyway, if CONFIG_EARLY_PRINTK is enabled. Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_32.S | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index e835b4eea70..9f141071160 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -548,11 +548,7 @@ early_fault: pushl %eax pushl %edx /* trapno */ pushl $fault_msg -#ifdef CONFIG_EARLY_PRINTK - call early_printk -#else call printk -#endif #endif call dump_stack hlt_loop: @@ -580,11 +576,7 @@ ignore_int: pushl 32(%esp) pushl 40(%esp) pushl $int_msg -#ifdef CONFIG_EARLY_PRINTK - call early_printk -#else call printk -#endif addl $(5*4),%esp popl %ds popl %es -- cgit v1.2.3 From d5e397cb49b53381e4c99a064ca733c665646de8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jan 2009 06:09:00 +0100 Subject: x86: improve early fault/irq printout Impact: add a stack dump to early IRQs/faults Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_32.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 9f141071160..84d05a4d7fc 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -577,6 +577,9 @@ ignore_int: pushl 40(%esp) pushl $int_msg call printk + + call dump_stack + addl $(5*4),%esp popl %ds popl %es @@ -652,7 +655,7 @@ early_recursion_flag: .long 0 int_msg: - .asciz "Unknown interrupt or fault at EIP %p %p %p\n" + .asciz "Unknown interrupt or fault at: %p %p %p\n" fault_msg: /* fault info: */ -- cgit v1.2.3 From 0d77e7f04d5da160307f4f5c030a171e004f602b Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: merge setup_per_cpu_maps() into setup_per_cpu_areas() Impact: minor optimization Eliminates the need for two loops over possible cpus. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 48 +++++++++++++++++------------------------- 1 file changed, 19 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 90b8e154bb5..d0b1476490a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -97,33 +97,6 @@ static inline void setup_cpu_local_masks(void) #endif /* CONFIG_X86_32 */ #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -/* - * Copy data used in early init routines from the initial arrays to the - * per cpu data areas. 
These arrays then become expendable and the - * *_early_ptr's are zeroed indicating that the static arrays are gone. - */ -static void __init setup_per_cpu_maps(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - per_cpu(x86_cpu_to_apicid, cpu) = - early_per_cpu_map(x86_cpu_to_apicid, cpu); - per_cpu(x86_bios_cpu_apicid, cpu) = - early_per_cpu_map(x86_bios_cpu_apicid, cpu); -#ifdef X86_64_NUMA - per_cpu(x86_cpu_to_node_map, cpu) = - early_per_cpu_map(x86_cpu_to_node_map, cpu); -#endif - } - - /* indicate the early static arrays will soon be gone */ - early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; - early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; -#ifdef X86_64_NUMA - early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; -#endif -} #ifdef CONFIG_X86_64 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { @@ -181,6 +154,19 @@ void __init setup_per_cpu_areas(void) per_cpu_offset(cpu) = ptr - __per_cpu_start; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; + /* + * Copy data used in early init routines from the initial arrays to the + * per cpu data areas. These arrays then become expendable and the + * *_early_ptr's are zeroed indicating that the static arrays are gone. + */ + per_cpu(x86_cpu_to_apicid, cpu) = + early_per_cpu_map(x86_cpu_to_apicid, cpu); + per_cpu(x86_bios_cpu_apicid, cpu) = + early_per_cpu_map(x86_bios_cpu_apicid, cpu); +#ifdef X86_64_NUMA + per_cpu(x86_cpu_to_node_map, cpu) = + early_per_cpu_map(x86_cpu_to_node_map, cpu); +#endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; @@ -195,8 +181,12 @@ void __init setup_per_cpu_areas(void) DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } - /* Setup percpu data maps */ - setup_per_cpu_maps(); + /* indicate the early static arrays will soon be gone */ + early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; + early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; +#ifdef X86_64_NUMA + early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; +#endif /* Setup node to cpumask map */ setup_node_to_cpumask_map(); -- cgit v1.2.3 From 6470aff619fbb9dff8dfe8afa5033084cd55ca20 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: move 64-bit NUMA code Impact: Code movement, no functional change. Move the 64-bit NUMA code from setup_percpu.c to numa_64.c Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/topology.h | 6 + arch/x86/kernel/setup_percpu.c | 237 +--------------------------------------- arch/x86/mm/numa_64.c | 217 ++++++++++++++++++++++++++++++++++++ 3 files changed, 228 insertions(+), 232 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 10022ed3a4b..77cfb2cfb38 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node) return &node_to_cpumask_map[node]; } +static inline void setup_node_to_cpumask_map(void) { } + #else /* CONFIG_X86_64 */ /* Mappings between node number and cpus on that node. 
*/ @@ -120,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node) #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ +extern void setup_node_to_cpumask_map(void); + /* * Replace default node_to_cpumask_ptr with optimized version * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" @@ -218,6 +222,8 @@ static inline int node_to_first_cpu(int node) return first_cpu(cpu_online_map); } +static inline void setup_node_to_cpumask_map(void) { } + /* * Replace default node_to_cpumask_ptr with optimized version * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d0b1476490a..cb6d622520b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -51,32 +51,6 @@ DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); -#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) -#define X86_64_NUMA 1 /* (used later) */ -DEFINE_PER_CPU(int, node_number) = 0; -EXPORT_PER_CPU_SYMBOL(node_number); - -/* - * Map cpu index to node index - */ -DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); - -/* - * Which logical CPUs are on which nodes - */ -cpumask_t *node_to_cpumask_map; -EXPORT_SYMBOL(node_to_cpumask_map); - -/* - * Setup node_to_cpumask_map - */ -static void __init setup_node_to_cpumask_map(void); - -#else -static inline void setup_node_to_cpumask_map(void) { } -#endif - #ifdef CONFIG_X86_64 /* correctly size the local cpu masks */ @@ -163,13 +137,13 @@ void __init setup_per_cpu_areas(void) early_per_cpu_map(x86_cpu_to_apicid, cpu); per_cpu(x86_bios_cpu_apicid, cpu) = early_per_cpu_map(x86_bios_cpu_apicid, cpu); -#ifdef X86_64_NUMA - per_cpu(x86_cpu_to_node_map, cpu) = - early_per_cpu_map(x86_cpu_to_node_map, cpu); -#endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; +#ifdef CONFIG_NUMA + per_cpu(x86_cpu_to_node_map, cpu) = + early_per_cpu_map(x86_cpu_to_node_map, cpu); +#endif /* * Up to this point, CPU0 has been using .data.init * area. Reload %gs offset for CPU0. @@ -184,7 +158,7 @@ void __init setup_per_cpu_areas(void) /* indicate the early static arrays will soon be gone */ early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; -#ifdef X86_64_NUMA +#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif @@ -197,204 +171,3 @@ void __init setup_per_cpu_areas(void) #endif -#ifdef X86_64_NUMA - -/* - * Allocate node_to_cpumask_map based on number of available nodes - * Requires node_possible_map to be valid. - * - * Note: node_to_cpumask() is not valid until after this is done. - * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 
- */ -static void __init setup_node_to_cpumask_map(void) -{ - unsigned int node, num = 0; - cpumask_t *map; - - /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) { - for_each_node_mask(node, node_possible_map) - num = node; - nr_node_ids = num + 1; - } - - /* allocate the map */ - map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); - DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); - - pr_debug("Node to cpumask map at %p for %d nodes\n", - map, nr_node_ids); - - /* node_to_cpumask() will now work */ - node_to_cpumask_map = map; -} - -void __cpuinit numa_set_node(int cpu, int node) -{ - int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); - - /* early setting, no percpu area yet */ - if (cpu_to_node_map) { - cpu_to_node_map[cpu] = node; - return; - } - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) { - printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); - dump_stack(); - return; - } -#endif - per_cpu(x86_cpu_to_node_map, cpu) = node; - - if (node != NUMA_NO_NODE) - per_cpu(node_number, cpu) = node; -} - -void __cpuinit numa_clear_node(int cpu) -{ - numa_set_node(cpu, NUMA_NO_NODE); -} - -#ifndef CONFIG_DEBUG_PER_CPU_MAPS - -void __cpuinit numa_add_cpu(int cpu) -{ - cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} - -void __cpuinit numa_remove_cpu(int cpu) -{ - cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); -} - -#else /* CONFIG_DEBUG_PER_CPU_MAPS */ - -/* - * --------- debug versions of the numa functions --------- - */ -static void __cpuinit numa_set_cpumask(int cpu, int enable) -{ - int node = early_cpu_to_node(cpu); - cpumask_t *mask; - char buf[64]; - - if (node_to_cpumask_map == NULL) { - printk(KERN_ERR "node_to_cpumask_map NULL\n"); - dump_stack(); - return; - } - - mask = &node_to_cpumask_map[node]; - if (enable) - cpu_set(cpu, *mask); - else - cpu_clear(cpu, *mask); - - cpulist_scnprintf(buf, sizeof(buf), mask); - printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", - enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); -} - -void __cpuinit numa_add_cpu(int cpu) -{ - numa_set_cpumask(cpu, 1); -} - -void __cpuinit numa_remove_cpu(int cpu) -{ - numa_set_cpumask(cpu, 0); -} - -int cpu_to_node(int cpu) -{ - if (early_per_cpu_ptr(x86_cpu_to_node_map)) { - printk(KERN_WARNING - "cpu_to_node(%d): usage too early!\n", cpu); - dump_stack(); - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - } - return per_cpu(x86_cpu_to_node_map, cpu); -} -EXPORT_SYMBOL(cpu_to_node); - -/* - * Same function as cpu_to_node() but used if called before the - * per_cpu areas are setup. - */ -int early_cpu_to_node(int cpu) -{ - if (early_per_cpu_ptr(x86_cpu_to_node_map)) - return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - - if (!per_cpu_offset(cpu)) { - printk(KERN_WARNING - "early_cpu_to_node(%d): no per_cpu area!\n", cpu); - dump_stack(); - return NUMA_NO_NODE; - } - return per_cpu(x86_cpu_to_node_map, cpu); -} - - -/* empty cpumask */ -static const cpumask_t cpu_mask_none; - -/* - * Returns a pointer to the bitmask of CPUs on Node 'node'. 
- */ -const cpumask_t *cpumask_of_node(int node) -{ - if (node_to_cpumask_map == NULL) { - printk(KERN_WARNING - "cpumask_of_node(%d): no node_to_cpumask_map!\n", - node); - dump_stack(); - return (const cpumask_t *)&cpu_online_map; - } - if (node >= nr_node_ids) { - printk(KERN_WARNING - "cpumask_of_node(%d): node > nr_node_ids(%d)\n", - node, nr_node_ids); - dump_stack(); - return &cpu_mask_none; - } - return &node_to_cpumask_map[node]; -} -EXPORT_SYMBOL(cpumask_of_node); - -/* - * Returns a bitmask of CPUs on Node 'node'. - * - * Side note: this function creates the returned cpumask on the stack - * so with a high NR_CPUS count, excessive stack space is used. The - * node_to_cpumask_ptr function should be used whenever possible. - */ -cpumask_t node_to_cpumask(int node) -{ - if (node_to_cpumask_map == NULL) { - printk(KERN_WARNING - "node_to_cpumask(%d): no node_to_cpumask_map!\n", node); - dump_stack(); - return cpu_online_map; - } - if (node >= nr_node_ids) { - printk(KERN_WARNING - "node_to_cpumask(%d): node > nr_node_ids(%d)\n", - node, nr_node_ids); - dump_stack(); - return cpu_mask_none; - } - return node_to_cpumask_map[node]; -} -EXPORT_SYMBOL(node_to_cpumask); - -/* - * --------- end of debug versions of the numa functions --------- - */ - -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ - -#endif /* X86_64_NUMA */ - diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 71a14f89f89..08d140fbc31 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -20,6 +20,12 @@ #include #include +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +# define DBG(x...) printk(KERN_DEBUG x) +#else +# define DBG(x...) +#endif + struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); @@ -33,6 +39,21 @@ int numa_off __initdata; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; +DEFINE_PER_CPU(int, node_number) = 0; +EXPORT_PER_CPU_SYMBOL(node_number); + +/* + * Map cpu index to node index + */ +DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); + +/* + * Which logical CPUs are on which nodes + */ +cpumask_t *node_to_cpumask_map; +EXPORT_SYMBOL(node_to_cpumask_map); + /* * Given a shift value, try to populate memnodemap[] * Returns : @@ -640,3 +661,199 @@ void __init init_cpu_to_node(void) #endif +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: node_to_cpumask() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 
+ */ +void __init setup_node_to_cpumask_map(void) +{ + unsigned int node, num = 0; + cpumask_t *map; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) { + for_each_node_mask(node, node_possible_map) + num = node; + nr_node_ids = num + 1; + } + + /* allocate the map */ + map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); + DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); + + pr_debug("Node to cpumask map at %p for %d nodes\n", + map, nr_node_ids); + + /* node_to_cpumask() will now work */ + node_to_cpumask_map = map; +} + +void __cpuinit numa_set_node(int cpu, int node) +{ + int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); + + /* early setting, no percpu area yet */ + if (cpu_to_node_map) { + cpu_to_node_map[cpu] = node; + return; + } + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) { + printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); + dump_stack(); + return; + } +#endif + per_cpu(x86_cpu_to_node_map, cpu) = node; + + if (node != NUMA_NO_NODE) + per_cpu(node_number, cpu) = node; +} + +void __cpuinit numa_clear_node(int cpu) +{ + numa_set_node(cpu, NUMA_NO_NODE); +} + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS + +void __cpuinit numa_add_cpu(int cpu) +{ + cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); +} + +#else /* CONFIG_DEBUG_PER_CPU_MAPS */ + +/* + * --------- debug versions of the numa functions --------- + */ +static void __cpuinit numa_set_cpumask(int cpu, int enable) +{ + int node = early_cpu_to_node(cpu); + cpumask_t *mask; + char buf[64]; + + if (node_to_cpumask_map == NULL) { + printk(KERN_ERR "node_to_cpumask_map NULL\n"); + dump_stack(); + return; + } + + mask = &node_to_cpumask_map[node]; + if (enable) + cpu_set(cpu, *mask); + else + cpu_clear(cpu, *mask); + + cpulist_scnprintf(buf, sizeof(buf), mask); + printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", + enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); +} + +void __cpuinit numa_add_cpu(int cpu) +{ + numa_set_cpumask(cpu, 1); +} + +void __cpuinit numa_remove_cpu(int cpu) +{ + numa_set_cpumask(cpu, 0); +} + +int cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) { + printk(KERN_WARNING + "cpu_to_node(%d): usage too early!\n", cpu); + dump_stack(); + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} +EXPORT_SYMBOL(cpu_to_node); + +/* + * Same function as cpu_to_node() but used if called before the + * per_cpu areas are setup. + */ +int early_cpu_to_node(int cpu) +{ + if (early_per_cpu_ptr(x86_cpu_to_node_map)) + return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; + + if (!per_cpu_offset(cpu)) { + printk(KERN_WARNING + "early_cpu_to_node(%d): no per_cpu area!\n", cpu); + dump_stack(); + return NUMA_NO_NODE; + } + return per_cpu(x86_cpu_to_node_map, cpu); +} + + +/* empty cpumask */ +static const cpumask_t cpu_mask_none; + +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. 
+ */ +const cpumask_t *cpumask_of_node(int node) +{ + if (node_to_cpumask_map == NULL) { + printk(KERN_WARNING + "cpumask_of_node(%d): no node_to_cpumask_map!\n", + node); + dump_stack(); + return (const cpumask_t *)&cpu_online_map; + } + if (node >= nr_node_ids) { + printk(KERN_WARNING + "cpumask_of_node(%d): node > nr_node_ids(%d)\n", + node, nr_node_ids); + dump_stack(); + return &cpu_mask_none; + } + return &node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); + +/* + * Returns a bitmask of CPUs on Node 'node'. + * + * Side note: this function creates the returned cpumask on the stack + * so with a high NR_CPUS count, excessive stack space is used. The + * node_to_cpumask_ptr function should be used whenever possible. + */ +cpumask_t node_to_cpumask(int node) +{ + if (node_to_cpumask_map == NULL) { + printk(KERN_WARNING + "node_to_cpumask(%d): no node_to_cpumask_map!\n", node); + dump_stack(); + return cpu_online_map; + } + if (node >= nr_node_ids) { + printk(KERN_WARNING + "node_to_cpumask(%d): node > nr_node_ids(%d)\n", + node, nr_node_ids); + dump_stack(); + return cpu_mask_none; + } + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(node_to_cpumask); + +/* + * --------- end of debug versions of the numa functions --------- + */ + +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ -- cgit v1.2.3 From 2f2f52bad72f5e1ca5d1b9ad00a7b57a8cbd9159 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: move setup_cpu_local_masks() Impact: Code movement, no functional change. Move setup_cpu_local_masks() to kernel/cpu/common.c, where the masks are defined. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/cpumask.h | 4 ++++ arch/x86/kernel/cpu/common.c | 9 +++++++++ arch/x86/kernel/setup_percpu.c | 19 ------------------- 3 files changed, 13 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index 26c6dad9047..a7f3c75f8ad 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -10,6 +10,8 @@ extern cpumask_var_t cpu_callout_mask; extern cpumask_var_t cpu_initialized_mask; extern cpumask_var_t cpu_sibling_setup_mask; +extern void setup_cpu_local_masks(void); + #else /* CONFIG_X86_32 */ extern cpumask_t cpu_callin_map; @@ -22,6 +24,8 @@ extern cpumask_t cpu_sibling_setup_map; #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) +static inline void setup_cpu_local_masks(void) { } + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 99904f288d6..67e30c8a282 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -52,6 +52,15 @@ cpumask_var_t cpu_initialized_mask; /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; +/* correctly size the local cpu masks */ +void setup_cpu_local_masks(void) +{ + alloc_bootmem_cpumask_var(&cpu_initialized_mask); + alloc_bootmem_cpumask_var(&cpu_callin_mask); + alloc_bootmem_cpumask_var(&cpu_callout_mask); + alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); +} + #else /* CONFIG_X86_32 */ cpumask_t cpu_callin_map; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index cb6d622520b..7bebdba8eb8 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -51,25 +51,6 @@ DEFINE_EARLY_PER_CPU(u16, 
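The "early per-cpu variables" moved here follow a copy-then-retire pattern: a static array serves lookups before the per-cpu areas exist, its contents are copied into the real per-cpu variables during setup_per_cpu_areas(), and the early pointer is then cleared. A minimal standalone model of that pattern (an illustration only, not the kernel's DEFINE_EARLY_PER_CPU machinery, and copy_early_maps() is a made-up name):

/* usable from very early boot, before per-cpu areas are allocated */
static u16 x86_cpu_to_apicid_early_map[NR_CPUS] __initdata;
static u16 *x86_cpu_to_apicid_early_ptr = x86_cpu_to_apicid_early_map;

/* the permanent home, usable once per-cpu areas are set up */
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;

static void __init copy_early_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(x86_cpu_to_apicid, cpu) =
			x86_cpu_to_apicid_early_map[cpu];

	/* retire the early array: later readers must use the per-cpu copy */
	x86_cpu_to_apicid_early_ptr = NULL;
}
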
x86_bios_cpu_apicid, BAD_APICID); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); -#ifdef CONFIG_X86_64 - -/* correctly size the local cpu masks */ -static void setup_cpu_local_masks(void) -{ - alloc_bootmem_cpumask_var(&cpu_initialized_mask); - alloc_bootmem_cpumask_var(&cpu_callin_mask); - alloc_bootmem_cpumask_var(&cpu_callout_mask); - alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); -} - -#else /* CONFIG_X86_32 */ - -static inline void setup_cpu_local_masks(void) -{ -} - -#endif /* CONFIG_X86_32 */ - #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA #ifdef CONFIG_X86_64 -- cgit v1.2.3 From 74631a248dc2c2129a96f6b8b706ed54bb5c3d3c Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: always page-align per-cpu area start and size Impact: cleanup The way the code is written, align is always PAGE_SIZE. Simplify the code by removing the align variable. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 7bebdba8eb8..5d4a4964a8b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -69,15 +69,12 @@ EXPORT_SYMBOL(__per_cpu_offset); */ void __init setup_per_cpu_areas(void) { - ssize_t size, old_size; + ssize_t size; char *ptr; int cpu; - unsigned long align = 1; /* Copy section for each CPU (we discard the original) */ - old_size = PERCPU_ENOUGH_ROOM; - align = max_t(unsigned long, PAGE_SIZE, align); - size = roundup(old_size, align); + size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE); pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); @@ -86,20 +83,17 @@ void __init setup_per_cpu_areas(void) for_each_possible_cpu(cpu) { #ifndef CONFIG_NEED_MULTIPLE_NODES - ptr = __alloc_bootmem(size, align, - __pa(MAX_DMA_ADDRESS)); + ptr = alloc_bootmem_pages(size); #else int node = early_cpu_to_node(cpu); if (!node_online(node) || !NODE_DATA(node)) { - ptr = __alloc_bootmem(size, align, - __pa(MAX_DMA_ADDRESS)); + ptr = alloc_bootmem_pages(size); pr_info("cpu %d has no node %d or node-local memory\n", cpu, node); pr_debug("per cpu data for cpu%d at %016lx\n", cpu, __pa(ptr)); } else { - ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, - __pa(MAX_DMA_ADDRESS)); + ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); pr_debug("per cpu data for cpu%d on node%d at %016lx\n", cpu, node, __pa(ptr)); } -- cgit v1.2.3 From ec70de8b04bf37213982a5e8f303bc38679f3f8e Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: move apic variables to apic.c Impact: Code movement Move the variable definitions to apic.c. Ifdef the copying of the two early per-cpu variables, since Voyager doesn't use them. 
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/apic.c | 18 ++++++++++++++++++ arch/x86/kernel/setup_percpu.c | 22 ++-------------------- 2 files changed, 20 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 1df341a528a..c6f15647eba 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -60,6 +60,24 @@ # error SPURIOUS_APIC_VECTOR definition error #endif +unsigned int num_processors; +unsigned disabled_cpus __cpuinitdata; +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL(boot_cpu_physical_apicid); +unsigned int max_physical_apicid; + +/* Bitmask of physically existing CPUs */ +physid_mask_t phys_cpu_present_map; + +/* + * Map cpu index to physical APIC ID + */ +DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); +DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + #ifdef CONFIG_X86_32 /* * Knob to control our willingness to enable the local APIC. diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 5d4a4964a8b..d367996693e 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -31,26 +31,6 @@ DEFINE_PER_CPU(int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); #endif -#ifdef CONFIG_X86_LOCAL_APIC -unsigned int num_processors; -unsigned disabled_cpus __cpuinitdata; -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; -EXPORT_SYMBOL(boot_cpu_physical_apicid); -unsigned int max_physical_apicid; - -/* Bitmask of physically existing CPUs */ -physid_mask_t phys_cpu_present_map; -#endif - -/* - * Map cpu index to physical APIC ID - */ -DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); -DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); - #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA #ifdef CONFIG_X86_64 @@ -108,10 +88,12 @@ void __init setup_per_cpu_areas(void) * per cpu data areas. These arrays then become expendable and the * *_early_ptr's are zeroed indicating that the static arrays are gone. 
*/ +#ifdef CONFIG_X86_LOCAL_APIC per_cpu(x86_cpu_to_apicid, cpu) = early_per_cpu_map(x86_cpu_to_apicid, cpu); per_cpu(x86_bios_cpu_apicid, cpu) = early_per_cpu_map(x86_bios_cpu_apicid, cpu); +#endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; -- cgit v1.2.3 From 996db817e3d1529d711e55b938d72ae4060b39fd Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:47 +0900 Subject: x86: only compile setup_percpu.o on SMP Impact: Minor build optimization Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/Makefile | 3 ++- arch/x86/kernel/setup_percpu.c | 6 ------ 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index a99437c965c..73de055c29c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp) obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o -obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o +obj-y += setup.o i8259.o irqinit_$(BITS).o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o @@ -59,6 +59,7 @@ apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o +obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d367996693e..f30ff691c34 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -22,14 +22,8 @@ # define DBG(x...) #endif -/* - * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but - * voyager wants cpu_number too. - */ -#ifdef CONFIG_SMP DEFINE_PER_CPU(int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); -#endif #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -- cgit v1.2.3 From 1688401a0fddba8991aa5c0943b8ae9583998d60 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: move this_cpu_offset Impact: Small cleanup Define BOOT_PERCPU_OFFSET and use it for this_cpu_offset and __per_cpu_offset initializers. 
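Spelling out why the boot-time offset differs between 32-bit and 64-bit, in a sketch that mirrors the hunk below with explanatory comments added; the rationale is inferred from the use of __per_cpu_load as the boot offset, it is not stated in the patch itself.

#ifdef CONFIG_X86_64
/*
 * 64-bit per-cpu symbols are linked zero-based here, so until the real
 * per-cpu areas are allocated a CPU must use the load address of the
 * initial .data.percpu section as its offset.
 */
#define BOOT_PERCPU_OFFSET	((unsigned long)__per_cpu_load)
#else
/* 32-bit per-cpu symbols are linked at their final addresses: offset 0 */
#define BOOT_PERCPU_OFFSET	0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = BOOT_PERCPU_OFFSET,	/* boot CPU; others set in setup_per_cpu_areas() */
};
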
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 15 ++++++++++----- arch/x86/kernel/smpcommon.c | 7 ------- 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index f30ff691c34..36c2e81dfc3 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -25,15 +25,20 @@ DEFINE_PER_CPU(int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); +#ifdef CONFIG_X86_64 +#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) +#else +#define BOOT_PERCPU_OFFSET 0 +#endif + +DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; +EXPORT_PER_CPU_SYMBOL(this_cpu_off); + #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -#ifdef CONFIG_X86_64 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { - [0] = (unsigned long)__per_cpu_load, + [0] = BOOT_PERCPU_OFFSET, }; -#else -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; -#endif EXPORT_SYMBOL(__per_cpu_offset); /* diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index add36b4e37c..5ec29a1a846 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -5,13 +5,6 @@ #include #include -#ifdef CONFIG_X86_64 -DEFINE_PER_CPU(unsigned long, this_cpu_off) = (unsigned long)__per_cpu_load; -#else -DEFINE_PER_CPU(unsigned long, this_cpu_off); -#endif -EXPORT_PER_CPU_SYMBOL(this_cpu_off); - #ifdef CONFIG_X86_32 /* * Initialize the CPU's GDT. This is either the boot CPU doing itself -- cgit v1.2.3 From 34019be1cd2941128b5de6d7c0fbdb51f967d268 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: don't assume boot cpu is #0 Impact: minor cleanup Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 36c2e81dfc3..be77f1a1231 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -15,6 +15,7 @@ #include #include #include +#include #ifdef CONFIG_DEBUG_PER_CPU_MAPS # define DBG(x...) printk(KERN_DEBUG x) @@ -37,7 +38,7 @@ EXPORT_PER_CPU_SYMBOL(this_cpu_off); #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { - [0] = BOOT_PERCPU_OFFSET, + [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, }; EXPORT_SYMBOL(__per_cpu_offset); @@ -101,10 +102,10 @@ void __init setup_per_cpu_areas(void) early_per_cpu_map(x86_cpu_to_node_map, cpu); #endif /* - * Up to this point, CPU0 has been using .data.init - * area. Reload %gs offset for CPU0. + * Up to this point, the boot CPU has been using .data.init + * area. Reload %gs offset for the boot CPU. */ - if (cpu == 0) + if (cpu == boot_cpu_id) load_gs_base(cpu); #endif -- cgit v1.2.3 From 89c9c4c58ee86e6e8802597271f23679e0c46647 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: make Voyager use x86 per-cpu setup. Impact: standardize all x86 platforms on same setup code With the preceding changes, Voyager can use the same per-cpu setup code as all the other x86 platforms. 
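The [0 ... NR_CPUS-1] form used in the hunk above is GCC's designated range initializer extension; a minimal standalone illustration of the construct (array name and size are made up):

	/* every slot gets the same initial value, equivalent to spelling out
	 * { v, v, v, v, v, v, v, v } by hand */
	static unsigned long example_offsets[8] = {
		[0 ... 7] = 0xdeadUL,
	};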
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/Kconfig | 2 +- arch/x86/kernel/setup_percpu.c | 5 ----- arch/x86/mach-voyager/voyager_smp.c | 3 --- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5a29b792cb8..d6218e6c982 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -133,7 +133,7 @@ config ARCH_HAS_CACHE_LINE_SIZE def_bool y config HAVE_SETUP_PER_CPU_AREA - def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER) + def_bool y config HAVE_CPUMASK_OF_CPU_MAP def_bool X86_64_SMP diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index be77f1a1231..599dc1cc1da 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -35,8 +35,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_number); DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; EXPORT_PER_CPU_SYMBOL(this_cpu_off); -#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA - unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, }; @@ -125,6 +123,3 @@ void __init setup_per_cpu_areas(void) /* Setup cpu initialized, callin, callout masks */ setup_cpu_local_masks(); } - -#endif - diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 96f15b09a4c..dd82f2052f3 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -531,7 +531,6 @@ static void __init do_boot_cpu(__u8 cpu) stack_start.sp = (void *)idle->thread.sp; init_gdt(cpu); - per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; per_cpu(current_task, cpu) = idle; early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); irq_ctx_init(cpu); @@ -1749,7 +1748,6 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) static void __cpuinit voyager_smp_prepare_boot_cpu(void) { init_gdt(smp_processor_id()); - per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; switch_to_new_gdt(); cpu_set(smp_processor_id(), cpu_online_map); @@ -1782,7 +1780,6 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus) void __init smp_setup_processor_id(void) { current_thread_info()->cpu = hard_smp_processor_id(); - percpu_write(cpu_number, hard_smp_processor_id()); } static void voyager_send_call_func(cpumask_t callmask) -- cgit v1.2.3 From b2d2f4312b117a6cc647c8521e2643a88771f757 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: initialize per-cpu GDT segment in per-cpu setup Impact: cleanup Rename init_gdt() to setup_percpu_segment(), and move it to setup_percpu.c. 
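The descriptor packed below has its base set to per_cpu_offset(cpu), which is what makes %fs-relative per-cpu accesses land in the right copy on 32-bit; conceptually (a simplified sketch, not kernel code, helper name invented):

	/* a %fs-relative access adds the segment base (= per_cpu_offset(cpu))
	 * to the variable's link-time offset, so each CPU transparently
	 * reaches its own copy of the variable */
	static inline void *this_cpu_ptr_sketch(unsigned long fs_base,
						unsigned long var_offset)
	{
		return (void *)(fs_base + var_offset);
	}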
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/processor.h | 1 - arch/x86/kernel/Makefile | 3 +-- arch/x86/kernel/setup_percpu.c | 14 ++++++++++++++ arch/x86/kernel/smpboot.c | 4 ---- arch/x86/kernel/smpcommon.c | 25 ------------------------- arch/x86/mach-voyager/voyager_smp.c | 2 -- arch/x86/xen/smp.c | 1 - 7 files changed, 15 insertions(+), 35 deletions(-) delete mode 100644 arch/x86/kernel/smpcommon.c (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 48676b943b9..32c30b02b51 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -778,7 +778,6 @@ extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); extern void switch_to_new_gdt(void); extern void cpu_init(void); -extern void init_gdt(int cpu); static inline unsigned long get_debugctlmsr(void) { diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 73de055c29c..37fa30bada1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -60,8 +60,7 @@ obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp.o obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o obj-$(CONFIG_SMP) += setup_percpu.o -obj-$(CONFIG_X86_32_SMP) += smpcommon.o -obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o +obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 599dc1cc1da..bcca3a7b374 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -40,6 +40,19 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +static inline void setup_percpu_segment(int cpu) +{ +#ifdef CONFIG_X86_32 + struct desc_struct gdt; + + pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, + 0x2 | DESCTYPE_S, 0x8); + gdt.s = 1; + write_gdt_entry(get_cpu_gdt_table(cpu), + GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); +#endif +} + /* * Great future plan: * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. @@ -81,6 +94,7 @@ void __init setup_per_cpu_areas(void) per_cpu_offset(cpu) = ptr - __per_cpu_start; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; + setup_percpu_segment(cpu); /* * Copy data used in early init routines from the initial arrays to the * per cpu data areas. These arrays then become expendable and the diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index def770b57b5..f9dbcff4354 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -793,7 +793,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) do_rest: per_cpu(current_task, cpu) = c_idle.idle; #ifdef CONFIG_X86_32 - init_gdt(cpu); /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); #else @@ -1186,9 +1185,6 @@ out: void __init native_smp_prepare_boot_cpu(void) { int me = smp_processor_id(); -#ifdef CONFIG_X86_32 - init_gdt(me); -#endif switch_to_new_gdt(); /* already set me in cpu_online_mask in boot_cpu_init() */ cpumask_set_cpu(me, cpu_callout_mask); diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c deleted file mode 100644 index 5ec29a1a846..00000000000 --- a/arch/x86/kernel/smpcommon.c +++ /dev/null @@ -1,25 +0,0 @@ -/* - * SMP stuff which is common to all sub-architectures. 
- */ -#include -#include -#include - -#ifdef CONFIG_X86_32 -/* - * Initialize the CPU's GDT. This is either the boot CPU doing itself - * (still using the master per-cpu area), or a CPU doing it for a - * secondary which will soon come up. - */ -__cpuinit void init_gdt(int cpu) -{ - struct desc_struct gdt; - - pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF, - 0x2 | DESCTYPE_S, 0x8); - gdt.s = 1; - - write_gdt_entry(get_cpu_gdt_table(cpu), - GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); -} -#endif diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index dd82f2052f3..331cd6d5648 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -530,7 +530,6 @@ static void __init do_boot_cpu(__u8 cpu) /* init_tasks (in sched.c) is indexed logically */ stack_start.sp = (void *)idle->thread.sp; - init_gdt(cpu); per_cpu(current_task, cpu) = idle; early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); irq_ctx_init(cpu); @@ -1747,7 +1746,6 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) static void __cpuinit voyager_smp_prepare_boot_cpu(void) { - init_gdt(smp_processor_id()); switch_to_new_gdt(); cpu_set(smp_processor_id(), cpu_online_map); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 72c2eb9b64c..7735e3dd359 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -281,7 +281,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) per_cpu(current_task, cpu) = idle; #ifdef CONFIG_X86_32 - init_gdt(cpu); irq_ctx_init(cpu); #else clear_tsk_thread_flag(idle, TIF_FORK); -- cgit v1.2.3 From 1825b8edc2034c012ae48f797d74efd1bd9d4f72 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: remove extra barriers from load_gs_base() Impact: optimization mb() generates an mfence instruction, which is not needed here. Only a compiler barrier is needed, and that is handled by the memory clobber in the wrmsrl function. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/processor.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 32c30b02b51..794234eba31 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -397,10 +397,7 @@ DECLARE_PER_CPU(char *, irq_stack_ptr); static inline void load_gs_base(int cpu) { - /* Memory clobbers used to order pda/percpu accesses */ - mb(); wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); - mb(); } #endif -- cgit v1.2.3 From 2697fbd5faf19c84c17441b1752bdcbdcfd1248c Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 27 Jan 2009 12:56:48 +0900 Subject: x86: load new GDT after setting up boot cpu per-cpu area Impact: sync 32 and 64-bit code Merge load_gs_base() into switch_to_new_gdt(). Load the GDT and per-cpu state for the boot cpu when its new area is set up. 
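On the barrier point in the load_gs_base() change above, this is roughly what the two primitives amount to on x86-64 (paraphrased sketches, not the kernel's exact definitions):

	/* compiler barrier: emits no instruction; the "memory" clobber only
	 * keeps the compiler from caching or reordering memory accesses across
	 * it - wrmsrl()'s own asm already carries this clobber */
	#define compiler_barrier_sketch()	asm volatile("" : : : "memory")

	/* mb(): additionally emits mfence, serializing the CPU's own loads and
	 * stores - unnecessary here, where only compiler ordering matters */
	#define mb_sketch()			asm volatile("mfence" : : : "memory")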
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/processor.h | 5 ----- arch/x86/kernel/cpu/common.c | 15 +++++++++------ arch/x86/kernel/setup_percpu.c | 6 +++--- 3 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 794234eba31..befa20b4a68 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -394,11 +394,6 @@ union irq_stack_union { DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); DECLARE_PER_CPU(char *, irq_stack_ptr); - -static inline void load_gs_base(int cpu) -{ - wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); -} #endif extern void print_cpu_info(struct cpuinfo_x86 *); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 67e30c8a282..0c766b80d91 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -258,12 +258,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; void switch_to_new_gdt(void) { struct desc_ptr gdt_descr; + int cpu = smp_processor_id(); - gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); + gdt_descr.address = (long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); + /* Reload the per-cpu base */ #ifdef CONFIG_X86_32 - asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); + loadsegment(fs, __KERNEL_PERCPU); +#else + loadsegment(gs, 0); + wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); #endif } @@ -968,10 +973,6 @@ void __cpuinit cpu_init(void) struct task_struct *me; int i; - loadsegment(fs, 0); - loadsegment(gs, 0); - load_gs_base(cpu); - #ifdef CONFIG_NUMA if (cpu != 0 && percpu_read(node_number) == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) @@ -993,6 +994,8 @@ void __cpuinit cpu_init(void) */ switch_to_new_gdt(); + loadsegment(fs, 0); + load_idt((const struct desc_ptr *)&idt_descr); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index bcca3a7b374..4caa78d7cb1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -112,14 +112,14 @@ void __init setup_per_cpu_areas(void) #ifdef CONFIG_NUMA per_cpu(x86_cpu_to_node_map, cpu) = early_per_cpu_map(x86_cpu_to_node_map, cpu); +#endif #endif /* * Up to this point, the boot CPU has been using .data.init - * area. Reload %gs offset for the boot CPU. + * area. Reload any changed state for the boot CPU. */ if (cpu == boot_cpu_id) - load_gs_base(cpu); -#endif + switch_to_new_gdt(); DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } -- cgit v1.2.3 From 22f25138c345ec46a13744c93c093ff822cd98d1 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 27 Jan 2009 14:21:37 +0900 Subject: x86: fix build breakage on voyage Impact: build fix x86_cpu_to_apicid and x86_bios_cpu_apicid aren't defined for voyage. Earlier patch forgot to conditionalize early percpu clearing. Fix it. 
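The early-per-cpu machinery behind these x86_cpu_to_apicid hunks, in rough sketch form (simplified; the real DEFINE_EARLY_PER_CPU/early_per_cpu_ptr macros differ in detail and the names below are illustrative):

	/* a boot-time static array backs the map until per-cpu areas exist;
	 * setup_per_cpu_areas() copies it into the per-cpu copies and then
	 * NULLs the early pointer so the static array can be discarded */
	static u16 cpu_to_apicid_early_map[NR_CPUS];
	static u16 *cpu_to_apicid_early_ptr = cpu_to_apicid_early_map;
	DEFINE_PER_CPU(u16, cpu_to_apicid_sketch);

	static u16 read_cpu_apicid(int cpu)
	{
		if (cpu_to_apicid_early_ptr)	/* before setup_per_cpu_areas() */
			return cpu_to_apicid_early_ptr[cpu];
		return per_cpu(cpu_to_apicid_sketch, cpu);
	}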
Signed-off-by: James Bottomley Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 4caa78d7cb1..c7458ead22d 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -125,8 +125,10 @@ void __init setup_per_cpu_areas(void) } /* indicate the early static arrays will soon be gone */ +#ifdef CONFIG_X86_LOCAL_APIC early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; +#endif #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif -- cgit v1.2.3 From cf3997f507624757f149fcc42b76fb03c151fb65 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 27 Jan 2009 14:25:05 +0900 Subject: x86: clean up indentation in setup_per_cpu_areas() Impact: cosmetic cleanup Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index c7458ead22d..0d1e7ac439f 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -96,22 +96,25 @@ void __init setup_per_cpu_areas(void) per_cpu(cpu_number, cpu) = cpu; setup_percpu_segment(cpu); /* - * Copy data used in early init routines from the initial arrays to the - * per cpu data areas. These arrays then become expendable and the - * *_early_ptr's are zeroed indicating that the static arrays are gone. + * Copy data used in early init routines from the + * initial arrays to the per cpu data areas. These + * arrays then become expendable and the *_early_ptr's + * are zeroed indicating that the static arrays are + * gone. */ #ifdef CONFIG_X86_LOCAL_APIC per_cpu(x86_cpu_to_apicid, cpu) = - early_per_cpu_map(x86_cpu_to_apicid, cpu); + early_per_cpu_map(x86_cpu_to_apicid, cpu); per_cpu(x86_bios_cpu_apicid, cpu) = - early_per_cpu_map(x86_bios_cpu_apicid, cpu); + early_per_cpu_map(x86_bios_cpu_apicid, cpu); #endif #ifdef CONFIG_X86_64 per_cpu(irq_stack_ptr, cpu) = - per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; + per_cpu(irq_stack_union.irq_stack, cpu) + + IRQ_STACK_SIZE - 64; #ifdef CONFIG_NUMA per_cpu(x86_cpu_to_node_map, cpu) = - early_per_cpu_map(x86_cpu_to_node_map, cpu); + early_per_cpu_map(x86_cpu_to_node_map, cpu); #endif #endif /* -- cgit v1.2.3 From 18801be7f805b891876a6676ec7fac2e1acdec13 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 25 Dec 2008 18:17:09 +0900 Subject: sh: make gpio_get/set_value() O(1) This patch modifies the table based SuperH gpio implementation to make use of direct table lookups. With this change the functions gpio_get_value() and gpio_set_value() are O(1). Tested on Migo-R using bitbanging mmc. Performance is improved from 11 KBytes/s to 26 Kbytes/s. 
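The O(1) trick here is to cache each pin's data-register index and bit position in spare bits of its (now 16-bit) flags word at registration time, so the per-access table walk disappears; the pack/unpack pattern, using the same field layout as the hunks below (helper names invented):

	#define DBIT_SHIFT	5			/* bits 9:5  - bit within register */
	#define DBIT_MASK	(0x1f << DBIT_SHIFT)
	#define DREG_SHIFT	10			/* bits 15:10 - data register index */
	#define DREG_MASK	(0x3f << DREG_SHIFT)

	static void cache_data_reg(unsigned short *flags, unsigned reg, unsigned bit)
	{
		*flags &= ~(DREG_MASK | DBIT_MASK);
		*flags |= (reg << DREG_SHIFT) | (bit << DBIT_SHIFT);
	}

	static void lookup_data_reg(unsigned short flags, unsigned *reg, unsigned *bit)
	{
		*reg = (flags & DREG_MASK) >> DREG_SHIFT;	/* O(1), no table walk */
		*bit = (flags & DBIT_MASK) >> DBIT_SHIFT;
	}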
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/gpio.h | 7 ++++++- arch/sh/kernel/gpio.c | 48 +++++++++++++++++++++++++++++++++++----------- 2 files changed, 43 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 90673658eb1..942fefa61c7 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h @@ -20,7 +20,7 @@ #endif typedef unsigned short pinmux_enum_t; -typedef unsigned char pinmux_flag_t; +typedef unsigned short pinmux_flag_t; #define PINMUX_TYPE_NONE 0 #define PINMUX_TYPE_FUNCTION 1 @@ -34,6 +34,11 @@ typedef unsigned char pinmux_flag_t; #define PINMUX_FLAG_WANT_PULLUP (1 << 3) #define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) +#define PINMUX_FLAG_DBIT_SHIFT 5 +#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) +#define PINMUX_FLAG_DREG_SHIFT 10 +#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) + struct pinmux_gpio { pinmux_enum_t enum_id; pinmux_flag_t flags; diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c index d3716536103..fe92ba151b8 100644 --- a/arch/sh/kernel/gpio.c +++ b/arch/sh/kernel/gpio.c @@ -95,14 +95,13 @@ static int read_write_reg(unsigned long reg, unsigned long reg_width, return 0; } -static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio, - struct pinmux_data_reg **drp, int *bitp) +static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio) { - pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id; + struct pinmux_gpio *gpiop = &gpioc->gpios[gpio]; struct pinmux_data_reg *data_reg; int k, n; - if (!enum_in_range(enum_id, &gpioc->data)) + if (!enum_in_range(gpiop->enum_id, &gpioc->data)) return -1; k = 0; @@ -113,19 +112,38 @@ static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio, break; for (n = 0; n < data_reg->reg_width; n++) { - if (data_reg->enum_ids[n] == enum_id) { - *drp = data_reg; - *bitp = n; + if (data_reg->enum_ids[n] == gpiop->enum_id) { + gpiop->flags &= ~PINMUX_FLAG_DREG; + gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT); + gpiop->flags &= ~PINMUX_FLAG_DBIT; + gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT); return 0; - } } k++; } + BUG(); + return -1; } +static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio, + struct pinmux_data_reg **drp, int *bitp) +{ + struct pinmux_gpio *gpiop = &gpioc->gpios[gpio]; + int k, n; + + if (!enum_in_range(gpiop->enum_id, &gpioc->data)) + return -1; + + k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT; + n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT; + *drp = gpioc->data_regs + k; + *bitp = n; + return 0; +} + static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id, struct pinmux_cfg_reg **crp, int *indexp, unsigned long **cntp) @@ -341,7 +359,8 @@ int __gpio_request(unsigned gpio) BUG(); } - gpioc->gpios[gpio].flags = pinmux_type; + gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE; + gpioc->gpios[gpio].flags |= pinmux_type; ret = 0; err_unlock: @@ -364,7 +383,8 @@ void gpio_free(unsigned gpio) pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE; pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE); - gpioc->gpios[gpio].flags = PINMUX_TYPE_NONE; + gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE; + gpioc->gpios[gpio].flags |= PINMUX_TYPE_NONE; spin_unlock_irqrestore(&gpio_lock, flags); } @@ -401,7 +421,8 @@ static int pinmux_direction(struct pinmux_info *gpioc, GPIO_CFG_REQ) != 0) BUG(); - gpioc->gpios[gpio].flags = new_pinmux_type; + gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE; + 
gpioc->gpios[gpio].flags |= new_pinmux_type; ret = 0; err_out: @@ -494,9 +515,14 @@ EXPORT_SYMBOL(gpio_set_value); int register_pinmux(struct pinmux_info *pip) { + int k; + registered_gpio = pip; pr_info("pinmux: %s handling gpio %d -> %d\n", pip->name, pip->first_gpio, pip->last_gpio); + for (k = pip->first_gpio; k <= pip->last_gpio; k++) + setup_data_reg(pip, k); + return 0; } -- cgit v1.2.3 From 0fc64cc0a27288e77ee8e12648d59632649371fc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 25 Dec 2008 18:17:18 +0900 Subject: sh: lockless gpio_get_value() This patch separates the register read and write functions to allow lockless gpio_get_value(). Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/gpio.c | 126 +++++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 63 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c index fe92ba151b8..f8397a09491 100644 --- a/arch/sh/kernel/gpio.c +++ b/arch/sh/kernel/gpio.c @@ -46,9 +46,8 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r) return 1; } -static int read_write_reg(unsigned long reg, unsigned long reg_width, - unsigned long field_width, unsigned long in_pos, - unsigned long value, int do_write) +static int gpio_read_reg(unsigned long reg, unsigned long reg_width, + unsigned long field_width, unsigned long in_pos) { unsigned long data, mask, pos; @@ -57,10 +56,9 @@ static int read_write_reg(unsigned long reg, unsigned long reg_width, pos = reg_width - ((in_pos + 1) * field_width); #ifdef DEBUG - pr_info("%s, addr = %lx, value = %ld, pos = %ld, " + pr_info("read_reg: addr = %lx, pos = %ld, " "r_width = %ld, f_width = %ld\n", - do_write ? "write" : "read", reg, value, pos, - reg_width, field_width); + reg, pos, reg_width, field_width); #endif switch (reg_width) { @@ -75,24 +73,38 @@ static int read_write_reg(unsigned long reg, unsigned long reg_width, break; } - if (!do_write) - return (data >> pos) & mask; + return (data >> pos) & mask; +} + +static void gpio_write_reg(unsigned long reg, unsigned long reg_width, + unsigned long field_width, unsigned long in_pos, + unsigned long value) +{ + unsigned long mask, pos; + + mask = (1 << field_width) - 1; + pos = reg_width - ((in_pos + 1) * field_width); - data &= ~(mask << pos); - data |= value << pos; +#ifdef DEBUG + pr_info("write_reg addr = %lx, value = %ld, pos = %ld, " + "r_width = %ld, f_width = %ld\n", + reg, value, pos, reg_width, field_width); +#endif + + mask = ~(mask << pos); + value = value << pos; switch (reg_width) { case 8: - ctrl_outb(data, reg); + ctrl_outb((ctrl_inb(reg) & mask) | value, reg); break; case 16: - ctrl_outw(data, reg); + ctrl_outw((ctrl_inw(reg) & mask) | value, reg); break; case 32: - ctrl_outl(data, reg); + ctrl_outl((ctrl_inl(reg) & mask) | value, reg); break; } - return 0; } static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio) @@ -205,9 +217,9 @@ static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio, return -1; } -static int write_config_reg(struct pinmux_info *gpioc, - struct pinmux_cfg_reg *crp, - int index) +static void write_config_reg(struct pinmux_info *gpioc, + struct pinmux_cfg_reg *crp, + int index) { unsigned long ncomb, pos, value; @@ -215,8 +227,7 @@ static int write_config_reg(struct pinmux_info *gpioc, pos = index / ncomb; value = index % ncomb; - return read_write_reg(crp->reg, crp->reg_width, - crp->field_width, pos, value, 1); + gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value); } 
static int check_config_reg(struct pinmux_info *gpioc, @@ -229,8 +240,8 @@ static int check_config_reg(struct pinmux_info *gpioc, pos = index / ncomb; value = index % ncomb; - if (read_write_reg(crp->reg, crp->reg_width, - crp->field_width, pos, 0, 0) == value) + if (gpio_read_reg(crp->reg, crp->reg_width, + crp->field_width, pos) == value) return 0; return -1; @@ -238,8 +249,8 @@ static int check_config_reg(struct pinmux_info *gpioc, enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE }; -int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio, - int pinmux_type, int cfg_mode) +static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio, + int pinmux_type, int cfg_mode) { struct pinmux_cfg_reg *cr = NULL; pinmux_enum_t enum_id; @@ -305,8 +316,7 @@ int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio, break; case GPIO_CFG_REQ: - if (write_config_reg(gpioc, cr, index) != 0) - goto out_err; + write_config_reg(gpioc, cr, index); *cntp = *cntp + 1; break; @@ -393,9 +403,12 @@ EXPORT_SYMBOL(gpio_free); static int pinmux_direction(struct pinmux_info *gpioc, unsigned gpio, int new_pinmux_type) { - int ret, pinmux_type; + int pinmux_type; + int ret = -EINVAL; + + if (!gpioc) + goto err_out; - ret = -EINVAL; pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE; switch (pinmux_type) { @@ -433,68 +446,59 @@ int gpio_direction_input(unsigned gpio) { struct pinmux_info *gpioc = gpio_controller(gpio); unsigned long flags; - int ret = -EINVAL; - - if (!gpioc) - goto err_out; + int ret; spin_lock_irqsave(&gpio_lock, flags); ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_INPUT); spin_unlock_irqrestore(&gpio_lock, flags); - err_out: + return ret; } EXPORT_SYMBOL(gpio_direction_input); -static int __gpio_get_set_value(struct pinmux_info *gpioc, - unsigned gpio, int value, - int do_write) +static void __gpio_set_value(struct pinmux_info *gpioc, + unsigned gpio, int value) { struct pinmux_data_reg *dr = NULL; int bit = 0; - if (get_data_reg(gpioc, gpio, &dr, &bit) != 0) + if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) BUG(); else - value = read_write_reg(dr->reg, dr->reg_width, - 1, bit, !!value, do_write); - - return value; + gpio_write_reg(dr->reg, dr->reg_width, 1, bit, !!value); } int gpio_direction_output(unsigned gpio, int value) { struct pinmux_info *gpioc = gpio_controller(gpio); unsigned long flags; - int ret = -EINVAL; - - if (!gpioc) - goto err_out; + int ret; spin_lock_irqsave(&gpio_lock, flags); - __gpio_get_set_value(gpioc, gpio, value, 1); + __gpio_set_value(gpioc, gpio, value); ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_OUTPUT); spin_unlock_irqrestore(&gpio_lock, flags); - err_out: + return ret; } EXPORT_SYMBOL(gpio_direction_output); -int gpio_get_value(unsigned gpio) +static int __gpio_get_value(struct pinmux_info *gpioc, unsigned gpio) { - struct pinmux_info *gpioc = gpio_controller(gpio); - unsigned long flags; - int value = 0; + struct pinmux_data_reg *dr = NULL; + int bit = 0; - if (!gpioc) + if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) { BUG(); - else { - spin_lock_irqsave(&gpio_lock, flags); - value = __gpio_get_set_value(gpioc, gpio, 0, 0); - spin_unlock_irqrestore(&gpio_lock, flags); + return 0; } - return value; + return gpio_read_reg(dr->reg, dr->reg_width, 1, bit); +} + +int gpio_get_value(unsigned gpio) +{ + return __gpio_get_value(gpio_controller(gpio), gpio); } EXPORT_SYMBOL(gpio_get_value); @@ -503,13 +507,9 @@ void gpio_set_value(unsigned gpio, int value) struct pinmux_info *gpioc = gpio_controller(gpio); unsigned 
long flags; - if (!gpioc) - BUG(); - else { - spin_lock_irqsave(&gpio_lock, flags); - __gpio_get_set_value(gpioc, gpio, value, 1); - spin_unlock_irqrestore(&gpio_lock, flags); - } + spin_lock_irqsave(&gpio_lock, flags); + __gpio_set_value(gpioc, gpio, value); + spin_unlock_irqrestore(&gpio_lock, flags); } EXPORT_SYMBOL(gpio_set_value); -- cgit v1.2.3 From 3292094e88ce6b76714dad8ec4b43d7c5c12ada2 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 25 Dec 2008 18:17:26 +0900 Subject: sh: lockless gpio_set_value() This patch optimizes the gpio data register handling for gpio_set_value(). Instead of using the good old spinlock-plus-read-modify-write strategy we now use a shadow register and atomic operations. This improves the bitbanging mmc performance on Migo-R from 26 Kbytes/s to 40 Kbytes/s. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/gpio.h | 2 +- arch/sh/kernel/gpio.c | 106 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 82 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 942fefa61c7..46a6d7914df 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h @@ -59,7 +59,7 @@ struct pinmux_cfg_reg { .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \ struct pinmux_data_reg { - unsigned long reg, reg_width; + unsigned long reg, reg_width, reg_shadow; pinmux_enum_t *enum_ids; }; diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c index f8397a09491..28013567372 100644 --- a/arch/sh/kernel/gpio.c +++ b/arch/sh/kernel/gpio.c @@ -46,6 +46,62 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r) return 1; } +static unsigned long gpio_read_raw_reg(unsigned long reg, + unsigned long reg_width) +{ + switch (reg_width) { + case 8: + return ctrl_inb(reg); + case 16: + return ctrl_inw(reg); + case 32: + return ctrl_inl(reg); + } + + BUG(); + return 0; +} + +static void gpio_write_raw_reg(unsigned long reg, + unsigned long reg_width, + unsigned long data) +{ + switch (reg_width) { + case 8: + ctrl_outb(data, reg); + return; + case 16: + ctrl_outw(data, reg); + return; + case 32: + ctrl_outl(data, reg); + return; + } + + BUG(); +} + +static void gpio_write_bit(struct pinmux_data_reg *dr, + unsigned long in_pos, unsigned long value) +{ + unsigned long pos; + + pos = dr->reg_width - (in_pos + 1); + +#ifdef DEBUG + pr_info("write_bit addr = %lx, value = %ld, pos = %ld, " + "r_width = %ld\n", + dr->reg, !!value, pos, dr->reg_width); +#endif + + if (value) + set_bit(pos, &dr->reg_shadow); + else + clear_bit(pos, &dr->reg_shadow); + + gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow); +} + static int gpio_read_reg(unsigned long reg, unsigned long reg_width, unsigned long field_width, unsigned long in_pos) { @@ -61,18 +117,7 @@ static int gpio_read_reg(unsigned long reg, unsigned long reg_width, reg, pos, reg_width, field_width); #endif - switch (reg_width) { - case 8: - data = ctrl_inb(reg); - break; - case 16: - data = ctrl_inw(reg); - break; - case 32: - data = ctrl_inl(reg); - break; - } - + data = gpio_read_raw_reg(reg, reg_width); return (data >> pos) & mask; } @@ -140,6 +185,26 @@ static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio) return -1; } +static void setup_data_regs(struct pinmux_info *gpioc) +{ + struct pinmux_data_reg *drp; + int k; + + for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++) + setup_data_reg(gpioc, k); + + k = 0; + while (1) { + drp = gpioc->data_regs + k; + + if 
(!drp->reg_width) + break; + + drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width); + k++; + } +} + static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio, struct pinmux_data_reg **drp, int *bitp) { @@ -465,7 +530,7 @@ static void __gpio_set_value(struct pinmux_info *gpioc, if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) BUG(); else - gpio_write_reg(dr->reg, dr->reg_width, 1, bit, !!value); + gpio_write_bit(dr, bit, value); } int gpio_direction_output(unsigned gpio, int value) @@ -474,8 +539,8 @@ int gpio_direction_output(unsigned gpio, int value) unsigned long flags; int ret; - spin_lock_irqsave(&gpio_lock, flags); __gpio_set_value(gpioc, gpio, value); + spin_lock_irqsave(&gpio_lock, flags); ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_OUTPUT); spin_unlock_irqrestore(&gpio_lock, flags); @@ -504,25 +569,16 @@ EXPORT_SYMBOL(gpio_get_value); void gpio_set_value(unsigned gpio, int value) { - struct pinmux_info *gpioc = gpio_controller(gpio); - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - __gpio_set_value(gpioc, gpio, value); - spin_unlock_irqrestore(&gpio_lock, flags); + __gpio_set_value(gpio_controller(gpio), gpio, value); } EXPORT_SYMBOL(gpio_set_value); int register_pinmux(struct pinmux_info *pip) { - int k; - registered_gpio = pip; + setup_data_regs(pip); pr_info("pinmux: %s handling gpio %d -> %d\n", pip->name, pip->first_gpio, pip->last_gpio); - for (k = pip->first_gpio; k <= pip->last_gpio; k++) - setup_data_reg(pip, k); - return 0; } -- cgit v1.2.3 From 69edbba0021a48fe034849501513930f6175cb5d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 25 Dec 2008 18:17:34 +0900 Subject: sh: use gpiolib This patch updates the SuperH gpio code to make use of gpiolib. The gpiolib callbacks get() and set() are lockless, but we use our own spinlock for the other operations to make sure hardware register bitfield accesses stay atomic. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 +++ arch/sh/boards/Kconfig | 4 +- arch/sh/include/asm/gpio.h | 61 +++++++++++++++----------- arch/sh/kernel/Makefile_32 | 2 +- arch/sh/kernel/Makefile_64 | 2 +- arch/sh/kernel/gpio.c | 106 ++++++++++++++++++++++----------------------- 6 files changed, 99 insertions(+), 83 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ebabe518e72..3a2be227894 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -126,6 +126,13 @@ config ARCH_HAS_ILOG2_U64 config ARCH_NO_VIRT_TO_BUS def_bool y +config ARCH_WANT_OPTIONAL_GPIOLIB + def_bool y + depends on !ARCH_REQUIRE_GPIOLIB + +config ARCH_REQUIRE_GPIOLIB + def_bool n + config IO_TRAPPED bool diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 861914747e4..802d5c475a7 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -165,7 +165,7 @@ config SH_SH7785LCR_29BIT_PHYSMAPS config SH_MIGOR bool "Migo-R" depends on CPU_SUBTYPE_SH7722 - select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB help Select Migo-R if configuring for the SH7722 Migo-R platform by Renesas System Solutions Asia Pte. Ltd. @@ -173,7 +173,7 @@ config SH_MIGOR config SH_AP325RXA bool "AP-325RXA" depends on CPU_SUBTYPE_SH7723 - select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB help Renesas "AP-325RXA" support. Compatible with ALGO SYSTEM CO.,LTD. 
"AP-320A" diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 46a6d7914df..61f93da2c62 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h @@ -19,6 +19,40 @@ #include #endif +#define ARCH_NR_GPIOS 512 +#include + +#ifdef CONFIG_GPIOLIB + +static inline int gpio_get_value(unsigned gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned gpio, int value) +{ + __gpio_set_value(gpio, value); +} + +static inline int gpio_cansleep(unsigned gpio) +{ + return __gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(unsigned gpio) +{ + WARN_ON(1); + return -ENOSYS; +} + +static inline int irq_to_gpio(unsigned int irq) +{ + WARN_ON(1); + return -EINVAL; +} + +#endif /* CONFIG_GPIOLIB */ + typedef unsigned short pinmux_enum_t; typedef unsigned short pinmux_flag_t; @@ -94,34 +128,9 @@ struct pinmux_info { unsigned int gpio_data_size; unsigned long *gpio_in_use; + struct gpio_chip chip; }; int register_pinmux(struct pinmux_info *pip); -int __gpio_request(unsigned gpio); -static inline int gpio_request(unsigned gpio, const char *label) -{ - return __gpio_request(gpio); -} -void gpio_free(unsigned gpio); -int gpio_direction_input(unsigned gpio); -int gpio_direction_output(unsigned gpio, int value); -int gpio_get_value(unsigned gpio); -void gpio_set_value(unsigned gpio, int value); - -/* IRQ modes are unspported */ -static inline int gpio_to_irq(unsigned gpio) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline int irq_to_gpio(unsigned irq) -{ - WARN_ON(1); - return -EINVAL; -} - -#include - #endif /* __ASM_SH_GPIO_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 2e1b86e16ab..7e7d22b5b4c 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -27,7 +27,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_ARCH_REQUIRE_GPIOLIB) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index fe425d7f687..cbcbbb6c049 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -15,6 +15,6 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_ARCH_REQUIRE_GPIOLIB) += gpio.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c index 28013567372..d22e5af699f 100644 --- a/arch/sh/kernel/gpio.c +++ b/arch/sh/kernel/gpio.c @@ -19,22 +19,6 @@ #include #include -static struct pinmux_info *registered_gpio; - -static struct pinmux_info *gpio_controller(unsigned gpio) -{ - if (!registered_gpio) - return NULL; - - if (gpio < registered_gpio->first_gpio) - return NULL; - - if (gpio > registered_gpio->last_gpio) - return NULL; - - return registered_gpio; -} - static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r) { if (enum_id < r->begin) @@ -398,9 +382,14 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio, static DEFINE_SPINLOCK(gpio_lock); -int __gpio_request(unsigned gpio) +static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip) { - struct pinmux_info *gpioc = gpio_controller(gpio); + return container_of(chip, struct pinmux_info, chip); +} + +static int 
sh_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + struct pinmux_info *gpioc = chip_to_pinmux(chip); struct pinmux_data_reg *dummy; unsigned long flags; int i, ret, pinmux_type; @@ -412,30 +401,30 @@ int __gpio_request(unsigned gpio) spin_lock_irqsave(&gpio_lock, flags); - if ((gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE) + if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE) goto err_unlock; /* setup pin function here if no data is associated with pin */ - if (get_data_reg(gpioc, gpio, &dummy, &i) != 0) + if (get_data_reg(gpioc, offset, &dummy, &i) != 0) pinmux_type = PINMUX_TYPE_FUNCTION; else pinmux_type = PINMUX_TYPE_GPIO; if (pinmux_type == PINMUX_TYPE_FUNCTION) { - if (pinmux_config_gpio(gpioc, gpio, + if (pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_DRYRUN) != 0) goto err_unlock; - if (pinmux_config_gpio(gpioc, gpio, + if (pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_REQ) != 0) BUG(); } - gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE; - gpioc->gpios[gpio].flags |= pinmux_type; + gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE; + gpioc->gpios[offset].flags |= pinmux_type; ret = 0; err_unlock: @@ -443,11 +432,10 @@ int __gpio_request(unsigned gpio) err_out: return ret; } -EXPORT_SYMBOL(__gpio_request); -void gpio_free(unsigned gpio) +static void sh_gpio_free(struct gpio_chip *chip, unsigned offset) { - struct pinmux_info *gpioc = gpio_controller(gpio); + struct pinmux_info *gpioc = chip_to_pinmux(chip); unsigned long flags; int pinmux_type; @@ -456,14 +444,13 @@ void gpio_free(unsigned gpio) spin_lock_irqsave(&gpio_lock, flags); - pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE; - pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE); - gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE; - gpioc->gpios[gpio].flags |= PINMUX_TYPE_NONE; + pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE; + pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE); + gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE; + gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE; spin_unlock_irqrestore(&gpio_lock, flags); } -EXPORT_SYMBOL(gpio_free); static int pinmux_direction(struct pinmux_info *gpioc, unsigned gpio, int new_pinmux_type) @@ -507,21 +494,20 @@ static int pinmux_direction(struct pinmux_info *gpioc, return ret; } -int gpio_direction_input(unsigned gpio) +static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { - struct pinmux_info *gpioc = gpio_controller(gpio); + struct pinmux_info *gpioc = chip_to_pinmux(chip); unsigned long flags; int ret; spin_lock_irqsave(&gpio_lock, flags); - ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_INPUT); + ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT); spin_unlock_irqrestore(&gpio_lock, flags); return ret; } -EXPORT_SYMBOL(gpio_direction_input); -static void __gpio_set_value(struct pinmux_info *gpioc, +static void sh_gpio_set_value(struct pinmux_info *gpioc, unsigned gpio, int value) { struct pinmux_data_reg *dr = NULL; @@ -533,22 +519,22 @@ static void __gpio_set_value(struct pinmux_info *gpioc, gpio_write_bit(dr, bit, value); } -int gpio_direction_output(unsigned gpio, int value) +static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) { - struct pinmux_info *gpioc = gpio_controller(gpio); + struct pinmux_info *gpioc = chip_to_pinmux(chip); unsigned long flags; int ret; - __gpio_set_value(gpioc, gpio, value); + sh_gpio_set_value(gpioc, offset, value); spin_lock_irqsave(&gpio_lock, flags); - ret = 
pinmux_direction(gpioc, gpio, PINMUX_TYPE_OUTPUT); + ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT); spin_unlock_irqrestore(&gpio_lock, flags); return ret; } -EXPORT_SYMBOL(gpio_direction_output); -static int __gpio_get_value(struct pinmux_info *gpioc, unsigned gpio) +static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio) { struct pinmux_data_reg *dr = NULL; int bit = 0; @@ -561,24 +547,38 @@ static int __gpio_get_value(struct pinmux_info *gpioc, unsigned gpio) return gpio_read_reg(dr->reg, dr->reg_width, 1, bit); } -int gpio_get_value(unsigned gpio) +static int sh_gpio_get(struct gpio_chip *chip, unsigned offset) { - return __gpio_get_value(gpio_controller(gpio), gpio); + return sh_gpio_get_value(chip_to_pinmux(chip), offset); } -EXPORT_SYMBOL(gpio_get_value); -void gpio_set_value(unsigned gpio, int value) +static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { - __gpio_set_value(gpio_controller(gpio), gpio, value); + sh_gpio_set_value(chip_to_pinmux(chip), offset, value); } -EXPORT_SYMBOL(gpio_set_value); int register_pinmux(struct pinmux_info *pip) { - registered_gpio = pip; - setup_data_regs(pip); - pr_info("pinmux: %s handling gpio %d -> %d\n", + struct gpio_chip *chip = &pip->chip; + + pr_info("sh pinmux: %s handling gpio %d -> %d\n", pip->name, pip->first_gpio, pip->last_gpio); - return 0; + setup_data_regs(pip); + + chip->request = sh_gpio_request; + chip->free = sh_gpio_free; + chip->direction_input = sh_gpio_direction_input; + chip->get = sh_gpio_get; + chip->direction_output = sh_gpio_direction_output; + chip->set = sh_gpio_set; + + WARN_ON(pip->first_gpio != 0); /* needs testing */ + + chip->label = pip->name; + chip->owner = THIS_MODULE; + chip->base = pip->first_gpio; + chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1; + + return gpiochip_add(chip); } -- cgit v1.2.3 From 57e97cb8bedd06e8a2e562454423d58aab5827ce Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 6 Jan 2009 12:47:12 +0900 Subject: sh: Fix up GENERIC_GPIO build for ARCH_WANT_OPTIONAL_GPIO cases. CPUs define pinmux tables through the optional interface, while boards that require demux of their own require it explicitly. Roll the Makefile rules back to depend on GENERIC_GPIO, which covers both cases. Fixes a link error with an undefined reference to register_pinmux() on optional platforms. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile_32 | 2 +- arch/sh/kernel/Makefile_64 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 7e7d22b5b4c..2e1b86e16ab 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -27,7 +27,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_ARCH_REQUIRE_GPIOLIB) += gpio.o +obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index cbcbbb6c049..fe425d7f687 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -15,6 +15,6 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_ARCH_REQUIRE_GPIOLIB) += gpio.o +obj-$(CONFIG_GENERIC_GPIO) += gpio.o EXTRA_CFLAGS += -Werror -- cgit v1.2.3 From ae5e6d05a606e05e054f816bd01e02f69d38d283 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 7 Jan 2009 17:16:25 +0900 Subject: sh: mach-highlander and mach-rsk require gpiolib. Fix up the build for mach-highlander and mach-rsk. These operated on the assumption that GENERIC_GPIO support with an optional GPIOLIB was possible. This used to be true, but has not been the case since commit-id d56cc8bc661ac1ceded8d45ba2d53bb134fee17d ("sh: use gpiolib"), where the GENERIC_GPIO implementation was rewritten to use GPIOLIB directly. Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 ------- arch/sh/boards/Kconfig | 2 +- arch/sh/boards/mach-highlander/Kconfig | 2 +- arch/sh/boards/mach-rsk/Kconfig | 2 +- 4 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 3a2be227894..ebabe518e72 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -126,13 +126,6 @@ config ARCH_HAS_ILOG2_U64 config ARCH_NO_VIRT_TO_BUS def_bool y -config ARCH_WANT_OPTIONAL_GPIOLIB - def_bool y - depends on !ARCH_REQUIRE_GPIOLIB - -config ARCH_REQUIRE_GPIOLIB - def_bool n - config IO_TRAPPED bool diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 802d5c475a7..c9da37088d2 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -240,7 +240,7 @@ config SH_X3PROTO config SH_MAGIC_PANEL_R2 bool "Magic Panel R2" depends on CPU_SUBTYPE_SH7720 - select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB help Select Magic Panel R2 if configuring for Magic Panel R2. 
diff --git a/arch/sh/boards/mach-highlander/Kconfig b/arch/sh/boards/mach-highlander/Kconfig index 08057f62687..def49cc0a7b 100644 --- a/arch/sh/boards/mach-highlander/Kconfig +++ b/arch/sh/boards/mach-highlander/Kconfig @@ -18,7 +18,7 @@ config SH_R7780MP config SH_R7785RP bool "R7785RP board support" depends on CPU_SUBTYPE_SH7785 - select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB endchoice diff --git a/arch/sh/boards/mach-rsk/Kconfig b/arch/sh/boards/mach-rsk/Kconfig index bff095dffc0..aeff3b04220 100644 --- a/arch/sh/boards/mach-rsk/Kconfig +++ b/arch/sh/boards/mach-rsk/Kconfig @@ -10,7 +10,7 @@ config SH_RSK7201 config SH_RSK7203 bool "RSK7203" - select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB depends on CPU_SUBTYPE_SH7203 endchoice -- cgit v1.2.3 From 042cbaf88ab48e11afb725541e3c2cbf5b483680 Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Tue, 27 Jan 2009 21:45:57 +0100 Subject: x86 setup: fix asm constraints in vesa_store_edid Impact: fix potential miscompile (currently believed non-manifest) As the comment explains, the VBE DDC call can clobber any register. Tell the compiler about that fact. Signed-off-by: Andreas Schwab Signed-off-by: H. Peter Anvin --- arch/x86/boot/video-vesa.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 75115849af3..4a58c8ce3f6 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -269,9 +269,8 @@ void vesa_store_edid(void) we genuinely have to assume all registers are destroyed here. */ asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" - : "+a" (ax), "+b" (bx) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di) + : : "esi", "edx"); if (ax != 0x004f) return; /* No EDID */ @@ -285,9 +284,9 @@ void vesa_store_edid(void) dx = 0; /* EDID block number */ di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ asm(INT10 - : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info), + "+c" (cx), "+D" (di) + : : "esi"); #endif /* CONFIG_FIRMWARE_EDID */ } -- cgit v1.2.3 From 8f6d86dc4178957d9814b1784848012a927a3898 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Jan 2009 21:41:34 +0100 Subject: x86: cpu_init(): remove ugly #ifdef construct around debug register clear Impact: Cleanup While I was looking through the new and improved bootstrap code - great work that, thanks! I found the below a slight improvement. Remove unnecessary ugly #ifdef construct around debug register clear. Signed-off-by: Peter Zijlstra Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/common.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f0025846244..3f272d42d09 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1071,22 +1071,19 @@ void __cpuinit cpu_init(void) */ if (kgdb_connected && arch_kgdb_ops.correct_hw_break) arch_kgdb_ops.correct_hw_break(); - else { + else #endif - /* - * Clear all 6 debug registers: - */ - - set_debugreg(0UL, 0); - set_debugreg(0UL, 1); - set_debugreg(0UL, 2); - set_debugreg(0UL, 3); - set_debugreg(0UL, 6); - set_debugreg(0UL, 7); -#ifdef CONFIG_KGDB - /* If the kgdb is connected no debug regs should be altered. 
*/ + { + /* + * Clear all 6 debug registers: + */ + set_debugreg(0UL, 0); + set_debugreg(0UL, 1); + set_debugreg(0UL, 2); + set_debugreg(0UL, 3); + set_debugreg(0UL, 6); + set_debugreg(0UL, 7); } -#endif fpu_init(); -- cgit v1.2.3 From 6e7a59944a2971c4fb400bfbecb2f68570086b05 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:07:42 +0100 Subject: x86, genapic: refactor genapic_64.h Impact: pre unification cleanup Make genapic_64.h similar to genapic_32.h: reorder fields, unify types and bring in new entries. No existing functionality is affected. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic_64.h | 68 +++++++++++++++++++++++++++++++++------ 1 file changed, 59 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 7bb092c5905..0a5f7122d9f 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -16,13 +16,62 @@ struct genapic { char *name; + + int (*probe)(void); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_registered)(void); + u32 int_delivery_mode; u32 int_dest_mode; - int (*apic_id_registered)(void); + const struct cpumask *(*target_cpus)(void); + + int ESR_DISABLE; + + int apic_destination_logical; + unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + int no_balance_irq; + int no_ioapic_check; + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); void (*init_apic_ldr)(void); + + physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*apicid_to_node)(int logical_apicid); + int (*cpu_to_logical_apicid)(int cpu); + int (*cpu_present_to_apicid)(int mps_cpu); + physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); + void (*enable_apic_mode)(void); +#ifdef CONFIG_X86_32 + u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); +#else + unsigned int (*phys_pkg_id)(int index_msb); +#endif + + /* + * When one of the next two hooks returns 1 the genapic + * is switched to this. 
Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, + char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); + +#ifdef CONFIG_SMP /* ipi */ void (*send_IPI_mask)(const struct cpumask *mask, int vector); void (*send_IPI_mask_allbutself)(const struct cpumask *mask, @@ -30,16 +79,17 @@ struct genapic { void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); - /* */ - unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, - const struct cpumask *andmask); - unsigned int (*phys_pkg_id)(int index_msb); - unsigned int (*get_apic_id)(unsigned long x); - unsigned long (*set_apic_id)(unsigned int id); - unsigned long apic_id_mask; +#endif /* wakeup_secondary_cpu */ int (*wakeup_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*store_NMI_vector)(unsigned short *high, unsigned short *low); + void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); + void (*inquire_remote_apic)(int apicid); }; extern struct genapic *genapic; -- cgit v1.2.3 From 943d0f74d47724d0e33083674c16a834f080af2c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:07:42 +0100 Subject: x86, genapic: refactor genapic_32.h Impact: pre unification cleanup Make genapic_32.h similar to genapic_64.h: reorder fields, unify types and bring in new entries. No existing functionality is affected. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic_32.h | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 4334502d366..5808b7daf0a 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -21,19 +21,28 @@ struct mpc_cpu; struct genapic { char *name; - int (*probe)(void); + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); int (*apic_id_registered)(void); + + u32 int_delivery_mode; + u32 int_dest_mode; + const struct cpumask *(*target_cpus)(void); - int int_delivery_mode; - int int_dest_mode; + int ESR_DISABLE; + int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); + int no_balance_irq; int no_ioapic_check; + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); void (*init_apic_ldr)(void); + physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); void (*setup_apic_routing)(void); @@ -45,22 +54,27 @@ struct genapic { void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); +#ifdef CONFIG_X86_32 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); +#else + unsigned int (*phys_pkg_id)(int index_msb); +#endif - /* mpparse */ - /* When one of the next two hooks returns 1 the genapic - is switched to this. Essentially they are additional probe - functions. 
*/ + /* + * When one of the next two hooks returns 1 the genapic + * is switched to this. Essentially they are additional + * probe functions: + */ int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); - int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - unsigned (*get_apic_id)(unsigned long x); + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); unsigned long apic_id_mask; + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, const struct cpumask *andmask); - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); #ifdef CONFIG_SMP /* ipi */ @@ -69,8 +83,11 @@ struct genapic { int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); #endif + /* wakeup_secondary_cpu */ int (*wakeup_cpu)(int apicid, unsigned long start_eip); + int trampoline_phys_low; int trampoline_phys_high; void (*wait_for_init_deassert)(atomic_t *deassert); -- cgit v1.2.3 From ef7471b13f3ef81074af1972b97355df9df3cdf3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:12:02 +0100 Subject: x86, genapic: unify struct genapic Move over the (now identical) struct genapic definitions from genapic_32/64.h to genapic.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 96 +++++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/genapic_32.h | 93 ------------------------------------- arch/x86/include/asm/genapic_64.h | 91 ------------------------------------- 3 files changed, 96 insertions(+), 184 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index d48bee663a6..3dea66a328e 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -1,5 +1,101 @@ +#ifndef _ASM_X86_GENAPIC_H +#define _ASM_X86_GENAPIC_H + +#include + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch data struct. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ + +struct genapic { + char *name; + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_registered)(void); + + u32 int_delivery_mode; + u32 int_dest_mode; + + const struct cpumask *(*target_cpus)(void); + + int ESR_DISABLE; + + int apic_destination_logical; + unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + int no_balance_irq; + int no_ioapic_check; + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); + void (*init_apic_ldr)(void); + + physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*apicid_to_node)(int logical_apicid); + int (*cpu_to_logical_apicid)(int cpu); + int (*cpu_present_to_apicid)(int mps_cpu); + physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); + void (*enable_apic_mode)(void); +#ifdef CONFIG_X86_32 + u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); +#else + unsigned int (*phys_pkg_id)(int index_msb); +#endif + + /* + * When one of the next two hooks returns 1 the genapic + * is switched to this. 
Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, + char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); + +#ifdef CONFIG_SMP + /* ipi */ + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); +#endif + /* wakeup_secondary_cpu */ + int (*wakeup_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*store_NMI_vector)(unsigned short *high, unsigned short *low); + void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); + void (*inquire_remote_apic)(int apicid); +}; + #ifdef CONFIG_X86_32 # include "genapic_32.h" #else # include "genapic_64.h" #endif + +#endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 5808b7daf0a..a56785ed12a 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -4,99 +4,6 @@ #include #include -/* - * Generic APIC driver interface. - * - * An straight forward mapping of the APIC related parts of the - * x86 subarchitecture interface to a dynamic object. - * - * This is used by the "generic" x86 subarchitecture. - * - * Copyright 2003 Andi Kleen, SuSE Labs. - */ - -struct mpc_bus; -struct mpc_table; -struct mpc_cpu; - -struct genapic { - char *name; - - int (*probe)(void); - int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - int (*apic_id_registered)(void); - - u32 int_delivery_mode; - u32 int_dest_mode; - - const struct cpumask *(*target_cpus)(void); - - int ESR_DISABLE; - - int apic_destination_logical; - unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); - - int no_balance_irq; - int no_ioapic_check; - - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); - void (*init_apic_ldr)(void); - - physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); - - void (*setup_apic_routing)(void); - int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); - int (*cpu_present_to_apicid)(int mps_cpu); - physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - void (*setup_portio_remap)(void); - int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); - void (*enable_apic_mode)(void); -#ifdef CONFIG_X86_32 - u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); -#else - unsigned int (*phys_pkg_id)(int index_msb); -#endif - - /* - * When one of the next two hooks returns 1 the genapic - * is switched to this. 
Essentially they are additional - * probe functions: - */ - int (*mps_oem_check)(struct mpc_table *mpc, char *oem, - char *productid); - - unsigned int (*get_apic_id)(unsigned long x); - unsigned long (*set_apic_id)(unsigned int id); - unsigned long apic_id_mask; - - unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, - const struct cpumask *andmask); - -#ifdef CONFIG_SMP - /* ipi */ - void (*send_IPI_mask)(const struct cpumask *mask, int vector); - void (*send_IPI_mask_allbutself)(const struct cpumask *mask, - int vector); - void (*send_IPI_allbutself)(int vector); - void (*send_IPI_all)(int vector); - void (*send_IPI_self)(int vector); -#endif - /* wakeup_secondary_cpu */ - int (*wakeup_cpu)(int apicid, unsigned long start_eip); - - int trampoline_phys_low; - int trampoline_phys_high; - void (*wait_for_init_deassert)(atomic_t *deassert); - void (*smp_callin_clear_local_apic)(void); - void (*store_NMI_vector)(unsigned short *high, unsigned short *low); - void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); - void (*inquire_remote_apic)(int apicid); -}; - #define APICFUNC(x) .x = x, /* More functions could be probably marked IPIFUNC and save some space diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 0a5f7122d9f..c70ca0d50eb 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -1,97 +1,6 @@ #ifndef _ASM_X86_GENAPIC_64_H #define _ASM_X86_GENAPIC_64_H -#include - -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Generic APIC sub-arch data struct. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. - */ - -struct genapic { - char *name; - - int (*probe)(void); - int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - int (*apic_id_registered)(void); - - u32 int_delivery_mode; - u32 int_dest_mode; - - const struct cpumask *(*target_cpus)(void); - - int ESR_DISABLE; - - int apic_destination_logical; - unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); - - int no_balance_irq; - int no_ioapic_check; - - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); - void (*init_apic_ldr)(void); - - physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); - - void (*setup_apic_routing)(void); - int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); - int (*cpu_present_to_apicid)(int mps_cpu); - physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - void (*setup_portio_remap)(void); - int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); - void (*enable_apic_mode)(void); -#ifdef CONFIG_X86_32 - u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); -#else - unsigned int (*phys_pkg_id)(int index_msb); -#endif - - /* - * When one of the next two hooks returns 1 the genapic - * is switched to this. 
Essentially they are additional - * probe functions: - */ - int (*mps_oem_check)(struct mpc_table *mpc, char *oem, - char *productid); - - unsigned int (*get_apic_id)(unsigned long x); - unsigned long (*set_apic_id)(unsigned int id); - unsigned long apic_id_mask; - - unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, - const struct cpumask *andmask); - -#ifdef CONFIG_SMP - /* ipi */ - void (*send_IPI_mask)(const struct cpumask *mask, int vector); - void (*send_IPI_mask_allbutself)(const struct cpumask *mask, - int vector); - void (*send_IPI_allbutself)(int vector); - void (*send_IPI_all)(int vector); - void (*send_IPI_self)(int vector); -#endif - /* wakeup_secondary_cpu */ - int (*wakeup_cpu)(int apicid, unsigned long start_eip); - - int trampoline_phys_low; - int trampoline_phys_high; - void (*wait_for_init_deassert)(atomic_t *deassert); - void (*smp_callin_clear_local_apic)(void); - void (*store_NMI_vector)(unsigned short *high, unsigned short *low); - void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); - void (*inquire_remote_apic)(int apicid); -}; - extern struct genapic *genapic; extern struct genapic apic_flat; -- cgit v1.2.3 From ced733ec0bfe9a8a5140a7aefdfe802598e4b8c0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:15:06 +0100 Subject: x86, genapic: finish unification Unify remaining bits of genapic_32/64.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 80 ++++++++++++++++++++++++++++++++++++++- arch/x86/include/asm/genapic_32.h | 65 ------------------------------- arch/x86/include/asm/genapic_64.h | 19 ---------- 3 files changed, 78 insertions(+), 86 deletions(-) delete mode 100644 arch/x86/include/asm/genapic_32.h delete mode 100644 arch/x86/include/asm/genapic_64.h (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 3dea66a328e..7df1b48fa35 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -93,9 +93,85 @@ struct genapic { }; #ifdef CONFIG_X86_32 -# include "genapic_32.h" + +#include +#include + +#define APICFUNC(x) .x = x, + +/* More functions could be probably marked IPIFUNC and save some space + in UP GENERICARCH kernels, but I don't have the nerve right now + to untangle this mess. 
-AK */ +#ifdef CONFIG_SMP +#define IPIFUNC(x) APICFUNC(x) #else -# include "genapic_64.h" +#define IPIFUNC(x) #endif +#define APIC_INIT(aname, aprobe) \ +{ \ + .name = aname, \ + .probe = aprobe, \ + .int_delivery_mode = INT_DELIVERY_MODE, \ + .int_dest_mode = INT_DEST_MODE, \ + .no_balance_irq = NO_BALANCE_IRQ, \ + .ESR_DISABLE = esr_disable, \ + .apic_destination_logical = APIC_DEST_LOGICAL, \ + APICFUNC(apic_id_registered) \ + APICFUNC(target_cpus) \ + APICFUNC(check_apicid_used) \ + APICFUNC(check_apicid_present) \ + APICFUNC(init_apic_ldr) \ + APICFUNC(ioapic_phys_id_map) \ + APICFUNC(setup_apic_routing) \ + APICFUNC(multi_timer_check) \ + APICFUNC(apicid_to_node) \ + APICFUNC(cpu_to_logical_apicid) \ + APICFUNC(cpu_present_to_apicid) \ + APICFUNC(apicid_to_cpu_present) \ + APICFUNC(setup_portio_remap) \ + APICFUNC(check_phys_apicid_present) \ + APICFUNC(mps_oem_check) \ + APICFUNC(get_apic_id) \ + .apic_id_mask = APIC_ID_MASK, \ + APICFUNC(cpu_mask_to_apicid) \ + APICFUNC(cpu_mask_to_apicid_and) \ + APICFUNC(vector_allocation_domain) \ + APICFUNC(acpi_madt_oem_check) \ + IPIFUNC(send_IPI_mask) \ + IPIFUNC(send_IPI_allbutself) \ + IPIFUNC(send_IPI_all) \ + APICFUNC(enable_apic_mode) \ + APICFUNC(phys_pkg_id) \ + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ + APICFUNC(wait_for_init_deassert) \ + APICFUNC(smp_callin_clear_local_apic) \ + APICFUNC(store_NMI_vector) \ + APICFUNC(restore_NMI_vector) \ + APICFUNC(inquire_remote_apic) \ +} + +extern struct genapic *genapic; +extern void es7000_update_genapic_to_cluster(void); + +#else /* CONFIG_X86_64: */ + +extern struct genapic *genapic; + +extern struct genapic apic_flat; +extern struct genapic apic_physflat; +extern struct genapic apic_x2apic_cluster; +extern struct genapic apic_x2apic_phys; +extern int acpi_madt_oem_check(char *, char *); + +extern void apic_send_IPI_self(int vector); + +extern struct genapic apic_x2apic_uv_x; +DECLARE_PER_CPU(int, x2apic_extra_bits); + +extern void setup_apic_routing(void); + +#endif /* CONFIG_X86_64 */ + #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h deleted file mode 100644 index a56785ed12a..00000000000 --- a/arch/x86/include/asm/genapic_32.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef _ASM_X86_GENAPIC_32_H -#define _ASM_X86_GENAPIC_32_H - -#include -#include - -#define APICFUNC(x) .x = x, - -/* More functions could be probably marked IPIFUNC and save some space - in UP GENERICARCH kernels, but I don't have the nerve right now - to untangle this mess. 
-AK */ -#ifdef CONFIG_SMP -#define IPIFUNC(x) APICFUNC(x) -#else -#define IPIFUNC(x) -#endif - -#define APIC_INIT(aname, aprobe) \ -{ \ - .name = aname, \ - .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered) \ - APICFUNC(target_cpus) \ - APICFUNC(check_apicid_used) \ - APICFUNC(check_apicid_present) \ - APICFUNC(init_apic_ldr) \ - APICFUNC(ioapic_phys_id_map) \ - APICFUNC(setup_apic_routing) \ - APICFUNC(multi_timer_check) \ - APICFUNC(apicid_to_node) \ - APICFUNC(cpu_to_logical_apicid) \ - APICFUNC(cpu_present_to_apicid) \ - APICFUNC(apicid_to_cpu_present) \ - APICFUNC(setup_portio_remap) \ - APICFUNC(check_phys_apicid_present) \ - APICFUNC(mps_oem_check) \ - APICFUNC(get_apic_id) \ - .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(cpu_mask_to_apicid_and) \ - APICFUNC(vector_allocation_domain) \ - APICFUNC(acpi_madt_oem_check) \ - IPIFUNC(send_IPI_mask) \ - IPIFUNC(send_IPI_allbutself) \ - IPIFUNC(send_IPI_all) \ - APICFUNC(enable_apic_mode) \ - APICFUNC(phys_pkg_id) \ - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ - APICFUNC(wait_for_init_deassert) \ - APICFUNC(smp_callin_clear_local_apic) \ - APICFUNC(store_NMI_vector) \ - APICFUNC(restore_NMI_vector) \ - APICFUNC(inquire_remote_apic) \ -} - -extern struct genapic *genapic; -extern void es7000_update_genapic_to_cluster(void); - -#endif /* _ASM_X86_GENAPIC_32_H */ diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h deleted file mode 100644 index c70ca0d50eb..00000000000 --- a/arch/x86/include/asm/genapic_64.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_X86_GENAPIC_64_H -#define _ASM_X86_GENAPIC_64_H - -extern struct genapic *genapic; - -extern struct genapic apic_flat; -extern struct genapic apic_physflat; -extern struct genapic apic_x2apic_cluster; -extern struct genapic apic_x2apic_phys; -extern int acpi_madt_oem_check(char *, char *); - -extern void apic_send_IPI_self(int vector); - -extern struct genapic apic_x2apic_uv_x; -DECLARE_PER_CPU(int, x2apic_extra_bits); - -extern void setup_apic_routing(void); - -#endif /* _ASM_X86_GENAPIC_64_H */ -- cgit v1.2.3 From 505deeb1a228e5b0bf6ac5d0d78f4a4253a9efe9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:23:22 +0100 Subject: x86, genapic: cleanups Unify genapic.h some more. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 7df1b48fa35..19a5193e965 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -3,6 +3,9 @@ #include +#include +#include + /* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 @@ -13,7 +16,6 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. 
*/ - struct genapic { char *name; @@ -85,6 +87,7 @@ struct genapic { int trampoline_phys_low; int trampoline_phys_high; + void (*wait_for_init_deassert)(atomic_t *deassert); void (*smp_callin_clear_local_apic)(void); void (*store_NMI_vector)(unsigned short *high, unsigned short *low); @@ -92,10 +95,9 @@ struct genapic { void (*inquire_remote_apic)(int apicid); }; -#ifdef CONFIG_X86_32 +extern struct genapic *genapic; -#include -#include +#ifdef CONFIG_X86_32 #define APICFUNC(x) .x = x, @@ -143,8 +145,8 @@ struct genapic { IPIFUNC(send_IPI_all) \ APICFUNC(enable_apic_mode) \ APICFUNC(phys_pkg_id) \ - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ APICFUNC(wait_for_init_deassert) \ APICFUNC(smp_callin_clear_local_apic) \ APICFUNC(store_NMI_vector) \ @@ -152,13 +154,10 @@ struct genapic { APICFUNC(inquire_remote_apic) \ } -extern struct genapic *genapic; extern void es7000_update_genapic_to_cluster(void); #else /* CONFIG_X86_64: */ -extern struct genapic *genapic; - extern struct genapic apic_flat; extern struct genapic apic_physflat; extern struct genapic apic_x2apic_cluster; -- cgit v1.2.3 From 6781d948cc05b02df915650f2eb49550a1631df9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 23:54:23 +0100 Subject: x86, genapic: provide IPI callbacks unconditionally 64-bit x86 uses the IPI callbacks even on UP - so provide them generally. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 19a5193e965..c27efde0523 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -73,7 +73,6 @@ struct genapic { unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, const struct cpumask *andmask); -#ifdef CONFIG_SMP /* ipi */ void (*send_IPI_mask)(const struct cpumask *mask, int vector); void (*send_IPI_mask_allbutself)(const struct cpumask *mask, @@ -81,7 +80,7 @@ struct genapic { void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); -#endif + /* wakeup_secondary_cpu */ int (*wakeup_cpu)(int apicid, unsigned long start_eip); -- cgit v1.2.3 From c8d46cf06dc2e3a8f57a350eb9f9b19fd7f2ffe5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 00:14:11 +0100 Subject: x86: rename 'genapic' to 'apic' Rename genapic-> to apic-> references because in a future chagne we'll open-code all the indirect calls (instead of obscuring them via macros), so we want this reference to be as short as possible. 
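As a rough, stand-alone sketch of what that means for call sites (the struct and driver below are simplified stand-ins, not the kernel's real struct genapic or its headers): today the indirection hides behind wrapper macros such as #define send_IPI_self (apic->send_IPI_self), while the open-coded form this rename prepares for names the apic pointer directly at each call site:

#include <stdio.h>

/* Simplified stand-in for the kernel's APIC driver structure. */
struct apic_driver {
        const char *name;
        void (*send_IPI_self)(int vector);
};

static void flat_send_IPI_self(int vector)
{
        printf("%s: IPI 0x%x to self\n", "flat", vector);
}

static struct apic_driver flat_driver = {
        .name          = "flat",
        .send_IPI_self = flat_send_IPI_self,
};

static struct apic_driver *apic = &flat_driver;

/* Current style: a wrapper macro hides the indirect call. */
#define send_IPI_self (apic->send_IPI_self)

int main(void)
{
        send_IPI_self(0xf0);            /* expands to (apic->send_IPI_self)(0xf0) */

        /* the macro must be undefined before the member can be named directly */
#undef send_IPI_self
        apic->send_IPI_self(0xf0);      /* open-coded form the rename prepares for */

        return 0;
}

Once the calls are open-coded everywhere, the shorter 'apic' name keeps those dereferences roughly as compact as the macro-wrapped calls they replace.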
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h | 22 +++---- arch/x86/include/asm/mach-default/mach_apicdef.h | 6 +- arch/x86/include/asm/mach-default/mach_ipi.h | 8 +-- arch/x86/include/asm/mach-generic/mach_apic.h | 50 +++++++-------- arch/x86/include/asm/mach-generic/mach_apicdef.h | 4 +- arch/x86/include/asm/mach-generic/mach_ipi.h | 6 +- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 14 ++--- arch/x86/kernel/es7000_32.c | 6 +- arch/x86/kernel/genapic_64.c | 16 ++--- arch/x86/kernel/io_apic.c | 80 ++++++++++++------------ arch/x86/kernel/numaq_32.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/mach-generic/es7000.c | 12 ++-- arch/x86/mach-generic/probe.c | 24 +++---- 15 files changed, 127 insertions(+), 127 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index c27efde0523..3970da3245c 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -94,7 +94,7 @@ struct genapic { void (*inquire_remote_apic)(int apicid); }; -extern struct genapic *genapic; +extern struct genapic *apic; #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index cc09cbbee27..2448b927b64 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -22,18 +22,18 @@ static inline const struct cpumask *target_cpus(void) #ifdef CONFIG_X86_64 #include -#define INT_DELIVERY_MODE (genapic->int_delivery_mode) -#define INT_DEST_MODE (genapic->int_dest_mode) -#define TARGET_CPUS (genapic->target_cpus()) -#define apic_id_registered (genapic->apic_id_registered) -#define init_apic_ldr (genapic->init_apic_ldr) -#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) -#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) -#define phys_pkg_id (genapic->phys_pkg_id) -#define vector_allocation_domain (genapic->vector_allocation_domain) +#define INT_DELIVERY_MODE (apic->int_delivery_mode) +#define INT_DEST_MODE (apic->int_dest_mode) +#define TARGET_CPUS (apic->target_cpus()) +#define apic_id_registered (apic->apic_id_registered) +#define init_apic_ldr (apic->init_apic_ldr) +#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) +#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) +#define phys_pkg_id (apic->phys_pkg_id) +#define vector_allocation_domain (apic->vector_allocation_domain) #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) -#define send_IPI_self (genapic->send_IPI_self) -#define wakeup_secondary_cpu (genapic->wakeup_cpu) +#define send_IPI_self (apic->send_IPI_self) +#define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else #define INT_DELIVERY_MODE dest_LowestPrio diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h index 53179936d6c..b4dcc0971c7 100644 --- a/arch/x86/include/asm/mach-default/mach_apicdef.h +++ b/arch/x86/include/asm/mach-default/mach_apicdef.h @@ -4,9 +4,9 @@ #include #ifdef CONFIG_X86_64 -#define APIC_ID_MASK (genapic->apic_id_mask) -#define GET_APIC_ID(x) (genapic->get_apic_id(x)) -#define SET_APIC_ID(x) (genapic->set_apic_id(x)) +#define APIC_ID_MASK (apic->apic_id_mask) +#define GET_APIC_ID(x) (apic->get_apic_id(x)) +#define SET_APIC_ID(x) (apic->set_apic_id(x)) #else #define APIC_ID_MASK (0xF<<24) static inline unsigned get_apic_id(unsigned long x) diff --git 
a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h index 191312d155d..089399643df 100644 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ b/arch/x86/include/asm/mach-default/mach_ipi.h @@ -12,8 +12,8 @@ extern int no_broadcast; #ifdef CONFIG_X86_64 #include -#define send_IPI_mask (genapic->send_IPI_mask) -#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself) +#define send_IPI_mask (apic->send_IPI_mask) +#define send_IPI_mask_allbutself (apic->send_IPI_mask_allbutself) #else static inline void send_IPI_mask(const struct cpumask *mask, int vector) { @@ -39,8 +39,8 @@ static inline void __local_send_IPI_all(int vector) } #ifdef CONFIG_X86_64 -#define send_IPI_allbutself (genapic->send_IPI_allbutself) -#define send_IPI_all (genapic->send_IPI_all) +#define send_IPI_allbutself (apic->send_IPI_allbutself) +#define send_IPI_all (apic->send_IPI_all) #else static inline void send_IPI_allbutself(int vector) { diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 48553e958ad..59972d94ff1 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,32 +3,32 @@ #include -#define esr_disable (genapic->ESR_DISABLE) -#define NO_BALANCE_IRQ (genapic->no_balance_irq) -#define INT_DELIVERY_MODE (genapic->int_delivery_mode) -#define INT_DEST_MODE (genapic->int_dest_mode) +#define esr_disable (apic->ESR_DISABLE) +#define NO_BALANCE_IRQ (apic->no_balance_irq) +#define INT_DELIVERY_MODE (apic->int_delivery_mode) +#define INT_DEST_MODE (apic->int_dest_mode) #undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL (genapic->apic_destination_logical) -#define TARGET_CPUS (genapic->target_cpus()) -#define apic_id_registered (genapic->apic_id_registered) -#define init_apic_ldr (genapic->init_apic_ldr) -#define ioapic_phys_id_map (genapic->ioapic_phys_id_map) -#define setup_apic_routing (genapic->setup_apic_routing) -#define multi_timer_check (genapic->multi_timer_check) -#define apicid_to_node (genapic->apicid_to_node) -#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) -#define cpu_present_to_apicid (genapic->cpu_present_to_apicid) -#define apicid_to_cpu_present (genapic->apicid_to_cpu_present) -#define setup_portio_remap (genapic->setup_portio_remap) -#define check_apicid_present (genapic->check_apicid_present) -#define check_phys_apicid_present (genapic->check_phys_apicid_present) -#define check_apicid_used (genapic->check_apicid_used) -#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) -#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) -#define vector_allocation_domain (genapic->vector_allocation_domain) -#define enable_apic_mode (genapic->enable_apic_mode) -#define phys_pkg_id (genapic->phys_pkg_id) -#define wakeup_secondary_cpu (genapic->wakeup_cpu) +#define APIC_DEST_LOGICAL (apic->apic_destination_logical) +#define TARGET_CPUS (apic->target_cpus()) +#define apic_id_registered (apic->apic_id_registered) +#define init_apic_ldr (apic->init_apic_ldr) +#define ioapic_phys_id_map (apic->ioapic_phys_id_map) +#define setup_apic_routing (apic->setup_apic_routing) +#define multi_timer_check (apic->multi_timer_check) +#define apicid_to_node (apic->apicid_to_node) +#define cpu_to_logical_apicid (apic->cpu_to_logical_apicid) +#define cpu_present_to_apicid (apic->cpu_present_to_apicid) +#define apicid_to_cpu_present (apic->apicid_to_cpu_present) +#define setup_portio_remap (apic->setup_portio_remap) +#define 
check_apicid_present (apic->check_apicid_present) +#define check_phys_apicid_present (apic->check_phys_apicid_present) +#define check_apicid_used (apic->check_apicid_used) +#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) +#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) +#define vector_allocation_domain (apic->vector_allocation_domain) +#define enable_apic_mode (apic->enable_apic_mode) +#define phys_pkg_id (apic->phys_pkg_id) +#define wakeup_secondary_cpu (apic->wakeup_cpu) extern void generic_bigsmp_probe(void); diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h index 68041f3802f..acc9adddb34 100644 --- a/arch/x86/include/asm/mach-generic/mach_apicdef.h +++ b/arch/x86/include/asm/mach-generic/mach_apicdef.h @@ -4,8 +4,8 @@ #ifndef APIC_DEFINITION #include -#define GET_APIC_ID (genapic->get_apic_id) -#define APIC_ID_MASK (genapic->apic_id_mask) +#define GET_APIC_ID (apic->get_apic_id) +#define APIC_ID_MASK (apic->apic_id_mask) #endif #endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h index ffd637e3c3d..75e54bd6cbd 100644 --- a/arch/x86/include/asm/mach-generic/mach_ipi.h +++ b/arch/x86/include/asm/mach-generic/mach_ipi.h @@ -3,8 +3,8 @@ #include -#define send_IPI_mask (genapic->send_IPI_mask) -#define send_IPI_allbutself (genapic->send_IPI_allbutself) -#define send_IPI_all (genapic->send_IPI_all) +#define send_IPI_mask (apic->send_IPI_mask) +#define send_IPI_allbutself (apic->send_IPI_allbutself) +#define send_IPI_all (apic->send_IPI_all) #endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 1ab16b168c8..22006bbee61 100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,12 +1,12 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low) -#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high) -#define wait_for_init_deassert (genapic->wait_for_init_deassert) -#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic) -#define store_NMI_vector (genapic->store_NMI_vector) -#define restore_NMI_vector (genapic->restore_NMI_vector) -#define inquire_remote_apic (genapic->inquire_remote_apic) +#define TRAMPOLINE_PHYS_LOW (apic->trampoline_phys_low) +#define TRAMPOLINE_PHYS_HIGH (apic->trampoline_phys_high) +#define wait_for_init_deassert (apic->wait_for_init_deassert) +#define smp_callin_clear_local_apic (apic->smp_callin_clear_local_apic) +#define store_NMI_vector (apic->store_NMI_vector) +#define restore_NMI_vector (apic->restore_NMI_vector) +#define inquire_remote_apic (apic->inquire_remote_apic) #endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 53699c931ad..20a2a43c2a9 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -188,14 +188,14 @@ static void noop_wait_for_deassert(atomic_t *deassert_not_used) static int __init es7000_update_genapic(void) { - genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; /* MPENTIUMIII */ if (boot_cpu_data.x86 == 6 && (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { es7000_update_genapic_to_cluster(); - 
genapic->wait_for_init_deassert = noop_wait_for_deassert; - genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + apic->wait_for_init_deassert = noop_wait_for_deassert; + apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; } return 0; diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index e656c272115..2b986389a24 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -29,7 +29,7 @@ extern struct genapic apic_x2xpic_uv_x; extern struct genapic apic_x2apic_phys; extern struct genapic apic_x2apic_cluster; -struct genapic __read_mostly *genapic = &apic_flat; +struct genapic __read_mostly *apic = &apic_flat; static struct genapic *apic_probe[] __initdata = { #ifdef CONFIG_X86_UV @@ -46,15 +46,15 @@ static struct genapic *apic_probe[] __initdata = { */ void __init setup_apic_routing(void) { - if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { + if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { if (!intr_remapping_enabled) - genapic = &apic_flat; + apic = &apic_flat; } - if (genapic == &apic_flat) { + if (apic == &apic_flat) { if (max_physical_apicid >= 8) - genapic = &apic_physflat; - printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); + apic = &apic_physflat; + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); } if (x86_quirks->update_genapic) @@ -74,9 +74,9 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) for (i = 0; apic_probe[i]; ++i) { if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { - genapic = apic_probe[i]; + apic = apic_probe[i]; printk(KERN_INFO "Setting APIC routing to %s.\n", - genapic->name); + apic->name); return 1; } } diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index bfb7d734062..7283234229f 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1486,7 +1486,7 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t handle_edge_irq, "edge"); } -static int setup_ioapic_entry(int apic, int irq, +static int setup_ioapic_entry(int apic_id, int irq, struct IO_APIC_route_entry *entry, unsigned int destination, int trigger, int polarity, int vector) @@ -1498,18 +1498,18 @@ static int setup_ioapic_entry(int apic, int irq, #ifdef CONFIG_INTR_REMAP if (intr_remapping_enabled) { - struct intel_iommu *iommu = map_ioapic_to_ir(apic); + struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); struct irte irte; struct IR_IO_APIC_route_entry *ir_entry = (struct IR_IO_APIC_route_entry *) entry; int index; if (!iommu) - panic("No mapping iommu for ioapic %d\n", apic); + panic("No mapping iommu for ioapic %d\n", apic_id); index = alloc_irte(iommu, irq, 1); if (index < 0) - panic("Failed to allocate IRTE for ioapic %d\n", apic); + panic("Failed to allocate IRTE for ioapic %d\n", apic_id); memset(&irte, 0, sizeof(irte)); @@ -1547,7 +1547,7 @@ static int setup_ioapic_entry(int apic, int irq, return 0; } -static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc, +static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, int trigger, int polarity) { struct irq_cfg *cfg; @@ -1567,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " "IRQ %d Mode:%i Active:%i)\n", - apic, mp_ioapics[apic].apicid, pin, cfg->vector, + apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector, irq, trigger, polarity); - if 
(setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry, + if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry, dest, trigger, polarity, cfg->vector)) { printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", - mp_ioapics[apic].apicid, pin); + mp_ioapics[apic_id].apicid, pin); __clear_irq_vector(irq, cfg); return; } @@ -1583,12 +1583,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de if (irq < NR_IRQS_LEGACY) disable_8259A_irq(irq); - ioapic_write_entry(apic, pin, entry); + ioapic_write_entry(apic_id, pin, entry); } static void __init setup_IO_APIC_irqs(void) { - int apic, pin, idx, irq; + int apic_id, pin, idx, irq; int notcon = 0; struct irq_desc *desc; struct irq_cfg *cfg; @@ -1596,19 +1596,19 @@ static void __init setup_IO_APIC_irqs(void) apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); - for (apic = 0; apic < nr_ioapics; apic++) { - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { + for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { - idx = find_irq_entry(apic, pin, mp_INT); + idx = find_irq_entry(apic_id, pin, mp_INT); if (idx == -1) { if (!notcon) { notcon = 1; apic_printk(APIC_VERBOSE, KERN_DEBUG " %d-%d", - mp_ioapics[apic].apicid, pin); + mp_ioapics[apic_id].apicid, pin); } else apic_printk(APIC_VERBOSE, " %d-%d", - mp_ioapics[apic].apicid, pin); + mp_ioapics[apic_id].apicid, pin); continue; } if (notcon) { @@ -1617,9 +1617,9 @@ static void __init setup_IO_APIC_irqs(void) notcon = 0; } - irq = pin_2_irq(idx, apic, pin); + irq = pin_2_irq(idx, apic_id, pin); #ifdef CONFIG_X86_32 - if (multi_timer_check(apic, irq)) + if (multi_timer_check(apic_id, irq)) continue; #endif desc = irq_to_desc_alloc_cpu(irq, cpu); @@ -1628,9 +1628,9 @@ static void __init setup_IO_APIC_irqs(void) continue; } cfg = desc->chip_data; - add_pin_to_irq_cpu(cfg, cpu, apic, pin); + add_pin_to_irq_cpu(cfg, cpu, apic_id, pin); - setup_IO_APIC_irq(apic, pin, irq, desc, + setup_IO_APIC_irq(apic_id, pin, irq, desc, irq_trigger(idx), irq_polarity(idx)); } } @@ -1643,7 +1643,7 @@ static void __init setup_IO_APIC_irqs(void) /* * Set up the timer pin, possibly with the 8259A-master behind. */ -static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, +static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, int vector) { struct IO_APIC_route_entry entry; @@ -1676,7 +1676,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, /* * Add it to the IO-APIC irq-routing table: */ - ioapic_write_entry(apic, pin, entry); + ioapic_write_entry(apic_id, pin, entry); } @@ -2089,7 +2089,7 @@ static void __init setup_ioapic_ids_from_mpc(void) { union IO_APIC_reg_00 reg_00; physid_mask_t phys_id_present_map; - int apic; + int apic_id; int i; unsigned char old_id; unsigned long flags; @@ -2113,21 +2113,21 @@ static void __init setup_ioapic_ids_from_mpc(void) /* * Set the IOAPIC ID to the value stored in the MPC table. 
*/ - for (apic = 0; apic < nr_ioapics; apic++) { + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { /* Read the register 0 value */ spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic, 0); + reg_00.raw = io_apic_read(apic_id, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - old_id = mp_ioapics[apic].apicid; + old_id = mp_ioapics[apic_id].apicid; - if (mp_ioapics[apic].apicid >= get_physical_broadcast()) { + if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", - apic, mp_ioapics[apic].apicid); + apic_id, mp_ioapics[apic_id].apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", reg_00.bits.ID); - mp_ioapics[apic].apicid = reg_00.bits.ID; + mp_ioapics[apic_id].apicid = reg_00.bits.ID; } /* @@ -2136,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void) * 'stuck on smp_invalidate_needed IPI wait' messages. */ if (check_apicid_used(phys_id_present_map, - mp_ioapics[apic].apicid)) { + mp_ioapics[apic_id].apicid)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", - apic, mp_ioapics[apic].apicid); + apic_id, mp_ioapics[apic_id].apicid); for (i = 0; i < get_physical_broadcast(); i++) if (!physid_isset(i, phys_id_present_map)) break; @@ -2147,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void) printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); physid_set(i, phys_id_present_map); - mp_ioapics[apic].apicid = i; + mp_ioapics[apic_id].apicid = i; } else { physid_mask_t tmp; - tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid); + tmp = apicid_to_cpu_present(mp_ioapics[apic_id].apicid); apic_printk(APIC_VERBOSE, "Setting %d in the " "phys_id_present_map\n", - mp_ioapics[apic].apicid); + mp_ioapics[apic_id].apicid); physids_or(phys_id_present_map, phys_id_present_map, tmp); } @@ -2162,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void) * We need to adjust the IRQ routing table * if the ID changed. 
*/ - if (old_id != mp_ioapics[apic].apicid) + if (old_id != mp_ioapics[apic_id].apicid) for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].dstapic == old_id) mp_irqs[i].dstapic - = mp_ioapics[apic].apicid; + = mp_ioapics[apic_id].apicid; /* * Read the right value from the MPC table and @@ -2174,20 +2174,20 @@ static void __init setup_ioapic_ids_from_mpc(void) */ apic_printk(APIC_VERBOSE, KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", - mp_ioapics[apic].apicid); + mp_ioapics[apic_id].apicid); - reg_00.bits.ID = mp_ioapics[apic].apicid; + reg_00.bits.ID = mp_ioapics[apic_id].apicid; spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic, 0, reg_00.raw); + io_apic_write(apic_id, 0, reg_00.raw); spin_unlock_irqrestore(&ioapic_lock, flags); /* * Sanity check */ spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic, 0); + reg_00.raw = io_apic_read(apic_id, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - if (reg_00.bits.ID != mp_ioapics[apic].apicid) + if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index f2191d4f271..3928280278f 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -236,7 +236,7 @@ static int __init numaq_setup_ioapic_ids(void) static int __init numaq_update_genapic(void) { - genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; + apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; return 0; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f41c4486c27..a58e9f5e603 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -590,7 +590,7 @@ static int __init default_update_genapic(void) { #ifdef CONFIG_X86_SMP # if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64) - genapic->wakeup_cpu = wakeup_secondary_cpu_via_init; + apic->wakeup_cpu = wakeup_secondary_cpu_via_init; # endif #endif diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index c2ded144802..2f4f4a6e39b 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -20,14 +20,14 @@ void __init es7000_update_genapic_to_cluster(void) { - genapic->target_cpus = target_cpus_cluster; - genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER; - genapic->int_dest_mode = INT_DEST_MODE_CLUSTER; - genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER; + apic->target_cpus = target_cpus_cluster; + apic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER; + apic->int_dest_mode = INT_DEST_MODE_CLUSTER; + apic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER; - genapic->init_apic_ldr = init_apic_ldr_cluster; + apic->init_apic_ldr = init_apic_ldr_cluster; - genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster; + apic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster; } static int probe_es7000(void) diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 15a38daef1a..82bf0f520fb 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -23,7 +23,7 @@ extern struct genapic apic_bigsmp; extern struct genapic apic_es7000; extern struct genapic apic_default; -struct genapic *genapic = &apic_default; +struct genapic *apic = &apic_default; static struct genapic *apic_probe[] __initdata = { #ifdef CONFIG_X86_NUMAQ @@ -52,7 +52,7 @@ static int __init parse_apic(char *arg) for (i = 0; apic_probe[i]; i++) { if (!strcmp(apic_probe[i]->name, arg)) { - genapic = apic_probe[i]; + apic = apic_probe[i]; cmdline_apic = 1; return 0; } 
@@ -76,13 +76,13 @@ void __init generic_bigsmp_probe(void) * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support */ - if (!cmdline_apic && genapic == &apic_default) { + if (!cmdline_apic && apic == &apic_default) { if (apic_bigsmp.probe()) { - genapic = &apic_bigsmp; + apic = &apic_bigsmp; if (x86_quirks->update_genapic) x86_quirks->update_genapic(); printk(KERN_INFO "Overriding APIC driver with %s\n", - genapic->name); + apic->name); } } #endif @@ -94,7 +94,7 @@ void __init generic_apic_probe(void) int i; for (i = 0; apic_probe[i]; i++) { if (apic_probe[i]->probe()) { - genapic = apic_probe[i]; + apic = apic_probe[i]; break; } } @@ -105,7 +105,7 @@ void __init generic_apic_probe(void) if (x86_quirks->update_genapic) x86_quirks->update_genapic(); } - printk(KERN_INFO "Using APIC driver %s\n", genapic->name); + printk(KERN_INFO "Using APIC driver %s\n", apic->name); } /* These functions can switch the APIC even after the initial ->probe() */ @@ -116,11 +116,11 @@ int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) for (i = 0; apic_probe[i]; ++i) { if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { if (!cmdline_apic) { - genapic = apic_probe[i]; + apic = apic_probe[i]; if (x86_quirks->update_genapic) x86_quirks->update_genapic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", - genapic->name); + apic->name); } return 1; } @@ -134,11 +134,11 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) for (i = 0; apic_probe[i]; ++i) { if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { if (!cmdline_apic) { - genapic = apic_probe[i]; + apic = apic_probe[i]; if (x86_quirks->update_genapic) x86_quirks->update_genapic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", - genapic->name); + apic->name); } return 1; } @@ -148,5 +148,5 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) int hard_smp_processor_id(void) { - return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); + return apic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); } -- cgit v1.2.3 From f2f05ee8b8d346d4edee766384a5fedafdd4f9f8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 02:37:01 +0100 Subject: x86: clean up genapic_flat - reorder fields so that they appear in struct genapic field ordering - add zero-initialized fields too so that it's apparent which functionality is default / missing. 
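As a minimal stand-alone sketch of that initializer style (the struct below is a cut-down stand-in with only a handful of made-up hooks, not the real struct genapic), keeping the fields in declaration order and spelling out the NULL entries makes it obvious from the initializer alone which hooks a driver implements and which are left at their default:

#include <stddef.h>
#include <stdio.h>

/* Cut-down template: a few hooks, in the same order as the struct declaration. */
struct apic_template {
        const char *name;
        int  (*probe)(void);
        void (*init_apic_ldr)(void);
        void (*inquire_remote_apic)(int apicid);
};

static void flat_init_apic_ldr(void) { /* nothing to do in this sketch */ }

/*
 * Fields are listed in declaration order, and hooks the driver does not
 * implement are written out as NULL instead of being silently omitted,
 * so the gaps are visible right in the initializer.
 */
static const struct apic_template flat_template = {
        .name                   = "flat",
        .probe                  = NULL,
        .init_apic_ldr          = flat_init_apic_ldr,
        .inquire_remote_apic    = NULL,
};

int main(void)
{
        printf("%s: probe is %s, inquire_remote_apic is %s\n",
               flat_template.name,
               flat_template.probe ? "implemented" : "default (NULL)",
               flat_template.inquire_remote_apic ? "implemented" : "default (NULL)");
        return 0;
}

The same treatment is applied to apic_physflat, apic_x2apic_uv_x, apic_x2apic_phys, apic_x2apic_cluster and the 32-bit apic_default template in the commits that follow.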
Signed-off-by: Ingo Molnar --- arch/x86/kernel/genapic_flat_64.c | 73 +++++++++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 34185488e4f..f1bfdd3608a 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -175,25 +175,60 @@ static unsigned int phys_pkg_id(int index_msb) } struct genapic apic_flat = { - .name = "flat", - .acpi_madt_oem_check = flat_acpi_madt_oem_check, - .int_delivery_mode = dest_LowestPrio, - .int_dest_mode = (APIC_DEST_LOGICAL != 0), - .target_cpus = flat_target_cpus, - .vector_allocation_domain = flat_vector_allocation_domain, - .apic_id_registered = flat_apic_id_registered, - .init_apic_ldr = flat_init_apic_ldr, - .send_IPI_all = flat_send_IPI_all, - .send_IPI_allbutself = flat_send_IPI_allbutself, - .send_IPI_mask = flat_send_IPI_mask, - .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, - .send_IPI_self = apic_send_IPI_self, - .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, - .phys_pkg_id = phys_pkg_id, - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = (0xFFu<<24), + .name = "flat", + .probe = NULL, + .acpi_madt_oem_check = flat_acpi_madt_oem_check, + .apic_id_registered = flat_apic_id_registered, + + .int_delivery_mode = dest_LowestPrio, + .int_dest_mode = (APIC_DEST_LOGICAL != 0), + + .target_cpus = flat_target_cpus, + .ESR_DISABLE = 0, + .apic_destination_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .no_balance_irq = 0, + .no_ioapic_check = 0, + + .vector_allocation_domain = flat_vector_allocation_domain, + .init_apic_ldr = flat_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = NULL, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = NULL, + .enable_apic_mode = NULL, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu << 24, + + .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + + .send_IPI_mask = flat_send_IPI_mask, + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, + .send_IPI_allbutself = flat_send_IPI_allbutself, + .send_IPI_all = flat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = 0, + .trampoline_phys_high = 0, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, + .inquire_remote_apic = NULL, }; /* -- cgit v1.2.3 From 4c3e51e05a7eefead4033b187394458ff8626497 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 02:37:01 +0100 Subject: x86: clean up genapic_phys_flat - reorder fields so that they appear in struct genapic field ordering - add zero-initialized fields too so that it's apparent which functionality is default / missing. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/genapic_flat_64.c | 75 +++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index f1bfdd3608a..e9233374cef 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -320,23 +320,60 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, } struct genapic apic_physflat = { - .name = "physical flat", - .acpi_madt_oem_check = physflat_acpi_madt_oem_check, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), - .target_cpus = physflat_target_cpus, - .vector_allocation_domain = physflat_vector_allocation_domain, - .apic_id_registered = flat_apic_id_registered, - .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/ - .send_IPI_all = physflat_send_IPI_all, - .send_IPI_allbutself = physflat_send_IPI_allbutself, - .send_IPI_mask = physflat_send_IPI_mask, - .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, - .send_IPI_self = apic_send_IPI_self, - .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, - .phys_pkg_id = phys_pkg_id, - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = (0xFFu<<24), + + .name = "physical flat", + .probe = NULL, + .acpi_madt_oem_check = physflat_acpi_madt_oem_check, + .apic_id_registered = flat_apic_id_registered, + + .int_delivery_mode = dest_Fixed, + .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + + .target_cpus = physflat_target_cpus, + .ESR_DISABLE = 0, + .apic_destination_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .no_balance_irq = 0, + .no_ioapic_check = 0, + + .vector_allocation_domain = physflat_vector_allocation_domain, + /* not needed, but shouldn't hurt: */ + .init_apic_ldr = flat_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = NULL, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = NULL, + .enable_apic_mode = NULL, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu<<24, + + .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, + + .send_IPI_mask = physflat_send_IPI_mask, + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, + .send_IPI_allbutself = physflat_send_IPI_allbutself, + .send_IPI_all = physflat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = 0, + .trampoline_phys_high = 0, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, + .inquire_remote_apic = NULL, }; -- cgit v1.2.3 From c7967329911013a05920ef12832935c541bb8c9a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 02:37:01 +0100 Subject: x86: clean up apic_x2apic_uv_x - reorder fields so that they appear in struct genapic field ordering - add zero-initialized fields too so that it's apparent which functionality is default / missing. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_uv_x.c | 74 +++++++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index bfe36249145..94f606f204a 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -237,25 +237,61 @@ static void uv_send_IPI_self(int vector) } struct genapic apic_x2apic_uv_x = { - .name = "UV large system", - .acpi_madt_oem_check = uv_acpi_madt_oem_check, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), - .target_cpus = uv_target_cpus, - .vector_allocation_domain = uv_vector_allocation_domain, - .apic_id_registered = uv_apic_id_registered, - .init_apic_ldr = uv_init_apic_ldr, - .send_IPI_all = uv_send_IPI_all, - .send_IPI_allbutself = uv_send_IPI_allbutself, - .send_IPI_mask = uv_send_IPI_mask, - .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, - .send_IPI_self = uv_send_IPI_self, - .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, - .phys_pkg_id = phys_pkg_id, - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = (0xFFFFFFFFu), + + .name = "UV large system", + .probe = NULL, + .acpi_madt_oem_check = uv_acpi_madt_oem_check, + .apic_id_registered = uv_apic_id_registered, + + .int_delivery_mode = dest_Fixed, + .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + + .target_cpus = uv_target_cpus, + .ESR_DISABLE = 0, + .apic_destination_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .no_balance_irq = 0, + .no_ioapic_check = 0, + + .vector_allocation_domain = uv_vector_allocation_domain, + .init_apic_ldr = uv_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = NULL, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = NULL, + .enable_apic_mode = NULL, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, + + .send_IPI_mask = uv_send_IPI_mask, + .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, + .send_IPI_allbutself = uv_send_IPI_allbutself, + .send_IPI_all = uv_send_IPI_all, + .send_IPI_self = uv_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = 0, + .trampoline_phys_high = 0, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, + .inquire_remote_apic = NULL, }; static __cpuinit void set_x2apic_extra_bits(int pnode) -- cgit v1.2.3 From 05c155c235c757329ec89ad591516538ed8352c0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 02:37:01 +0100 Subject: x86: clean up apic_x2apic_phys - reorder fields so that they appear in struct genapic field ordering - add zero-initialized fields too so that it's apparent which functionality is default / missing. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_phys.c | 74 +++++++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 21bcc0e098b..c98361fb7ee 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -172,23 +172,59 @@ static void init_x2apic_ldr(void) } struct genapic apic_x2apic_phys = { - .name = "physical x2apic", - .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), - .target_cpus = x2apic_target_cpus, - .vector_allocation_domain = x2apic_vector_allocation_domain, - .apic_id_registered = x2apic_apic_id_registered, - .init_apic_ldr = init_x2apic_ldr, - .send_IPI_all = x2apic_send_IPI_all, - .send_IPI_allbutself = x2apic_send_IPI_allbutself, - .send_IPI_mask = x2apic_send_IPI_mask, - .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, - .send_IPI_self = x2apic_send_IPI_self, - .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, - .phys_pkg_id = phys_pkg_id, - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = (0xFFFFFFFFu), + + .name = "physical x2apic", + .probe = NULL, + .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, + .apic_id_registered = x2apic_apic_id_registered, + + .int_delivery_mode = dest_Fixed, + .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + + .target_cpus = x2apic_target_cpus, + .ESR_DISABLE = 0, + .apic_destination_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .no_balance_irq = 0, + .no_ioapic_check = 0, + + .vector_allocation_domain = x2apic_vector_allocation_domain, + .init_apic_ldr = init_x2apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = NULL, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = NULL, + .enable_apic_mode = NULL, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, + + .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, + .send_IPI_allbutself = x2apic_send_IPI_allbutself, + .send_IPI_all = x2apic_send_IPI_all, + .send_IPI_self = x2apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = 0, + .trampoline_phys_high = 0, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, + .inquire_remote_apic = NULL, }; -- cgit v1.2.3 From 504a3c3ad45d200a6ac8be5aa019c8fa05e26dc8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 02:37:01 +0100 Subject: x86: clean up apic_x2apic_cluster - reorder fields so that they appear in struct genapic field ordering - add zero-initialized fields too so that it's apparent which functionality is default / missing. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_cluster.c | 74 +++++++++++++++++++++++++++---------- 1 file changed, 55 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 6ce497cc372..fc855e503ac 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -176,23 +176,59 @@ static void init_x2apic_ldr(void) } struct genapic apic_x2apic_cluster = { - .name = "cluster x2apic", - .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, - .int_delivery_mode = dest_LowestPrio, - .int_dest_mode = (APIC_DEST_LOGICAL != 0), - .target_cpus = x2apic_target_cpus, - .vector_allocation_domain = x2apic_vector_allocation_domain, - .apic_id_registered = x2apic_apic_id_registered, - .init_apic_ldr = init_x2apic_ldr, - .send_IPI_all = x2apic_send_IPI_all, - .send_IPI_allbutself = x2apic_send_IPI_allbutself, - .send_IPI_mask = x2apic_send_IPI_mask, - .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, - .send_IPI_self = x2apic_send_IPI_self, - .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, - .phys_pkg_id = phys_pkg_id, - .get_apic_id = get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = (0xFFFFFFFFu), + + .name = "cluster x2apic", + .probe = NULL, + .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, + .apic_id_registered = x2apic_apic_id_registered, + + .int_delivery_mode = dest_LowestPrio, + .int_dest_mode = (APIC_DEST_LOGICAL != 0), + + .target_cpus = x2apic_target_cpus, + .ESR_DISABLE = 0, + .apic_destination_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .no_balance_irq = 0, + .no_ioapic_check = 0, + + .vector_allocation_domain = x2apic_vector_allocation_domain, + .init_apic_ldr = init_x2apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = NULL, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = NULL, + .enable_apic_mode = NULL, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, + + .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, + .send_IPI_allbutself = x2apic_send_IPI_allbutself, + .send_IPI_all = x2apic_send_IPI_all, + .send_IPI_self = x2apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = 0, + .trampoline_phys_high = 0, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, + .inquire_remote_apic = NULL, }; -- cgit v1.2.3 From 0a7e8c64142b2ae5aacdc509ed112b8e362ac8a4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:32:03 +0100 Subject: x86, genapic: cleanup 32-bit apic_default template Clean up the APIC driver template: - order fields properly - use the macro names explicitly (so that they can be renamed later) - fill in NULL entries as well Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/default.c | 58 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index e63a4a76d8c..d5fec764fb4 
100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -24,4 +24,60 @@ static int probe_default(void) return 1; } -struct genapic apic_default = APIC_INIT("default", probe_default); +struct genapic apic_default = { + + .name = "default", + .probe = probe_default, + .acpi_madt_oem_check = acpi_madt_oem_check, + .apic_id_registered = apic_id_registered, + + .int_delivery_mode = INT_DELIVERY_MODE, + .int_dest_mode = INT_DEST_MODE, + + .target_cpus = target_cpus, + .ESR_DISABLE = esr_disable, + .apic_destination_logical = APIC_DEST_LOGICAL, + .check_apicid_used = check_apicid_used, + .check_apicid_present = check_apicid_present, + + .no_balance_irq = NO_BALANCE_IRQ, + .no_ioapic_check = 0, + + .vector_allocation_domain = vector_allocation_domain, + .init_apic_ldr = init_apic_ldr, + + .ioapic_phys_id_map = ioapic_phys_id_map, + .setup_apic_routing = setup_apic_routing, + .multi_timer_check = multi_timer_check, + .apicid_to_node = apicid_to_node, + .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_present_to_apicid = cpu_present_to_apicid, + .apicid_to_cpu_present = apicid_to_cpu_present, + .setup_portio_remap = setup_portio_remap, + .check_phys_apicid_present = check_phys_apicid_present, + .enable_apic_mode = enable_apic_mode, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = mps_oem_check, + + .get_apic_id = get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = APIC_ID_MASK, + + .cpu_mask_to_apicid = cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + + .send_IPI_mask = send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = send_IPI_allbutself, + .send_IPI_all = send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .store_NMI_vector = store_NMI_vector, + .restore_NMI_vector = restore_NMI_vector, + .inquire_remote_apic = inquire_remote_apic, +}; -- cgit v1.2.3 From d26b6d6660d704ffa59f22ad57c9103e3fba289f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:32:03 +0100 Subject: x86, genapic: cleanup 32-bit apic_bigsmp template Clean up the APIC driver template: - order fields properly - use the macro names explicitly (so that they can be renamed later) - fill in NULL entries as well Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/bigsmp.c | 58 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index bc4c7840b2a..13e82bc4dae 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -57,4 +57,60 @@ static int probe_bigsmp(void) return dmi_bigsmp; } -struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); +struct genapic apic_bigsmp = { + + .name = "bigsmp", + .probe = probe_bigsmp, + .acpi_madt_oem_check = acpi_madt_oem_check, + .apic_id_registered = apic_id_registered, + + .int_delivery_mode = INT_DELIVERY_MODE, + .int_dest_mode = INT_DEST_MODE, + + .target_cpus = target_cpus, + .ESR_DISABLE = esr_disable, + .apic_destination_logical = APIC_DEST_LOGICAL, + .check_apicid_used = check_apicid_used, + .check_apicid_present = check_apicid_present, + + .no_balance_irq = NO_BALANCE_IRQ, + .no_ioapic_check = 0, + + .vector_allocation_domain = vector_allocation_domain, + .init_apic_ldr = init_apic_ldr, + + 
.ioapic_phys_id_map = ioapic_phys_id_map, + .setup_apic_routing = setup_apic_routing, + .multi_timer_check = multi_timer_check, + .apicid_to_node = apicid_to_node, + .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_present_to_apicid = cpu_present_to_apicid, + .apicid_to_cpu_present = apicid_to_cpu_present, + .setup_portio_remap = setup_portio_remap, + .check_phys_apicid_present = check_phys_apicid_present, + .enable_apic_mode = enable_apic_mode, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = mps_oem_check, + + .get_apic_id = get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = APIC_ID_MASK, + + .cpu_mask_to_apicid = cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + + .send_IPI_mask = send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = send_IPI_allbutself, + .send_IPI_all = send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .store_NMI_vector = store_NMI_vector, + .restore_NMI_vector = restore_NMI_vector, + .inquire_remote_apic = inquire_remote_apic, +}; -- cgit v1.2.3 From fea3437adf778cfe69b7f8cff0afb8060d84b647 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:32:03 +0100 Subject: x86, genapic: cleanup 32-bit apic_numaq template Clean up the APIC driver template: - order fields properly - use the macro names explicitly (so that they can be renamed later) - fill in NULL entries as well Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/numaq.c | 58 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 3679e225564..fa486ca49c0 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -50,4 +50,60 @@ static void vector_allocation_domain(int cpu, cpumask_t *retmask) *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } -struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); +struct genapic apic_numaq = { + + .name = "NUMAQ", + .probe = probe_numaq, + .acpi_madt_oem_check = acpi_madt_oem_check, + .apic_id_registered = apic_id_registered, + + .int_delivery_mode = INT_DELIVERY_MODE, + .int_dest_mode = INT_DEST_MODE, + + .target_cpus = target_cpus, + .ESR_DISABLE = esr_disable, + .apic_destination_logical = APIC_DEST_LOGICAL, + .check_apicid_used = check_apicid_used, + .check_apicid_present = check_apicid_present, + + .no_balance_irq = NO_BALANCE_IRQ, + .no_ioapic_check = 0, + + .vector_allocation_domain = vector_allocation_domain, + .init_apic_ldr = init_apic_ldr, + + .ioapic_phys_id_map = ioapic_phys_id_map, + .setup_apic_routing = setup_apic_routing, + .multi_timer_check = multi_timer_check, + .apicid_to_node = apicid_to_node, + .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_present_to_apicid = cpu_present_to_apicid, + .apicid_to_cpu_present = apicid_to_cpu_present, + .setup_portio_remap = setup_portio_remap, + .check_phys_apicid_present = check_phys_apicid_present, + .enable_apic_mode = enable_apic_mode, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = mps_oem_check, + + .get_apic_id = get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = APIC_ID_MASK, + + .cpu_mask_to_apicid = cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + + .send_IPI_mask = send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + 
.send_IPI_allbutself = send_IPI_allbutself, + .send_IPI_all = send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .store_NMI_vector = store_NMI_vector, + .restore_NMI_vector = restore_NMI_vector, + .inquire_remote_apic = inquire_remote_apic, +}; -- cgit v1.2.3 From fed53ebf3c4e233e085c453a27ae287ccbf149fb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:32:03 +0100 Subject: x86, genapic: cleanup 32-bit apic_es7000 template Clean up the APIC driver template: - order fields properly - use the macro names explicitly (so that they can be renamed later) - fill in NULL entries as well Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/es7000.c | 58 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 2f4f4a6e39b..4a404ea5f92 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -100,4 +100,60 @@ static void vector_allocation_domain(int cpu, cpumask_t *retmask) *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } -struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); +struct genapic apic_es7000 = { + + .name = "es7000", + .probe = probe_es7000, + .acpi_madt_oem_check = acpi_madt_oem_check, + .apic_id_registered = apic_id_registered, + + .int_delivery_mode = INT_DELIVERY_MODE, + .int_dest_mode = INT_DEST_MODE, + + .target_cpus = target_cpus, + .ESR_DISABLE = esr_disable, + .apic_destination_logical = APIC_DEST_LOGICAL, + .check_apicid_used = check_apicid_used, + .check_apicid_present = check_apicid_present, + + .no_balance_irq = NO_BALANCE_IRQ, + .no_ioapic_check = 0, + + .vector_allocation_domain = vector_allocation_domain, + .init_apic_ldr = init_apic_ldr, + + .ioapic_phys_id_map = ioapic_phys_id_map, + .setup_apic_routing = setup_apic_routing, + .multi_timer_check = multi_timer_check, + .apicid_to_node = apicid_to_node, + .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_present_to_apicid = cpu_present_to_apicid, + .apicid_to_cpu_present = apicid_to_cpu_present, + .setup_portio_remap = setup_portio_remap, + .check_phys_apicid_present = check_phys_apicid_present, + .enable_apic_mode = enable_apic_mode, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = mps_oem_check, + + .get_apic_id = get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = APIC_ID_MASK, + + .cpu_mask_to_apicid = cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + + .send_IPI_mask = send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = send_IPI_allbutself, + .send_IPI_all = send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .store_NMI_vector = store_NMI_vector, + .restore_NMI_vector = restore_NMI_vector, + .inquire_remote_apic = inquire_remote_apic, +}; -- cgit v1.2.3 From 491a50c4fbcf6cc39a702a16a2dfaf42f0eb8058 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:32:03 +0100 Subject: x86, genapic: cleanup 32-bit apic_summit template Clean up the APIC driver template: - order fields properly - use the macro names 
explicitly (so that they can be renamed later) - fill in NULL entries as well Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/summit.c | 58 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 2821ffc188b..479c1d40977 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -37,4 +37,60 @@ static void vector_allocation_domain(int cpu, cpumask_t *retmask) *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } -struct genapic apic_summit = APIC_INIT("summit", probe_summit); +struct genapic apic_summit = { + + .name = "summit", + .probe = probe_summit, + .acpi_madt_oem_check = acpi_madt_oem_check, + .apic_id_registered = apic_id_registered, + + .int_delivery_mode = INT_DELIVERY_MODE, + .int_dest_mode = INT_DEST_MODE, + + .target_cpus = target_cpus, + .ESR_DISABLE = esr_disable, + .apic_destination_logical = APIC_DEST_LOGICAL, + .check_apicid_used = check_apicid_used, + .check_apicid_present = check_apicid_present, + + .no_balance_irq = NO_BALANCE_IRQ, + .no_ioapic_check = 0, + + .vector_allocation_domain = vector_allocation_domain, + .init_apic_ldr = init_apic_ldr, + + .ioapic_phys_id_map = ioapic_phys_id_map, + .setup_apic_routing = setup_apic_routing, + .multi_timer_check = multi_timer_check, + .apicid_to_node = apicid_to_node, + .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_present_to_apicid = cpu_present_to_apicid, + .apicid_to_cpu_present = apicid_to_cpu_present, + .setup_portio_remap = setup_portio_remap, + .check_phys_apicid_present = check_phys_apicid_present, + .enable_apic_mode = enable_apic_mode, + .phys_pkg_id = phys_pkg_id, + .mps_oem_check = mps_oem_check, + + .get_apic_id = get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = APIC_ID_MASK, + + .cpu_mask_to_apicid = cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + + .send_IPI_mask = send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = send_IPI_allbutself, + .send_IPI_all = send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .store_NMI_vector = store_NMI_vector, + .restore_NMI_vector = restore_NMI_vector, + .inquire_remote_apic = inquire_remote_apic, +}; -- cgit v1.2.3 From 9a6801da55e4a4492e8f666ac272efe8186682c8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:18:13 +0100 Subject: x86: remove APIC_INIT / APICFUNC / IPIFUNC The APIC_INIT() / APICFUNC / IPIFUNC macros were ugly and obfuscated the true identity of various APIC driver methods. Now that they are not used anymore, remove them. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 63 ++---------------------------------------- 1 file changed, 2 insertions(+), 61 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 3970da3245c..26c5e824a71 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -97,66 +97,8 @@ struct genapic { extern struct genapic *apic; #ifdef CONFIG_X86_32 - -#define APICFUNC(x) .x = x, - -/* More functions could be probably marked IPIFUNC and save some space - in UP GENERICARCH kernels, but I don't have the nerve right now - to untangle this mess. 
-AK */ -#ifdef CONFIG_SMP -#define IPIFUNC(x) APICFUNC(x) -#else -#define IPIFUNC(x) -#endif - -#define APIC_INIT(aname, aprobe) \ -{ \ - .name = aname, \ - .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered) \ - APICFUNC(target_cpus) \ - APICFUNC(check_apicid_used) \ - APICFUNC(check_apicid_present) \ - APICFUNC(init_apic_ldr) \ - APICFUNC(ioapic_phys_id_map) \ - APICFUNC(setup_apic_routing) \ - APICFUNC(multi_timer_check) \ - APICFUNC(apicid_to_node) \ - APICFUNC(cpu_to_logical_apicid) \ - APICFUNC(cpu_present_to_apicid) \ - APICFUNC(apicid_to_cpu_present) \ - APICFUNC(setup_portio_remap) \ - APICFUNC(check_phys_apicid_present) \ - APICFUNC(mps_oem_check) \ - APICFUNC(get_apic_id) \ - .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(cpu_mask_to_apicid_and) \ - APICFUNC(vector_allocation_domain) \ - APICFUNC(acpi_madt_oem_check) \ - IPIFUNC(send_IPI_mask) \ - IPIFUNC(send_IPI_allbutself) \ - IPIFUNC(send_IPI_all) \ - APICFUNC(enable_apic_mode) \ - APICFUNC(phys_pkg_id) \ - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ - APICFUNC(wait_for_init_deassert) \ - APICFUNC(smp_callin_clear_local_apic) \ - APICFUNC(store_NMI_vector) \ - APICFUNC(restore_NMI_vector) \ - APICFUNC(inquire_remote_apic) \ -} - extern void es7000_update_genapic_to_cluster(void); - -#else /* CONFIG_X86_64: */ - +#else extern struct genapic apic_flat; extern struct genapic apic_physflat; extern struct genapic apic_x2apic_cluster; @@ -169,7 +111,6 @@ extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); extern void setup_apic_routing(void); - -#endif /* CONFIG_X86_64 */ +#endif #endif /* _ASM_X86_GENAPIC_64_H */ -- cgit v1.2.3 From 306db03b0d71bf9c94155c0c4771a79fc70b4b27 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:43:47 +0100 Subject: x86: clean up apic->acpi_madt_oem_check methods Impact: refactor code x86 subarchitectures each defined a "acpi_madt_oem_check()" method, which could be an inline function, or an extern, or a static function, and which was also the name of a genapic field. Untangle this namespace spaghetti by setting ->acpi_madt_oem_check() to NULL on those subarchitectures that have no detection quirks, and rename the other ones (summit, es7000) that do. Also change default_acpi_madt_oem_check() to handle NULL entries, and clean its control flow up as well. 
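For reference, the reworked default_acpi_madt_oem_check() loop skips drivers that leave the hook NULL and selects the first driver whose check matches. A simplified standalone sketch of that control flow (illustrative struct and function names; the kernel version additionally honors a command-line override and switches the global apic pointer, as the probe.c hunk below shows):

    struct apic_driver {
            const char *name;
            int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
    };

    /* Returns the index of the first matching driver, or -1 if none match. */
    static int probe_madt(struct apic_driver **drivers,
                          char *oem_id, char *oem_table_id)
    {
            int i;

            for (i = 0; drivers[i]; i++) {
                    if (!drivers[i]->acpi_madt_oem_check)
                            continue;       /* no detection quirk for this driver */
                    if (!drivers[i]->acpi_madt_oem_check(oem_id, oem_table_id))
                            continue;       /* quirk present, but it did not match */
                    return i;               /* first match wins */
            }
            return -1;
    }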
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/mpparse.h | 2 +- arch/x86/include/asm/genapic.h | 2 +- arch/x86/include/asm/mach-default/mach_mpparse.h | 2 +- arch/x86/include/asm/mach-generic/mach_mpparse.h | 2 +- arch/x86/include/asm/summit/mpparse.h | 2 +- arch/x86/kernel/acpi/boot.c | 3 ++- arch/x86/kernel/genapic_64.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 6 +++--- arch/x86/mach-generic/numaq.c | 8 +------- arch/x86/mach-generic/probe.c | 24 ++++++++++++++---------- arch/x86/mach-generic/summit.c | 2 +- 13 files changed, 29 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h index c1629b090ec..30692c4ae85 100644 --- a/arch/x86/include/asm/es7000/mpparse.h +++ b/arch/x86/include/asm/es7000/mpparse.h @@ -9,7 +9,7 @@ extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); extern void setup_unisys(void); #ifndef CONFIG_X86_GENERICARCH -extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); +extern int default_acpi_madt_oem_check(char *oem_id, char *oem_table_id); extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid); #endif diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 26c5e824a71..108abdf6953 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -103,7 +103,7 @@ extern struct genapic apic_flat; extern struct genapic apic_physflat; extern struct genapic apic_x2apic_cluster; extern struct genapic apic_x2apic_phys; -extern int acpi_madt_oem_check(char *, char *); +extern int default_acpi_madt_oem_check(char *, char *); extern void apic_send_IPI_self(int vector); diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h index c70a263d68c..8fa01770ba6 100644 --- a/arch/x86/include/asm/mach-default/mach_mpparse.h +++ b/arch/x86/include/asm/mach-default/mach_mpparse.h @@ -8,7 +8,7 @@ mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) } /* Hook from generic ACPI tables.c */ -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static inline int default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h index 9444ab8dca9..f497d96c76b 100644 --- a/arch/x86/include/asm/mach-generic/mach_mpparse.h +++ b/arch/x86/include/asm/mach-generic/mach_mpparse.h @@ -4,6 +4,6 @@ extern int mps_oem_check(struct mpc_table *, char *, char *); -extern int acpi_madt_oem_check(char *, char *); +extern int default_acpi_madt_oem_check(char *, char *); #endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */ diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h index 380e86c0236..555ed8238e9 100644 --- a/arch/x86/include/asm/summit/mpparse.h +++ b/arch/x86/include/asm/summit/mpparse.h @@ -27,7 +27,7 @@ static inline int mps_oem_check(struct mpc_table *mpc, char *oem, } /* Hook from generic ACPI tables.c */ -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERVIGIL", 8) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4cb5964f149..314fe0dddef 100644 --- a/arch/x86/kernel/acpi/boot.c +++ 
b/arch/x86/kernel/acpi/boot.c @@ -239,7 +239,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) madt->address); } - acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); + default_acpi_madt_oem_check(madt->header.oem_id, + madt->header.oem_table_id); return 0; } diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 2b986389a24..060945b8eec 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -68,7 +68,7 @@ void apic_send_IPI_self(int vector) __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); } -int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { int i; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 13e82bc4dae..22c3608b80d 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -61,7 +61,7 @@ struct genapic apic_bigsmp = { .name = "bigsmp", .probe = probe_bigsmp, - .acpi_madt_oem_check = acpi_madt_oem_check, + .acpi_madt_oem_check = NULL, .apic_id_registered = apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d5fec764fb4..cfec3494a96 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -28,7 +28,7 @@ struct genapic apic_default = { .name = "default", .probe = probe_default, - .acpi_madt_oem_check = acpi_madt_oem_check, + .acpi_madt_oem_check = NULL, .apic_id_registered = apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 4a404ea5f92..23fe6f1c969 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -57,7 +57,7 @@ mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) #ifdef CONFIG_ACPI /* Hook from generic ACPI tables.c */ -static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { unsigned long oem_addr = 0; int check_dsdt; @@ -81,7 +81,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) return ret; } #else -static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } @@ -104,7 +104,7 @@ struct genapic apic_es7000 = { .name = "es7000", .probe = probe_es7000, - .acpi_madt_oem_check = acpi_madt_oem_check, + .acpi_madt_oem_check = es7000_acpi_madt_oem_check, .apic_id_registered = apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index fa486ca49c0..9691b4e1654 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -31,12 +31,6 @@ static int probe_numaq(void) return found_numaq; } -/* Hook from generic ACPI tables.c */ -static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} - static void vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus @@ -54,7 +48,7 @@ struct genapic apic_numaq = { .name = "NUMAQ", .probe = probe_numaq, - .acpi_madt_oem_check = acpi_madt_oem_check, + .acpi_madt_oem_check = NULL, .apic_id_registered = apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 82bf0f520fb..a21e2b1a701 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -128,20 +128,24 @@ int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) return 0; } -int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { int i; + for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; + if (!apic_probe[i]->acpi_madt_oem_check) + continue; + if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); } + return 1; } return 0; } diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 479c1d40977..0eea9fbb2a5 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -41,7 +41,7 @@ struct genapic apic_summit = { .name = "summit", .probe = probe_summit, - .acpi_madt_oem_check = acpi_madt_oem_check, + .acpi_madt_oem_check = summit_acpi_madt_oem_check, .apic_id_registered = apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, -- cgit v1.2.3 From 7ed248daa56156f2fd7175f90b62fc6397b0c7b7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 03:43:47 +0100 Subject: x86: clean up apic->apic_id_registered() methods Impact: cleanup x86 subarchitectures each defined a "apic_id_registered()" method, which could be an inline function depending on which subarch we build for, and which was also the name of a genapic field. Untangle this namespace spaghetti by giving each of the instances a separate name. Also remove wrapper macro obfuscation. 
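The resulting call-site pattern is the same everywhere: the old code went through a wrapper name (a macro or an inline, depending on the subarchitecture), while the new code spells out the genapic indirection. Abridged from the arch/x86/kernel/apic.c hunk in the diff below (kernel context, not standalone code):

    /* before: looks like a plain function call, the indirection is hidden */
    if (!apic_id_registered())
            BUG();

    /* after: the call through the genapic method table is explicit */
    if (!apic->apic_id_registered())
            BUG();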
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 4 ++-- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 3 +-- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/apic.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 13 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index d8dd9f53791..42c56df3ff3 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -4,9 +4,9 @@ #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) #define esr_disable (1) -static inline int apic_id_registered(void) +static inline int bigsmp_apic_id_registered(void) { - return (1); + return 1; } static inline const cpumask_t *target_cpus(void) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index c58b9cc7446..a1819b510de 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -6,9 +6,9 @@ #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) #define esr_disable (1) -static inline int apic_id_registered(void) +static inline int es7000_apic_id_registered(void) { - return (1); + return 1; } static inline const cpumask_t *target_cpus_cluster(void) diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 2448b927b64..6a454fa0b43 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -25,7 +25,6 @@ static inline const struct cpumask *target_cpus(void) #define INT_DELIVERY_MODE (apic->int_delivery_mode) #define INT_DEST_MODE (apic->int_dest_mode) #define TARGET_CPUS (apic->target_cpus()) -#define apic_id_registered (apic->apic_id_registered) #define init_apic_ldr (apic->init_apic_ldr) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) @@ -57,7 +56,7 @@ static inline void init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline int apic_id_registered(void) +static inline int default_apic_id_registered(void) { return physid_isset(read_apic_id(), phys_cpu_present_map); } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 59972d94ff1..cc6e9d70f06 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -10,7 +10,6 @@ #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (apic->apic_destination_logical) #define TARGET_CPUS (apic->target_cpus()) -#define apic_id_registered (apic->apic_id_registered) #define init_apic_ldr (apic->init_apic_ldr) #define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing (apic->setup_apic_routing) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index bf37bc49bd8..59b62b19d02 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -28,7 +28,7 @@ static inline unsigned long check_apicid_present(int bit) } #define apicid_cluster(apicid) (apicid & 0xF0) -static inline int apic_id_registered(void) +static inline int numaq_apic_id_registered(void) { return 1; } diff 
--git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 93d2c8667cf..a36ef6e4b1f 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -74,7 +74,7 @@ static inline int multi_timer_check(int apic, int irq) return 0; } -static inline int apic_id_registered(void) +static inline int summit_apic_id_registered(void) { return 1; } diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index c6f15647eba..b6740de18fb 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1171,7 +1171,7 @@ void __cpuinit setup_local_APIC(void) * Double-check whether this APIC is really registered. * This is meaningless in clustered apic mode, so we skip it. */ - if (!apic_id_registered()) + if (!apic->apic_id_registered()) BUG(); /* diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 22c3608b80d..17abf5c6242 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -62,7 +62,7 @@ struct genapic apic_bigsmp = { .name = "bigsmp", .probe = probe_bigsmp, .acpi_madt_oem_check = NULL, - .apic_id_registered = apic_id_registered, + .apic_id_registered = bigsmp_apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, .int_dest_mode = INT_DEST_MODE, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index cfec3494a96..1f30559e9d8 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -29,7 +29,7 @@ struct genapic apic_default = { .name = "default", .probe = probe_default, .acpi_madt_oem_check = NULL, - .apic_id_registered = apic_id_registered, + .apic_id_registered = default_apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, .int_dest_mode = INT_DEST_MODE, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 23fe6f1c969..d68ca0bce67 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -105,7 +105,7 @@ struct genapic apic_es7000 = { .name = "es7000", .probe = probe_es7000, .acpi_madt_oem_check = es7000_acpi_madt_oem_check, - .apic_id_registered = apic_id_registered, + .apic_id_registered = es7000_apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, .int_dest_mode = INT_DEST_MODE, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 9691b4e1654..b22a79b15b1 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -49,7 +49,7 @@ struct genapic apic_numaq = { .name = "NUMAQ", .probe = probe_numaq, .acpi_madt_oem_check = NULL, - .apic_id_registered = apic_id_registered, + .apic_id_registered = numaq_apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, .int_dest_mode = INT_DEST_MODE, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 0eea9fbb2a5..744fa1b86ef 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -42,7 +42,7 @@ struct genapic apic_summit = { .name = "summit", .probe = probe_summit, .acpi_madt_oem_check = summit_acpi_madt_oem_check, - .apic_id_registered = apic_id_registered, + .apic_id_registered = summit_apic_id_registered, .int_delivery_mode = INT_DELIVERY_MODE, .int_dest_mode = INT_DEST_MODE, -- cgit v1.2.3 From f8987a1093cc7a896137e264c24e04d4048e9f95 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:02:31 +0100 Subject: x86, genapic: rename int_delivery_mode, et. al. 
int_delivery_mode is supposed to mean 'interrupt delivery mode', but it's quite a misnomer as 'int' we usually think of as an integer type ... The standard naming for such attributes is 'irq' - so rename the following fields and macros: int_delivery_mode => irq_delivery_mode INT_DELIVERY_MODE => IRQ_DELIVERY_MODE int_dest_mode => irq_dest_mode INT_DEST_MODE => IRQ_DEST_MODE Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 4 ++-- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/genapic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 8 +++---- arch/x86/include/asm/mach-generic/mach_apic.h | 4 ++-- arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 4 ++-- arch/x86/kernel/genapic_flat_64.c | 8 +++---- arch/x86/kernel/genx2apic_cluster.c | 4 ++-- arch/x86/kernel/genx2apic_phys.c | 4 ++-- arch/x86/kernel/genx2apic_uv_x.c | 4 ++-- arch/x86/kernel/io_apic.c | 30 +++++++++++++-------------- arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 8 +++---- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 17 files changed, 53 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 42c56df3ff3..8ff8bba8833 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -21,8 +21,8 @@ static inline const cpumask_t *target_cpus(void) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0 #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target proc */ +#define IRQ_DELIVERY_MODE (dest_Fixed) +#define IRQ_DEST_MODE (0) /* phys delivery to target proc */ #define NO_BALANCE_IRQ (0) static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index a1819b510de..830e8731cc0 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -27,8 +27,8 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ_CLUSTER (1) #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target procs */ +#define IRQ_DELIVERY_MODE (dest_Fixed) +#define IRQ_DEST_MODE (0) /* phys delivery to target procs */ #define NO_BALANCE_IRQ (0) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0x0 diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 108abdf6953..e998e3df5d2 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -23,8 +23,8 @@ struct genapic { int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); int (*apic_id_registered)(void); - u32 int_delivery_mode; - u32 int_dest_mode; + u32 irq_delivery_mode; + u32 irq_dest_mode; const struct cpumask *(*target_cpus)(void); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 6a454fa0b43..b5364793262 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -22,8 +22,8 @@ static inline const struct cpumask *target_cpus(void) #ifdef CONFIG_X86_64 #include -#define INT_DELIVERY_MODE (apic->int_delivery_mode) -#define INT_DEST_MODE (apic->int_dest_mode) +#define IRQ_DELIVERY_MODE (apic->irq_delivery_mode) +#define IRQ_DEST_MODE 
(apic->irq_dest_mode) #define TARGET_CPUS (apic->target_cpus()) #define init_apic_ldr (apic->init_apic_ldr) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) @@ -35,8 +35,8 @@ static inline const struct cpumask *target_cpus(void) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ +#define IRQ_DELIVERY_MODE dest_LowestPrio +#define IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ #define TARGET_CPUS (target_cpus()) #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index cc6e9d70f06..03492f2219e 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -5,8 +5,8 @@ #define esr_disable (apic->ESR_DISABLE) #define NO_BALANCE_IRQ (apic->no_balance_irq) -#define INT_DELIVERY_MODE (apic->int_delivery_mode) -#define INT_DEST_MODE (apic->int_dest_mode) +#define IRQ_DELIVERY_MODE (apic->irq_delivery_mode) +#define IRQ_DEST_MODE (apic->irq_dest_mode) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (apic->apic_destination_logical) #define TARGET_CPUS (apic->target_cpus()) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 59b62b19d02..d885e35df18 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -15,8 +15,8 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ (1) #define esr_disable (1) -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ +#define IRQ_DELIVERY_MODE dest_LowestPrio +#define IRQ_DEST_MODE 0 /* physical delivery on LOCAL quad */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index a36ef6e4b1f..0b7d0d14e56 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -24,8 +24,8 @@ static inline const cpumask_t *target_cpus(void) return &cpumask_of_cpu(0); } -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ +#define IRQ_DELIVERY_MODE (dest_LowestPrio) +#define IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index e9233374cef..0a263d6bb5e 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -180,8 +180,8 @@ struct genapic apic_flat = { .acpi_madt_oem_check = flat_acpi_madt_oem_check, .apic_id_registered = flat_apic_id_registered, - .int_delivery_mode = dest_LowestPrio, - .int_dest_mode = (APIC_DEST_LOGICAL != 0), + .irq_delivery_mode = dest_LowestPrio, + .irq_dest_mode = (APIC_DEST_LOGICAL != 0), .target_cpus = flat_target_cpus, .ESR_DISABLE = 0, @@ -326,8 +326,8 @@ struct genapic apic_physflat = { .acpi_madt_oem_check = physflat_acpi_madt_oem_check, .apic_id_registered = flat_apic_id_registered, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = physflat_target_cpus, .ESR_DISABLE = 0, diff --git a/arch/x86/kernel/genx2apic_cluster.c 
b/arch/x86/kernel/genx2apic_cluster.c index fc855e503ac..e9ff7dc9a0f 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -182,8 +182,8 @@ struct genapic apic_x2apic_cluster = { .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, .apic_id_registered = x2apic_apic_id_registered, - .int_delivery_mode = dest_LowestPrio, - .int_dest_mode = (APIC_DEST_LOGICAL != 0), + .irq_delivery_mode = dest_LowestPrio, + .irq_dest_mode = (APIC_DEST_LOGICAL != 0), .target_cpus = x2apic_target_cpus, .ESR_DISABLE = 0, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index c98361fb7ee..8141b5a88f6 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -178,8 +178,8 @@ struct genapic apic_x2apic_phys = { .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, .apic_id_registered = x2apic_apic_id_registered, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = x2apic_target_cpus, .ESR_DISABLE = 0, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 94f606f204a..6a73cad0d3e 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -243,8 +243,8 @@ struct genapic apic_x2apic_uv_x = { .acpi_madt_oem_check = uv_acpi_madt_oem_check, .apic_id_registered = uv_apic_id_registered, - .int_delivery_mode = dest_Fixed, - .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = uv_target_cpus, .ESR_DISABLE = 0, diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 7283234229f..5f967b9c9af 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1514,9 +1514,9 @@ static int setup_ioapic_entry(int apic_id, int irq, memset(&irte, 0, sizeof(irte)); irte.present = 1; - irte.dst_mode = INT_DEST_MODE; + irte.dst_mode = IRQ_DEST_MODE; irte.trigger_mode = trigger; - irte.dlvry_mode = INT_DELIVERY_MODE; + irte.dlvry_mode = IRQ_DELIVERY_MODE; irte.vector = vector; irte.dest_id = IRTE_DEST(destination); @@ -1529,8 +1529,8 @@ static int setup_ioapic_entry(int apic_id, int irq, } else #endif { - entry->delivery_mode = INT_DELIVERY_MODE; - entry->dest_mode = INT_DEST_MODE; + entry->delivery_mode = IRQ_DELIVERY_MODE; + entry->dest_mode = IRQ_DEST_MODE; entry->dest = destination; } @@ -1659,10 +1659,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, * We use logical delivery to get the timer IRQ * to the first CPU. */ - entry.dest_mode = INT_DEST_MODE; + entry.dest_mode = IRQ_DEST_MODE; entry.mask = 1; /* mask IRQ now */ entry.dest = cpu_mask_to_apicid(TARGET_CPUS); - entry.delivery_mode = INT_DELIVERY_MODE; + entry.delivery_mode = IRQ_DELIVERY_MODE; entry.polarity = 0; entry.trigger = 0; entry.vector = vector; @@ -3279,9 +3279,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms memset (&irte, 0, sizeof(irte)); irte.present = 1; - irte.dst_mode = INT_DEST_MODE; + irte.dst_mode = IRQ_DEST_MODE; irte.trigger_mode = 0; /* edge */ - irte.dlvry_mode = INT_DELIVERY_MODE; + irte.dlvry_mode = IRQ_DELIVERY_MODE; irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -3299,10 +3299,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = MSI_ADDR_BASE_LO | - ((INT_DEST_MODE == 0) ? 
+ ((IRQ_DEST_MODE == 0) ? MSI_ADDR_DEST_MODE_PHYSICAL: MSI_ADDR_DEST_MODE_LOGICAL) | - ((INT_DELIVERY_MODE != dest_LowestPrio) ? + ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? MSI_ADDR_REDIRECTION_CPU: MSI_ADDR_REDIRECTION_LOWPRI) | MSI_ADDR_DEST_ID(dest); @@ -3310,7 +3310,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | - ((INT_DELIVERY_MODE != dest_LowestPrio) ? + ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? MSI_DATA_DELIVERY_FIXED: MSI_DATA_DELIVERY_LOWPRI) | MSI_DATA_VECTOR(cfg->vector); @@ -3711,11 +3711,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) HT_IRQ_LOW_BASE | HT_IRQ_LOW_DEST_ID(dest) | HT_IRQ_LOW_VECTOR(cfg->vector) | - ((INT_DEST_MODE == 0) ? + ((IRQ_DEST_MODE == 0) ? HT_IRQ_LOW_DM_PHYSICAL : HT_IRQ_LOW_DM_LOGICAL) | HT_IRQ_LOW_RQEOI_EDGE | - ((INT_DELIVERY_MODE != dest_LowestPrio) ? + ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? HT_IRQ_LOW_MT_FIXED : HT_IRQ_LOW_MT_ARBITRATED) | HT_IRQ_LOW_IRQ_MASKED; @@ -3763,8 +3763,8 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); entry->vector = cfg->vector; - entry->delivery_mode = INT_DELIVERY_MODE; - entry->dest_mode = INT_DEST_MODE; + entry->delivery_mode = IRQ_DELIVERY_MODE; + entry->dest_mode = IRQ_DEST_MODE; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 17abf5c6242..c15c1aa2dc7 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -64,8 +64,8 @@ struct genapic apic_bigsmp = { .acpi_madt_oem_check = NULL, .apic_id_registered = bigsmp_apic_id_registered, - .int_delivery_mode = INT_DELIVERY_MODE, - .int_dest_mode = INT_DEST_MODE, + .irq_delivery_mode = IRQ_DELIVERY_MODE, + .irq_dest_mode = IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 1f30559e9d8..d32b175eff8 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -31,8 +31,8 @@ struct genapic apic_default = { .acpi_madt_oem_check = NULL, .apic_id_registered = default_apic_id_registered, - .int_delivery_mode = INT_DELIVERY_MODE, - .int_dest_mode = INT_DEST_MODE, + .irq_delivery_mode = IRQ_DELIVERY_MODE, + .irq_dest_mode = IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index d68ca0bce67..06653892953 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -21,8 +21,8 @@ void __init es7000_update_genapic_to_cluster(void) { apic->target_cpus = target_cpus_cluster; - apic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER; - apic->int_dest_mode = INT_DEST_MODE_CLUSTER; + apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; + apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; apic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER; apic->init_apic_ldr = init_apic_ldr_cluster; @@ -107,8 +107,8 @@ struct genapic apic_es7000 = { .acpi_madt_oem_check = es7000_acpi_madt_oem_check, .apic_id_registered = es7000_apic_id_registered, - .int_delivery_mode = INT_DELIVERY_MODE, - .int_dest_mode = INT_DEST_MODE, + .irq_delivery_mode = IRQ_DELIVERY_MODE, + .irq_dest_mode = IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 
b22a79b15b1..401957142fd 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -51,8 +51,8 @@ struct genapic apic_numaq = { .acpi_madt_oem_check = NULL, .apic_id_registered = numaq_apic_id_registered, - .int_delivery_mode = INT_DELIVERY_MODE, - .int_dest_mode = INT_DEST_MODE, + .irq_delivery_mode = IRQ_DELIVERY_MODE, + .irq_dest_mode = IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 744fa1b86ef..946da7aa762 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -44,8 +44,8 @@ struct genapic apic_summit = { .acpi_madt_oem_check = summit_acpi_madt_oem_check, .apic_id_registered = summit_apic_id_registered, - .int_delivery_mode = INT_DELIVERY_MODE, - .int_dest_mode = INT_DEST_MODE, + .irq_delivery_mode = IRQ_DELIVERY_MODE, + .irq_dest_mode = IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From 9b5bc8dc12421a4b17047061f473d85c1797d543 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:09:58 +0100 Subject: x86, apic: remove IRQ_DEST_MODE / IRQ_DELIVERY_MODE Remove the wrapper macros IRQ_DEST_MODE and IRQ_DELIVERY_MODE. The typical 32-bit and the 64-bit build all dereference via the genapic, so it's pointless to hide that indirection via these ugly macros. Furthermore, it also obscures subarchitecture details. So replace it with apic->irq_dest_mode / etc. accesses. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 4 ++-- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 5 ++--- arch/x86/include/asm/mach-generic/mach_apic.h | 2 -- arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 4 ++-- arch/x86/kernel/io_apic.c | 30 +++++++++++++-------------- arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 12 files changed, 35 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 8ff8bba8833..293551b0e61 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -21,8 +21,8 @@ static inline const cpumask_t *target_cpus(void) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0 #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define IRQ_DELIVERY_MODE (dest_Fixed) -#define IRQ_DEST_MODE (0) /* phys delivery to target proc */ +#define BIGSMP_IRQ_DELIVERY_MODE (dest_Fixed) +#define BIGSMP_IRQ_DEST_MODE (0) /* phys delivery to target proc */ #define NO_BALANCE_IRQ (0) static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 830e8731cc0..690016683f2 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -27,8 +27,8 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ_CLUSTER (1) #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define IRQ_DELIVERY_MODE (dest_Fixed) -#define IRQ_DEST_MODE (0) /* phys delivery to target procs */ +#define ES7000_IRQ_DELIVERY_MODE (dest_Fixed) +#define ES7000_IRQ_DEST_MODE (0) /* phys delivery to target procs */ #define NO_BALANCE_IRQ (0) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0x0 diff --git a/arch/x86/include/asm/mach-default/mach_apic.h 
b/arch/x86/include/asm/mach-default/mach_apic.h index b5364793262..eafbf4f2038 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -22,7 +22,6 @@ static inline const struct cpumask *target_cpus(void) #ifdef CONFIG_X86_64 #include -#define IRQ_DELIVERY_MODE (apic->irq_delivery_mode) #define IRQ_DEST_MODE (apic->irq_dest_mode) #define TARGET_CPUS (apic->target_cpus()) #define init_apic_ldr (apic->init_apic_ldr) @@ -35,8 +34,8 @@ static inline const struct cpumask *target_cpus(void) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else -#define IRQ_DELIVERY_MODE dest_LowestPrio -#define IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ +#define DEFAULT_IRQ_DELIVERY_MODE dest_LowestPrio +#define DEFAULT_IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ #define TARGET_CPUS (target_cpus()) #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 03492f2219e..387a5d00c43 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -5,8 +5,6 @@ #define esr_disable (apic->ESR_DISABLE) #define NO_BALANCE_IRQ (apic->no_balance_irq) -#define IRQ_DELIVERY_MODE (apic->irq_delivery_mode) -#define IRQ_DEST_MODE (apic->irq_dest_mode) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (apic->apic_destination_logical) #define TARGET_CPUS (apic->target_cpus()) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index d885e35df18..7746035c591 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -15,8 +15,8 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ (1) #define esr_disable (1) -#define IRQ_DELIVERY_MODE dest_LowestPrio -#define IRQ_DEST_MODE 0 /* physical delivery on LOCAL quad */ +#define NUMAQ_IRQ_DELIVERY_MODE dest_LowestPrio +#define NUMAQ_IRQ_DEST_MODE 0 /* physical delivery on LOCAL quad */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 0b7d0d14e56..ea2abe9b597 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -24,8 +24,8 @@ static inline const cpumask_t *target_cpus(void) return &cpumask_of_cpu(0); } -#define IRQ_DELIVERY_MODE (dest_LowestPrio) -#define IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ +#define SUMMIT_IRQ_DELIVERY_MODE (dest_LowestPrio) +#define SUMMIT_IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 5f967b9c9af..301b6571d70 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1514,9 +1514,9 @@ static int setup_ioapic_entry(int apic_id, int irq, memset(&irte, 0, sizeof(irte)); irte.present = 1; - irte.dst_mode = IRQ_DEST_MODE; + irte.dst_mode = apic->irq_dest_mode; irte.trigger_mode = trigger; - irte.dlvry_mode = IRQ_DELIVERY_MODE; + irte.dlvry_mode = apic->irq_delivery_mode; irte.vector = vector; irte.dest_id = IRTE_DEST(destination); @@ -1529,8 +1529,8 @@ static int setup_ioapic_entry(int apic_id, int irq, } else #endif { - entry->delivery_mode = IRQ_DELIVERY_MODE; - entry->dest_mode = IRQ_DEST_MODE; + entry->delivery_mode = 
apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; entry->dest = destination; } @@ -1659,10 +1659,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, * We use logical delivery to get the timer IRQ * to the first CPU. */ - entry.dest_mode = IRQ_DEST_MODE; + entry.dest_mode = apic->irq_dest_mode; entry.mask = 1; /* mask IRQ now */ entry.dest = cpu_mask_to_apicid(TARGET_CPUS); - entry.delivery_mode = IRQ_DELIVERY_MODE; + entry.delivery_mode = apic->irq_delivery_mode; entry.polarity = 0; entry.trigger = 0; entry.vector = vector; @@ -3279,9 +3279,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms memset (&irte, 0, sizeof(irte)); irte.present = 1; - irte.dst_mode = IRQ_DEST_MODE; + irte.dst_mode = apic->irq_dest_mode; irte.trigger_mode = 0; /* edge */ - irte.dlvry_mode = IRQ_DELIVERY_MODE; + irte.dlvry_mode = apic->irq_delivery_mode; irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -3299,10 +3299,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = MSI_ADDR_BASE_LO | - ((IRQ_DEST_MODE == 0) ? + ((apic->irq_dest_mode == 0) ? MSI_ADDR_DEST_MODE_PHYSICAL: MSI_ADDR_DEST_MODE_LOGICAL) | - ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? + ((apic->irq_delivery_mode != dest_LowestPrio) ? MSI_ADDR_REDIRECTION_CPU: MSI_ADDR_REDIRECTION_LOWPRI) | MSI_ADDR_DEST_ID(dest); @@ -3310,7 +3310,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | - ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? + ((apic->irq_delivery_mode != dest_LowestPrio) ? MSI_DATA_DELIVERY_FIXED: MSI_DATA_DELIVERY_LOWPRI) | MSI_DATA_VECTOR(cfg->vector); @@ -3711,11 +3711,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) HT_IRQ_LOW_BASE | HT_IRQ_LOW_DEST_ID(dest) | HT_IRQ_LOW_VECTOR(cfg->vector) | - ((IRQ_DEST_MODE == 0) ? + ((apic->irq_dest_mode == 0) ? HT_IRQ_LOW_DM_PHYSICAL : HT_IRQ_LOW_DM_LOGICAL) | HT_IRQ_LOW_RQEOI_EDGE | - ((IRQ_DELIVERY_MODE != dest_LowestPrio) ? + ((apic->irq_delivery_mode != dest_LowestPrio) ? 
HT_IRQ_LOW_MT_FIXED : HT_IRQ_LOW_MT_ARBITRATED) | HT_IRQ_LOW_IRQ_MASKED; @@ -3763,8 +3763,8 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); entry->vector = cfg->vector; - entry->delivery_mode = IRQ_DELIVERY_MODE; - entry->dest_mode = IRQ_DEST_MODE; + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index c15c1aa2dc7..e8c1ceca7c9 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -64,8 +64,8 @@ struct genapic apic_bigsmp = { .acpi_madt_oem_check = NULL, .apic_id_registered = bigsmp_apic_id_registered, - .irq_delivery_mode = IRQ_DELIVERY_MODE, - .irq_dest_mode = IRQ_DEST_MODE, + .irq_delivery_mode = BIGSMP_IRQ_DELIVERY_MODE, + .irq_dest_mode = BIGSMP_IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d32b175eff8..0482106f0e1 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -31,8 +31,8 @@ struct genapic apic_default = { .acpi_madt_oem_check = NULL, .apic_id_registered = default_apic_id_registered, - .irq_delivery_mode = IRQ_DELIVERY_MODE, - .irq_dest_mode = IRQ_DEST_MODE, + .irq_delivery_mode = DEFAULT_IRQ_DELIVERY_MODE, + .irq_dest_mode = DEFAULT_IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 06653892953..5d97408919b 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -107,8 +107,8 @@ struct genapic apic_es7000 = { .acpi_madt_oem_check = es7000_acpi_madt_oem_check, .apic_id_registered = es7000_apic_id_registered, - .irq_delivery_mode = IRQ_DELIVERY_MODE, - .irq_dest_mode = IRQ_DEST_MODE, + .irq_delivery_mode = ES7000_IRQ_DELIVERY_MODE, + .irq_dest_mode = ES7000_IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 401957142fd..77ac66935fd 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -51,8 +51,8 @@ struct genapic apic_numaq = { .acpi_madt_oem_check = NULL, .apic_id_registered = numaq_apic_id_registered, - .irq_delivery_mode = IRQ_DELIVERY_MODE, - .irq_dest_mode = IRQ_DEST_MODE, + .irq_delivery_mode = NUMAQ_IRQ_DELIVERY_MODE, + .irq_dest_mode = NUMAQ_IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 946da7aa762..7b3f43caf2a 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -44,8 +44,8 @@ struct genapic apic_summit = { .acpi_madt_oem_check = summit_acpi_madt_oem_check, .apic_id_registered = summit_apic_id_registered, - .irq_delivery_mode = IRQ_DELIVERY_MODE, - .irq_dest_mode = IRQ_DEST_MODE, + .irq_delivery_mode = SUMMIT_IRQ_DELIVERY_MODE, + .irq_dest_mode = SUMMIT_IRQ_DEST_MODE, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From dcafa4a8c95ce063cbae0a5e61632bc3c4924e66 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:16:01 +0100 Subject: x86, apic: remove DEFAULT_IRQ_DELIVERY_MODE and DEFAULT_IRQ_DEST_MODE Impact: cleanup They were only used in a single place and obscured the apic_default 
template. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_apic.h | 2 -- arch/x86/mach-generic/default.c | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index eafbf4f2038..f3b2cd42388 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -34,8 +34,6 @@ static inline const struct cpumask *target_cpus(void) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else -#define DEFAULT_IRQ_DELIVERY_MODE dest_LowestPrio -#define DEFAULT_IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ #define TARGET_CPUS (target_cpus()) #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 0482106f0e1..fe97b0114a0 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -31,8 +31,9 @@ struct genapic apic_default = { .acpi_madt_oem_check = NULL, .apic_id_registered = default_apic_id_registered, - .irq_delivery_mode = DEFAULT_IRQ_DELIVERY_MODE, - .irq_dest_mode = DEFAULT_IRQ_DEST_MODE, + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From 82daea6b0890f739be1ad4ab1c1b922b1555582e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:16:01 +0100 Subject: x86, apic: remove SUMMIT_IRQ_DELIVERY_MODE and SUMMIT_IRQ_DEST_MODE Impact: cleanup They were only used in a single place and obscured the apic_summit template. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/summit/apic.h | 3 --- arch/x86/mach-generic/summit.c | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index ea2abe9b597..427d0889f6f 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -24,9 +24,6 @@ static inline const cpumask_t *target_cpus(void) return &cpumask_of_cpu(0); } -#define SUMMIT_IRQ_DELIVERY_MODE (dest_LowestPrio) -#define SUMMIT_IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ - static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 7b3f43caf2a..1b9164b92b0 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -44,8 +44,9 @@ struct genapic apic_summit = { .acpi_madt_oem_check = summit_acpi_madt_oem_check, .apic_id_registered = summit_apic_id_registered, - .irq_delivery_mode = SUMMIT_IRQ_DELIVERY_MODE, - .irq_dest_mode = SUMMIT_IRQ_DEST_MODE, + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From 1b1bcb3ff4e4934d949574cec90679219ace5412 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:16:01 +0100 Subject: x86, apic: remove NUMAQ_IRQ_DELIVERY_MODE and NUMAQ_IRQ_DEST_MODE Impact: cleanup They were only used in a single place and obscured the apic_numaq template. 
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/numaq/apic.h | 3 --- arch/x86/mach-generic/numaq.c | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 7746035c591..a9d846769a0 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -15,9 +15,6 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ (1) #define esr_disable (1) -#define NUMAQ_IRQ_DELIVERY_MODE dest_LowestPrio -#define NUMAQ_IRQ_DEST_MODE 0 /* physical delivery on LOCAL quad */ - static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return physid_isset(apicid, bitmap); diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 77ac66935fd..6daddb6949d 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -51,8 +51,9 @@ struct genapic apic_numaq = { .acpi_madt_oem_check = NULL, .apic_id_registered = numaq_apic_id_registered, - .irq_delivery_mode = NUMAQ_IRQ_DELIVERY_MODE, - .irq_dest_mode = NUMAQ_IRQ_DEST_MODE, + .irq_delivery_mode = dest_LowestPrio, + /* physical delivery on LOCAL quad: */ + .irq_dest_mode = 0, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From d8a3539e64f8e27b0ab5bb7e7ba3b8f34b739224 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:16:01 +0100 Subject: x86, apic: remove BIGSMP_IRQ_DELIVERY_MODE and BIGSMP_IRQ_DEST_MODE Impact: cleanup They were only used in a single place and obscured the apic_bigsmp driver template. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 -- arch/x86/mach-generic/bigsmp.c | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 293551b0e61..dca2d5b01da 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -21,8 +21,6 @@ static inline const cpumask_t *target_cpus(void) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0 #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define BIGSMP_IRQ_DELIVERY_MODE (dest_Fixed) -#define BIGSMP_IRQ_DEST_MODE (0) /* phys delivery to target proc */ #define NO_BALANCE_IRQ (0) static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index e8c1ceca7c9..06be776067a 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -64,8 +64,9 @@ struct genapic apic_bigsmp = { .acpi_madt_oem_check = NULL, .apic_id_registered = bigsmp_apic_id_registered, - .irq_delivery_mode = BIGSMP_IRQ_DELIVERY_MODE, - .irq_dest_mode = BIGSMP_IRQ_DEST_MODE, + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPU: */ + .irq_dest_mode = 0, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From 38bd77a6c35168b03b65f7438cdcc1257d550924 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:16:01 +0100 Subject: x86, apic: remove ES7000_IRQ_DELIVERY_MODE and ES7000_IRQ_DEST_MODE Impact: cleanup They were only used in a single place and obscured the apic_es7000 driver template. 
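On the consumer side, shown in the io_apic.c hunks earlier in this series, code that used to expand the IRQ_DEST_MODE / IRQ_DELIVERY_MODE macros now dereferences the installed driver explicitly. A hedged, standalone model of that call pattern follows; the struct, the route-entry layout and the ES7000-style values are simplified stand-ins, not the kernel's types or the real io_apic.c code.

#include <stdio.h>

enum delivery_mode { dest_LowestPrio, dest_Fixed };

struct apic_driver {
    int irq_delivery_mode;
    int irq_dest_mode;          /* 1 = logical, 0 = physical */
};

/* The driver picked at boot; generic code only ever sees this pointer. */
static struct apic_driver es7000_like = {
    .irq_delivery_mode = dest_Fixed,
    .irq_dest_mode     = 0,     /* phys delivery to target CPUs */
};
static struct apic_driver *apic = &es7000_like;

struct route_entry {
    int delivery_mode;
    int dest_mode;
    int dest;
};

/* Was:  entry->delivery_mode = IRQ_DELIVERY_MODE;
 *       entry->dest_mode     = IRQ_DEST_MODE;     */
static void fill_route_entry(struct route_entry *entry, int destination)
{
    entry->delivery_mode = apic->irq_delivery_mode;
    entry->dest_mode     = apic->irq_dest_mode;
    entry->dest          = destination;
}

int main(void)
{
    struct route_entry e;

    fill_route_entry(&e, 1);
    printf("delivery=%d dest_mode=%d dest=%d\n",
           e.delivery_mode, e.dest_mode, e.dest);
    return 0;
}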
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 2 -- arch/x86/mach-generic/es7000.c | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 690016683f2..342416b3fad 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -27,8 +27,6 @@ static inline const cpumask_t *target_cpus(void) #define NO_BALANCE_IRQ_CLUSTER (1) #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define ES7000_IRQ_DELIVERY_MODE (dest_Fixed) -#define ES7000_IRQ_DEST_MODE (0) /* phys delivery to target procs */ #define NO_BALANCE_IRQ (0) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0x0 diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 5d97408919b..269a97aef43 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -107,8 +107,9 @@ struct genapic apic_es7000 = { .acpi_madt_oem_check = es7000_acpi_madt_oem_check, .apic_id_registered = es7000_apic_id_registered, - .irq_delivery_mode = ES7000_IRQ_DELIVERY_MODE, - .irq_dest_mode = ES7000_IRQ_DEST_MODE, + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPUs: */ + .irq_dest_mode = 0, .target_cpus = target_cpus, .ESR_DISABLE = esr_disable, -- cgit v1.2.3 From 7fe732862d9697cc1863286fbcace9a67f231b4c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:24:16 +0100 Subject: x86, apic: remove IRQ_DEST_MODE Remove leftover definition. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_apic.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index f3b2cd42388..ce3bc4845b9 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -22,7 +22,6 @@ static inline const struct cpumask *target_cpus(void) #ifdef CONFIG_X86_64 #include -#define IRQ_DEST_MODE (apic->irq_dest_mode) #define TARGET_CPUS (apic->target_cpus()) #define init_apic_ldr (apic->init_apic_ldr) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) -- cgit v1.2.3 From 0a9cc20b9c18372ba5a9fea990f5812f3ee01e32 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:30:40 +0100 Subject: x86, apic: clean up target_cpus methods Impact: cleanup Clean up all the target_cpus() namespace overlap that exists between bigsmp, es7000, mach-default, numaq and summit - by separating the different functions into different names. 
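The namespace cleanup follows one rule: each subarchitecture's helper gets a uniquely prefixed name (bigsmp_target_cpus(), default_target_cpus(), es7000_target_cpus(), and so on) and is wired into its template's .target_cpus member, which is what lets the follow-up commit drop the TARGET_CPUS wrapper and have callers use apic->target_cpus() directly. A small standalone model of that wiring, with a crude cpumask placeholder and made-up driver names, assuming nothing beyond what the diffs show:

#include <stdio.h>

typedef unsigned long cpumask_t;   /* crude stand-in: one bit per CPU */

static cpumask_t cpu_online_map = 0x3;   /* pretend CPUs 0 and 1 are online */
static cpumask_t cpu0_only      = 0x1;

/* Each subarchitecture exports its own, uniquely named helper ... */
static const cpumask_t *default_like_target_cpus(void) { return &cpu_online_map; }
static const cpumask_t *summit_like_target_cpus(void)  { return &cpu0_only; }

struct apic_driver {
    const char *name;
    const cpumask_t *(*target_cpus)(void);
};

/* ... and the template simply points at it: */
static struct apic_driver apic_default_like = {
    .name        = "default-like",
    .target_cpus = default_like_target_cpus,
};
static struct apic_driver apic_summit_like = {
    .name        = "summit-like",
    .target_cpus = summit_like_target_cpus,
};

static struct apic_driver *apic = &apic_default_like;

int main(void)
{
    /* Was:  mask = TARGET_CPUS;  now callers dereference the driver: */
    printf("%s target mask: %#lx\n", apic->name, *apic->target_cpus());

    apic = &apic_summit_like;
    printf("%s target mask: %#lx\n", apic->name, *apic->target_cpus());
    return 0;
}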
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 4 ++-- arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index dca2d5b01da..d6aeca3c5a8 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -9,7 +9,7 @@ static inline int bigsmp_apic_id_registered(void) return 1; } -static inline const cpumask_t *target_cpus(void) +static inline const cpumask_t *bigsmp_target_cpus(void) { #ifdef CONFIG_SMP return &cpu_online_map; diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 342416b3fad..7e5c31a4f8d 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -16,7 +16,7 @@ static inline const cpumask_t *target_cpus_cluster(void) return &CPU_MASK_ALL; } -static inline const cpumask_t *target_cpus(void) +static inline const cpumask_t *es7000_target_cpus(void) { return &cpumask_of_cpu(smp_processor_id()); } @@ -83,7 +83,7 @@ static inline void setup_apic_routing(void) printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? "Physical Cluster" : "Logical Cluster", - nr_ioapics, cpus_addr(*target_cpus())[0]); + nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); } static inline int multi_timer_check(int apic, int irq) diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index ce3bc4845b9..af1607ddd2a 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -8,7 +8,7 @@ #define APIC_DFR_VALUE (APIC_DFR_FLAT) -static inline const struct cpumask *target_cpus(void) +static inline const struct cpumask *default_target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_mask; @@ -33,7 +33,7 @@ static inline const struct cpumask *target_cpus(void) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else -#define TARGET_CPUS (target_cpus()) +#define TARGET_CPUS (default_target_cpus()) #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* * Set up the logical destination ID. 
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index a9d846769a0..1111ff9e41d 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -7,7 +7,7 @@ #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline const cpumask_t *target_cpus(void) +static inline const cpumask_t *numaq_target_cpus(void) { return &CPU_MASK_ALL; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 427d0889f6f..7c1f9151429 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -15,7 +15,7 @@ #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline const cpumask_t *target_cpus(void) +static inline const cpumask_t *summit_target_cpus(void) { /* CPU_MASK_ALL (0xff) has undefined behaviour with * dest_LowestPrio mode logical clustered apic interrupt routing diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 06be776067a..d3cead2d2fc 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -68,7 +68,7 @@ struct genapic apic_bigsmp = { /* phys delivery to target CPU: */ .irq_dest_mode = 0, - .target_cpus = target_cpus, + .target_cpus = bigsmp_target_cpus, .ESR_DISABLE = esr_disable, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index fe97b0114a0..a483e22273e 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -35,7 +35,7 @@ struct genapic apic_default = { /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, - .target_cpus = target_cpus, + .target_cpus = default_target_cpus, .ESR_DISABLE = esr_disable, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 269a97aef43..e31f0c35470 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -111,7 +111,7 @@ struct genapic apic_es7000 = { /* phys delivery to target CPUs: */ .irq_dest_mode = 0, - .target_cpus = target_cpus, + .target_cpus = es7000_target_cpus, .ESR_DISABLE = esr_disable, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 6daddb6949d..4b84b5970fb 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -55,7 +55,7 @@ struct genapic apic_numaq = { /* physical delivery on LOCAL quad: */ .irq_dest_mode = 0, - .target_cpus = target_cpus, + .target_cpus = numaq_target_cpus, .ESR_DISABLE = esr_disable, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 1b9164b92b0..e6b956a0848 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -48,7 +48,7 @@ struct genapic apic_summit = { /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, - .target_cpus = target_cpus, + .target_cpus = summit_target_cpus, .ESR_DISABLE = esr_disable, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, -- cgit v1.2.3 From fe402e1f2b67a63f1e53ab2a316fc20f7ca4ec91 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 04:32:51 +0100 Subject: x86, apic: clean up / remove TARGET_CPUS Impact: cleanup use apic->target_cpus() directly instead of the TARGET_CPUS 
wrapper. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 2 -- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/io_apic.c | 22 +++++++++++----------- 5 files changed, 14 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 7e5c31a4f8d..53adda099c9 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -161,7 +161,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) return 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. + * on the same apicid cluster return default value of target_cpus(): */ cpu = cpumask_first(cpumask); apicid = cpu_to_logical_apicid(cpu); @@ -194,7 +194,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return cpu_to_logical_apicid(0); /* * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. + * on the same apicid cluster return default value of target_cpus(): */ cpu = first_cpu(*cpumask); apicid = cpu_to_logical_apicid(cpu); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index af1607ddd2a..77a97247587 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -22,7 +22,6 @@ static inline const struct cpumask *default_target_cpus(void) #ifdef CONFIG_X86_64 #include -#define TARGET_CPUS (apic->target_cpus()) #define init_apic_ldr (apic->init_apic_ldr) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) @@ -33,7 +32,6 @@ static inline const struct cpumask *default_target_cpus(void) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void setup_apic_routing(void); #else -#define TARGET_CPUS (default_target_cpus()) #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* * Set up the logical destination ID. diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 387a5d00c43..da2d7780cb5 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -7,7 +7,6 @@ #define NO_BALANCE_IRQ (apic->no_balance_irq) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (apic->apic_destination_logical) -#define TARGET_CPUS (apic->target_cpus()) #define init_apic_ldr (apic->init_apic_ldr) #define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing (apic->setup_apic_routing) diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 7c1f9151429..cf5036f1ce6 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -148,7 +148,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return (int) 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. 
+ * on the same apicid cluster return default value of target_cpus(): */ cpu = first_cpu(*cpumask); apicid = cpu_to_logical_apicid(cpu); diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 301b6571d70..7503285e180 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1559,10 +1559,10 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, TARGET_CPUS)) + if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; - dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); + dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " @@ -1661,7 +1661,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, */ entry.dest_mode = apic->irq_dest_mode; entry.mask = 1; /* mask IRQ now */ - entry.dest = cpu_mask_to_apicid(TARGET_CPUS); + entry.dest = cpu_mask_to_apicid(apic->target_cpus()); entry.delivery_mode = apic->irq_delivery_mode; entry.polarity = 0; entry.trigger = 0; @@ -2877,7 +2877,7 @@ static inline void __init check_timer(void) * get/set the timer IRQ vector: */ disable_8259A_irq(0); - assign_irq_vector(0, cfg, TARGET_CPUS); + assign_irq_vector(0, cfg, apic->target_cpus()); /* * As IRQ0 is to be enabled in the 8259A, the virtual @@ -3195,7 +3195,7 @@ unsigned int create_irq_nr(unsigned int irq_want) if (cfg_new->vector != 0) continue; - if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0) + if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) irq = new; break; } @@ -3261,11 +3261,11 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms return -ENXIO; cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, TARGET_CPUS); + err = assign_irq_vector(irq, cfg, apic->target_cpus()); if (err) return err; - dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); + dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); #ifdef CONFIG_INTR_REMAP if (irq_remapped(irq)) { @@ -3698,12 +3698,12 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) return -ENXIO; cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, TARGET_CPUS); + err = assign_irq_vector(irq, cfg, apic->target_cpus()); if (!err) { struct ht_irq_msg msg; unsigned dest; - dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); + dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); @@ -3987,7 +3987,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) /* * This function currently is only a helper for the i386 smp boot process where * we need to reprogram the ioredtbls to cater for the cpus which have come online - * so mask in all cases should simply be TARGET_CPUS + * so mask in all cases should simply be apic->target_cpus() */ #ifdef CONFIG_SMP void __init setup_ioapic_dest(void) @@ -4028,7 +4028,7 @@ void __init setup_ioapic_dest(void) (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) mask = desc->affinity; else - mask = TARGET_CPUS; + mask = apic->target_cpus(); #ifdef CONFIG_INTR_REMAP if (intr_remapping_enabled) -- cgit v1.2.3 From f6f52baf2613dd319e9ba3f3319bf1f1c442e4b3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:01:41 +0100 Subject: x86: clean up esr_disable() methods Impact: cleanup Most subarchitectures want to disable the APIC ESR (Error Status Register), because they generally have hardware hacks that wrap standard CPUs into 
a bigger system and hence the APIC bus is quite non-standard and weirdnesses (lockups) have been seen with ESR reporting. Remove the esr_disable macros and put the desired flag into each subarchitecture's genapic template directly. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 1 - arch/x86/include/asm/es7000/apic.h | 1 - arch/x86/include/asm/mach-default/mach_apic.h | 1 - arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 1 - arch/x86/include/asm/summit/apic.h | 1 - arch/x86/kernel/apic.c | 4 ++-- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 7 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index d6aeca3c5a8..b550cb11102 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -2,7 +2,6 @@ #define __ASM_MACH_APIC_H #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) -#define esr_disable (1) static inline int bigsmp_apic_id_registered(void) { diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 53adda099c9..aa11c768bed 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -4,7 +4,6 @@ #include #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) -#define esr_disable (1) static inline int es7000_apic_id_registered(void) { diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 77a97247587..5f8d17fdc96 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -18,7 +18,6 @@ static inline const struct cpumask *default_target_cpus(void) } #define NO_BALANCE_IRQ (0) -#define esr_disable (0) #ifdef CONFIG_X86_64 #include diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index da2d7780cb5..63fe985219f 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define esr_disable (apic->ESR_DISABLE) #define NO_BALANCE_IRQ (apic->no_balance_irq) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (apic->apic_destination_logical) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 1111ff9e41d..8ecb3b45c6c 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -13,7 +13,6 @@ static inline const cpumask_t *numaq_target_cpus(void) } #define NO_BALANCE_IRQ (1) -#define esr_disable (1) static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index cf5036f1ce6..84679e687ad 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -4,7 +4,6 @@ #include #include -#define esr_disable (1) #define NO_BALANCE_IRQ (0) /* In clustered mode, the high nibble of APIC ID is a cluster number. 
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index b6740de18fb..69d8c30d571 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1107,7 +1107,7 @@ static void __cpuinit lapic_setup_esr(void) return; } - if (esr_disable) { + if (apic->ESR_DISABLE) { /* * Something untraceable is creating bad interrupts on * secondary quads ... for the moment, just leave the @@ -1157,7 +1157,7 @@ void __cpuinit setup_local_APIC(void) #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ - if (lapic_is_integrated() && esr_disable) { + if (lapic_is_integrated() && apic->ESR_DISABLE) { apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index d3cead2d2fc..f0bb72674f7 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -69,7 +69,7 @@ struct genapic apic_bigsmp = { .irq_dest_mode = 0, .target_cpus = bigsmp_target_cpus, - .ESR_DISABLE = esr_disable, + .ESR_DISABLE = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index a483e22273e..c30141a9aca 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -36,7 +36,7 @@ struct genapic apic_default = { .irq_dest_mode = 1, .target_cpus = default_target_cpus, - .ESR_DISABLE = esr_disable, + .ESR_DISABLE = 0, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index e31f0c35470..e8aa8fd4f49 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -112,7 +112,7 @@ struct genapic apic_es7000 = { .irq_dest_mode = 0, .target_cpus = es7000_target_cpus, - .ESR_DISABLE = esr_disable, + .ESR_DISABLE = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 4b84b5970fb..860edc8bd90 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -56,7 +56,7 @@ struct genapic apic_numaq = { .irq_dest_mode = 0, .target_cpus = numaq_target_cpus, - .ESR_DISABLE = esr_disable, + .ESR_DISABLE = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index e6b956a0848..cd5ef115a4e 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -49,7 +49,7 @@ struct genapic apic_summit = { .irq_dest_mode = 1, .target_cpus = summit_target_cpus, - .ESR_DISABLE = esr_disable, + .ESR_DISABLE = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, -- cgit v1.2.3 From 08125d3edab90644724652eedec3e219e3e0f2e7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:08:44 +0100 Subject: x86: rename ->ESR_DISABLE to ->disable_esr the ->ESR_DISABLE shouting variant was used to enable the esr_disable macro wrappers. 
Those ugly macros are removed now so we can rename ->ESR_DISABLE to ->disable_esr Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 2 +- arch/x86/kernel/apic.c | 4 ++-- arch/x86/kernel/genapic_flat_64.c | 4 ++-- arch/x86/kernel/genx2apic_cluster.c | 2 +- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index e998e3df5d2..7aee4a4c524 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -28,7 +28,7 @@ struct genapic { const struct cpumask *(*target_cpus)(void); - int ESR_DISABLE; + int disable_esr; int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 69d8c30d571..3853ed76c88 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1107,7 +1107,7 @@ static void __cpuinit lapic_setup_esr(void) return; } - if (apic->ESR_DISABLE) { + if (apic->disable_esr) { /* * Something untraceable is creating bad interrupts on * secondary quads ... for the moment, just leave the @@ -1157,7 +1157,7 @@ void __cpuinit setup_local_APIC(void) #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ - if (lapic_is_integrated() && apic->ESR_DISABLE) { + if (lapic_is_integrated() && apic->disable_esr) { apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 0a263d6bb5e..d437a60cc58 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -184,7 +184,7 @@ struct genapic apic_flat = { .irq_dest_mode = (APIC_DEST_LOGICAL != 0), .target_cpus = flat_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, @@ -330,7 +330,7 @@ struct genapic apic_physflat = { .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = physflat_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index e9ff7dc9a0f..c1cffae4a4c 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -186,7 +186,7 @@ struct genapic apic_x2apic_cluster = { .irq_dest_mode = (APIC_DEST_LOGICAL != 0), .target_cpus = x2apic_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 8141b5a88f6..c59602be035 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -182,7 +182,7 @@ struct genapic apic_x2apic_phys = { .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = x2apic_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 6a73cad0d3e..525b4e480a7 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c 
+++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -247,7 +247,7 @@ struct genapic apic_x2apic_uv_x = { .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), .target_cpus = uv_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index f0bb72674f7..fe9bf252c06 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -69,7 +69,7 @@ struct genapic apic_bigsmp = { .irq_dest_mode = 0, .target_cpus = bigsmp_target_cpus, - .ESR_DISABLE = 1, + .disable_esr = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index c30141a9aca..d3fe8017e94 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -36,7 +36,7 @@ struct genapic apic_default = { .irq_dest_mode = 1, .target_cpus = default_target_cpus, - .ESR_DISABLE = 0, + .disable_esr = 0, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index e8aa8fd4f49..b4f8abfb714 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -112,7 +112,7 @@ struct genapic apic_es7000 = { .irq_dest_mode = 0, .target_cpus = es7000_target_cpus, - .ESR_DISABLE = 1, + .disable_esr = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 860edc8bd90..f3b7840d227 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -56,7 +56,7 @@ struct genapic apic_numaq = { .irq_dest_mode = 0, .target_cpus = numaq_target_cpus, - .ESR_DISABLE = 1, + .disable_esr = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index cd5ef115a4e..95e075b61bd 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -49,7 +49,7 @@ struct genapic apic_summit = { .irq_dest_mode = 1, .target_cpus = summit_target_cpus, - .ESR_DISABLE = 1, + .disable_esr = 1, .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, -- cgit v1.2.3 From 0b06e734bff7554c31eac4aad2fc9be4adb7c1c1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:13:04 +0100 Subject: x86: clean up the APIC_DEST_LOGICAL logic Impact: cleanup The bigsmp and es7000 subarchitectures un-defined APIC_DEST_LOGICAL in a rather nasty way by re-defining it to zero. That is infinitely fragile and makes it very hard to see what to code really does in a given context. The very same constant has different meanings and values - depending on which subarch is enabled. Untangle this mess by never undefining the constant, but instead propagating the right values into the genapic driver templates. 
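Concretely, APIC_DEST_LOGICAL keeps its single hardware value, and each driver template records which destination bits it wants in a field of its own: APIC_DEST_LOGICAL for the logical-mode drivers, 0 for the physical-mode ones, which IPI code then ORs into the command word. The standalone sketch below mirrors the __prepare_ICR() change in this patch; the bit value, driver names and shortcut constant are placeholders, not the kernel's definitions.

#include <stdio.h>

#define APIC_DEST_LOGICAL 0x00800u   /* illustrative; one fixed hardware bit */

struct apic_driver {
    const char   *name;
    unsigned int  apic_destination_logical;   /* APIC_DEST_LOGICAL or 0 */
};

static struct apic_driver flat_like     = { "flat-like",     APIC_DEST_LOGICAL };
static struct apic_driver physflat_like = { "physflat-like", 0 };

static struct apic_driver *apic = &flat_like;

/* Was:  icr = shortcut | APIC_DEST_LOGICAL;  with the macro silently
 * redefined to 0 by some subarch headers.  Now the driver decides: */
static unsigned int prepare_icr(unsigned int shortcut, unsigned int vector)
{
    return shortcut | apic->apic_destination_logical | vector;
}

int main(void)
{
    printf("%s: icr=%#x\n", apic->name, prepare_icr(0x80000u, 0x30u));
    apic = &physflat_like;
    printf("%s: icr=%#x\n", apic->name, prepare_icr(0x80000u, 0x30u));
    return 0;
}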
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 -- arch/x86/include/asm/es7000/apic.h | 2 -- arch/x86/include/asm/mach-generic/mach_apic.h | 2 -- arch/x86/kernel/genapic_flat_64.c | 12 ++++++------ arch/x86/kernel/genx2apic_cluster.c | 10 +++++----- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 4 ++-- arch/x86/kernel/io_apic.c | 2 +- arch/x86/kernel/ipi.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- 12 files changed, 19 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index b550cb11102..7e6e33a6db0 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -17,8 +17,6 @@ static inline const cpumask_t *bigsmp_target_cpus(void) #endif } -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0 #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define NO_BALANCE_IRQ (0) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index aa11c768bed..0d770fce4b2 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -27,8 +27,6 @@ static inline const cpumask_t *es7000_target_cpus(void) #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define NO_BALANCE_IRQ (0) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0x0 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 63fe985219f..00d5fe6e676 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -4,8 +4,6 @@ #include #define NO_BALANCE_IRQ (apic->no_balance_irq) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL (apic->apic_destination_logical) #define init_apic_ldr (apic->init_apic_ldr) #define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing (apic->setup_apic_routing) diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index d437a60cc58..fd242c6b3ba 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -74,7 +74,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector) unsigned long flags; local_irq_save(flags); - __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); + __send_IPI_dest_field(mask, vector, apic->apic_destination_logical); local_irq_restore(flags); } @@ -114,7 +114,7 @@ static void flat_send_IPI_allbutself(int vector) _flat_send_IPI_mask(mask, vector); } } else if (num_online_cpus() > 1) { - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); + __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->apic_destination_logical); } } @@ -123,7 +123,7 @@ static void flat_send_IPI_all(int vector) if (vector == NMI_VECTOR) flat_send_IPI_mask(cpu_online_mask, vector); else - __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); + __send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->apic_destination_logical); } static unsigned int get_apic_id(unsigned long x) @@ -181,11 +181,11 @@ struct genapic apic_flat = { .apic_id_registered = flat_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, - .irq_dest_mode = (APIC_DEST_LOGICAL != 0), + .irq_dest_mode = 1, /* logical */ .target_cpus = flat_target_cpus, .disable_esr = 0, - .apic_destination_logical = 0, + .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, 
.check_apicid_present = NULL, @@ -327,7 +327,7 @@ struct genapic apic_physflat = { .apic_id_registered = flat_apic_id_registered, .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_dest_mode = 0, /* physical */ .target_cpus = physflat_target_cpus, .disable_esr = 0, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index c1cffae4a4c..a76e75ecc20 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -64,7 +64,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) for_each_cpu(query_cpu, mask) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, APIC_DEST_LOGICAL); + vector, apic->apic_destination_logical); local_irq_restore(flags); } @@ -80,7 +80,7 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, if (query_cpu != this_cpu) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, APIC_DEST_LOGICAL); + vector, apic->apic_destination_logical); local_irq_restore(flags); } @@ -95,7 +95,7 @@ static void x2apic_send_IPI_allbutself(int vector) if (query_cpu != this_cpu) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, APIC_DEST_LOGICAL); + vector, apic->apic_destination_logical); local_irq_restore(flags); } @@ -183,11 +183,11 @@ struct genapic apic_x2apic_cluster = { .apic_id_registered = x2apic_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, - .irq_dest_mode = (APIC_DEST_LOGICAL != 0), + .irq_dest_mode = 1, /* logical */ .target_cpus = x2apic_target_cpus, .disable_esr = 0, - .apic_destination_logical = 0, + .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index c59602be035..9b6d68deb14 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -179,7 +179,7 @@ struct genapic apic_x2apic_phys = { .apic_id_registered = x2apic_apic_id_registered, .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_dest_mode = 0, /* physical */ .target_cpus = x2apic_target_cpus, .disable_esr = 0, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 525b4e480a7..0a756800c11 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -244,11 +244,11 @@ struct genapic apic_x2apic_uv_x = { .apic_id_registered = uv_apic_id_registered, .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = (APIC_DEST_PHYSICAL != 0), + .irq_dest_mode = 1, /* logical */ .target_cpus = uv_target_cpus, .disable_esr = 0, - .apic_destination_logical = 0, + .apic_destination_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 7503285e180..17526d7a8ab 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -806,7 +806,7 @@ void send_IPI_self(int vector) * Wait for idle. */ apic_wait_icr_idle(); - cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; + cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | apic->apic_destination_logical; /* * Send the IPI. The write to APIC_ICR fires this off. 
*/ diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 285bbf8831f..400b7bd48f6 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -30,7 +30,7 @@ static inline int __prepare_ICR(unsigned int shortcut, int vector) { - unsigned int icr = shortcut | APIC_DEST_LOGICAL; + unsigned int icr = shortcut | apic->apic_destination_logical; switch (vector) { default: diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f9dbcff4354..f0a173718d9 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -583,7 +583,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) /* Target chip */ /* Boot on the stack */ /* Kick the second */ - apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); + apic_icr_write(APIC_DM_NMI | apic->apic_destination_logical, logical_apicid); pr_debug("Waiting for send to finish...\n"); send_status = safe_apic_wait_icr_idle(); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index fe9bf252c06..13ee7dc9b95 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -70,7 +70,7 @@ struct genapic apic_bigsmp = { .target_cpus = bigsmp_target_cpus, .disable_esr = 1, - .apic_destination_logical = APIC_DEST_LOGICAL, + .apic_destination_logical = 0, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index b4f8abfb714..61b5da213ce 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -113,7 +113,7 @@ struct genapic apic_es7000 = { .target_cpus = es7000_target_cpus, .disable_esr = 1, - .apic_destination_logical = APIC_DEST_LOGICAL, + .apic_destination_logical = 0, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, -- cgit v1.2.3 From bdb1a9b62fc182d4da3143e346f7a0925d243352 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:29:25 +0100 Subject: x86, apic: rename genapic::apic_destination_logical to genapic::dest_logical This field name was unreasonably long - shorten it. 
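After this rename, the driver-visible knobs the series has been consolidating read as one compact block. The sketch below is an abridged, compilable model of that end state, limited to the members touched in these patches; the name field, the placeholder typedefs and the concrete values are assumptions of the model, and the real struct genapic carries many more callbacks.

#include <stdio.h>

typedef unsigned long physid_mask_t;   /* placeholder for the kernel type */
typedef unsigned long cpumask_t;       /* placeholder for the kernel type */

/* Abridged model of struct genapic, only the members this series touches. */
struct genapic_model {
    const char *name;
    int irq_delivery_mode;
    int irq_dest_mode;                 /* 1 = logical, 0 = physical */
    const cpumask_t *(*target_cpus)(void);
    int disable_esr;                   /* was: ESR_DISABLE */
    int dest_logical;                  /* was: apic_destination_logical */
    unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
    unsigned long (*check_apicid_present)(int apicid);
};

static cpumask_t online = 0x3;
static const cpumask_t *model_target_cpus(void) { return &online; }
static unsigned long model_apicid_used(physid_mask_t bitmap, int apicid)
{
    return (bitmap >> apicid) & 1;
}
static unsigned long model_apicid_present(int apicid) { return apicid >= 0; }

static struct genapic_model apic_model = {
    .name                  = "model",
    .irq_delivery_mode     = 0,        /* dest_LowestPrio in the kernel */
    .irq_dest_mode         = 1,        /* logical */
    .target_cpus           = model_target_cpus,
    .disable_esr           = 0,
    .dest_logical          = 0x00800,  /* APIC_DEST_LOGICAL in the kernel */
    .check_apicid_used     = model_apicid_used,
    .check_apicid_present  = model_apicid_present,
};

int main(void)
{
    printf("%s: dest_mode=%d dest_logical=%#x esr %s, apicid 1 %s\n",
           apic_model.name, apic_model.irq_dest_mode, apic_model.dest_logical,
           apic_model.disable_esr ? "off" : "on",
           apic_model.check_apicid_present(1) ? "present" : "absent");
    return 0;
}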
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 2 +- arch/x86/kernel/genapic_flat_64.c | 10 +++++----- arch/x86/kernel/genx2apic_cluster.c | 8 ++++---- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/kernel/io_apic.c | 2 +- arch/x86/kernel/ipi.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 13 files changed, 20 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 7aee4a4c524..f9d1ec018fd 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -30,7 +30,7 @@ struct genapic { int disable_esr; - int apic_destination_logical; + int dest_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index fd242c6b3ba..d22cbdaee20 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -74,7 +74,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector) unsigned long flags; local_irq_save(flags); - __send_IPI_dest_field(mask, vector, apic->apic_destination_logical); + __send_IPI_dest_field(mask, vector, apic->dest_logical); local_irq_restore(flags); } @@ -114,7 +114,7 @@ static void flat_send_IPI_allbutself(int vector) _flat_send_IPI_mask(mask, vector); } } else if (num_online_cpus() > 1) { - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->apic_destination_logical); + __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); } } @@ -123,7 +123,7 @@ static void flat_send_IPI_all(int vector) if (vector == NMI_VECTOR) flat_send_IPI_mask(cpu_online_mask, vector); else - __send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->apic_destination_logical); + __send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); } static unsigned int get_apic_id(unsigned long x) @@ -185,7 +185,7 @@ struct genapic apic_flat = { .target_cpus = flat_target_cpus, .disable_esr = 0, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, @@ -331,7 +331,7 @@ struct genapic apic_physflat = { .target_cpus = physflat_target_cpus, .disable_esr = 0, - .apic_destination_logical = 0, + .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index a76e75ecc20..b91a48eae52 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -64,7 +64,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) for_each_cpu(query_cpu, mask) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->apic_destination_logical); + vector, apic->dest_logical); local_irq_restore(flags); } @@ -80,7 +80,7 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, if (query_cpu != this_cpu) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->apic_destination_logical); + vector, apic->dest_logical); local_irq_restore(flags); } @@ -95,7 +95,7 @@ static void x2apic_send_IPI_allbutself(int vector) if (query_cpu != this_cpu) __x2apic_send_IPI_dest( 
per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->apic_destination_logical); + vector, apic->dest_logical); local_irq_restore(flags); } @@ -187,7 +187,7 @@ struct genapic apic_x2apic_cluster = { .target_cpus = x2apic_target_cpus, .disable_esr = 0, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 9b6d68deb14..f070e86af0f 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -183,7 +183,7 @@ struct genapic apic_x2apic_phys = { .target_cpus = x2apic_target_cpus, .disable_esr = 0, - .apic_destination_logical = 0, + .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 0a756800c11..c8a89158679 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -248,7 +248,7 @@ struct genapic apic_x2apic_uv_x = { .target_cpus = uv_target_cpus, .disable_esr = 0, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 17526d7a8ab..7f8b32b2089 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -806,7 +806,7 @@ void send_IPI_self(int vector) * Wait for idle. */ apic_wait_icr_idle(); - cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | apic->apic_destination_logical; + cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | apic->dest_logical; /* * Send the IPI. The write to APIC_ICR fires this off. */ diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 400b7bd48f6..e2e4895ca69 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -30,7 +30,7 @@ static inline int __prepare_ICR(unsigned int shortcut, int vector) { - unsigned int icr = shortcut | apic->apic_destination_logical; + unsigned int icr = shortcut | apic->dest_logical; switch (vector) { default: diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f0a173718d9..45c096f605f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -583,7 +583,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) /* Target chip */ /* Boot on the stack */ /* Kick the second */ - apic_icr_write(APIC_DM_NMI | apic->apic_destination_logical, logical_apicid); + apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid); pr_debug("Waiting for send to finish...\n"); send_status = safe_apic_wait_icr_idle(); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 13ee7dc9b95..7c52840f205 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -70,7 +70,7 @@ struct genapic apic_bigsmp = { .target_cpus = bigsmp_target_cpus, .disable_esr = 1, - .apic_destination_logical = 0, + .dest_logical = 0, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d3fe8017e94..53fa1ad8318 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -37,7 +37,7 @@ struct genapic apic_default = { .target_cpus = default_target_cpus, .disable_esr = 0, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, 
.check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 61b5da213ce..50fed0225cd 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -113,7 +113,7 @@ struct genapic apic_es7000 = { .target_cpus = es7000_target_cpus, .disable_esr = 1, - .apic_destination_logical = 0, + .dest_logical = 0, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index f3b7840d227..1fb1b1a4aa0 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -57,7 +57,7 @@ struct genapic apic_numaq = { .target_cpus = numaq_target_cpus, .disable_esr = 1, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 95e075b61bd..5c27d4d824e 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -50,7 +50,7 @@ struct genapic apic_summit = { .target_cpus = summit_target_cpus, .disable_esr = 1, - .apic_destination_logical = APIC_DEST_LOGICAL, + .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = check_apicid_used, .check_apicid_present = check_apicid_present, -- cgit v1.2.3 From d1d7cae8fd54a301a0de531b48451649933ffdcf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:41:42 +0100 Subject: x86, apic: clean up check_apicid*() callbacks Clean up these methods - to make it clearer which function is used in which case. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 9 +++++---- arch/x86/include/asm/es7000/apic.h | 5 +++-- arch/x86/include/asm/mach-default/mach_apic.h | 4 ++-- arch/x86/include/asm/mach-generic/mach_apic.h | 2 -- arch/x86/include/asm/numaq/apic.h | 5 +++-- arch/x86/include/asm/summit/apic.h | 5 +++-- arch/x86/kernel/io_apic.c | 6 +++--- arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 12 files changed, 29 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 7e6e33a6db0..bd52d4d86f0 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -20,14 +20,15 @@ static inline const cpumask_t *bigsmp_target_cpus(void) #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define NO_BALANCE_IRQ (0) -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +static inline unsigned long +bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) { - return (0); + return 0; } -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long bigsmp_check_apicid_present(int bit) { - return (1); + return 1; } static inline unsigned long calculate_ldr(int cpu) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 0d770fce4b2..cd888daa193 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -28,11 +28,12 @@ static inline const cpumask_t *es7000_target_cpus(void) #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define NO_BALANCE_IRQ (0) -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +static inline unsigned long 
+es7000_check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long es7000_check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 5f8d17fdc96..064bc11a991 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -105,12 +105,12 @@ static inline void vector_allocation_domain(int cpu, struct cpumask *retmask) } #endif -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) { return physid_isset(apicid, bitmap); } -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long default_check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 00d5fe6e676..e035f88dfcd 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -13,9 +13,7 @@ #define cpu_present_to_apicid (apic->cpu_present_to_apicid) #define apicid_to_cpu_present (apic->apicid_to_cpu_present) #define setup_portio_remap (apic->setup_portio_remap) -#define check_apicid_present (apic->check_apicid_present) #define check_phys_apicid_present (apic->check_phys_apicid_present) -#define check_apicid_used (apic->check_apicid_used) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define vector_allocation_domain (apic->vector_allocation_domain) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 8ecb3b45c6c..571fdaeafaa 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -14,11 +14,12 @@ static inline const cpumask_t *numaq_target_cpus(void) #define NO_BALANCE_IRQ (1) -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +static inline unsigned long +numaq_check_apicid_used(physid_mask_t bitmap, int apicid) { return physid_isset(apicid, bitmap); } -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long numaq_check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 84679e687ad..482038b244b 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -23,13 +23,14 @@ static inline const cpumask_t *summit_target_cpus(void) return &cpumask_of_cpu(0); } -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +static inline unsigned long +summit_check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } /* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long summit_check_apicid_present(int bit) { return 1; } diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 7f8b32b2089..733ecf17272 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2135,7 +2135,7 @@ static void __init setup_ioapic_ids_from_mpc(void) * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ - if (check_apicid_used(phys_id_present_map, + if (apic->check_apicid_used(phys_id_present_map, mp_ioapics[apic_id].apicid)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic_id, mp_ioapics[apic_id].apicid); @@ -3878,10 +3878,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. */ - if (check_apicid_used(apic_id_map, apic_id)) { + if (apic->check_apicid_used(apic_id_map, apic_id)) { for (i = 0; i < get_physical_broadcast(); i++) { - if (!check_apicid_used(apic_id_map, i)) + if (!apic->check_apicid_used(apic_id_map, i)) break; } diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 7c52840f205..aa8443f6c0f 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -71,8 +71,8 @@ struct genapic apic_bigsmp = { .target_cpus = bigsmp_target_cpus, .disable_esr = 1, .dest_logical = 0, - .check_apicid_used = check_apicid_used, - .check_apicid_present = check_apicid_present, + .check_apicid_used = bigsmp_check_apicid_used, + .check_apicid_present = bigsmp_check_apicid_present, .no_balance_irq = NO_BALANCE_IRQ, .no_ioapic_check = 0, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 53fa1ad8318..47f6b5b06ba 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -38,8 +38,8 @@ struct genapic apic_default = { .target_cpus = default_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = check_apicid_used, - .check_apicid_present = check_apicid_present, + .check_apicid_used = default_check_apicid_used, + .check_apicid_present = default_check_apicid_present, .no_balance_irq = NO_BALANCE_IRQ, .no_ioapic_check = 0, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 50fed0225cd..5633f3296e1 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -114,8 +114,8 @@ struct genapic apic_es7000 = { .target_cpus = es7000_target_cpus, .disable_esr = 1, .dest_logical = 0, - .check_apicid_used = check_apicid_used, - .check_apicid_present = check_apicid_present, + .check_apicid_used = es7000_check_apicid_used, + .check_apicid_present = es7000_check_apicid_present, .no_balance_irq = NO_BALANCE_IRQ, .no_ioapic_check = 0, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 1fb1b1a4aa0..d85206d8e4a 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -58,8 +58,8 @@ struct genapic apic_numaq = { .target_cpus = numaq_target_cpus, .disable_esr = 1, .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = check_apicid_used, - .check_apicid_present = check_apicid_present, + .check_apicid_used = numaq_check_apicid_used, + .check_apicid_present = numaq_check_apicid_present, .no_balance_irq = NO_BALANCE_IRQ, .no_ioapic_check = 0, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 5c27d4d824e..f54cf73d3ed 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -51,8 +51,8 @@ struct genapic apic_summit = { .target_cpus = summit_target_cpus, .disable_esr = 1, .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = check_apicid_used, - .check_apicid_present = check_apicid_present, + .check_apicid_used = summit_check_apicid_used, + .check_apicid_present = summit_check_apicid_present, .no_balance_irq = NO_BALANCE_IRQ, .no_ioapic_check = 0, -- cgit 
v1.2.3 From 2e867b17cc02e1799f18126af0ddd7b63dd8f6f4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 05:57:56 +0100 Subject: x86, apic: remove no_balance_irq and no_ioapic_check flags These flags are completely unused. (the in-kernel IRQ balancer has been removed from the upstream kernel.) Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 1 - arch/x86/include/asm/es7000/apic.h | 2 -- arch/x86/include/asm/genapic.h | 3 --- arch/x86/include/asm/mach-default/mach_apic.h | 2 -- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 -- arch/x86/include/asm/summit/apic.h | 2 -- arch/x86/kernel/genapic_flat_64.c | 6 ------ arch/x86/kernel/genx2apic_cluster.c | 3 --- arch/x86/kernel/genx2apic_phys.c | 3 --- arch/x86/kernel/genx2apic_uv_x.c | 3 --- arch/x86/mach-generic/bigsmp.c | 3 --- arch/x86/mach-generic/default.c | 3 --- arch/x86/mach-generic/es7000.c | 4 ---- arch/x86/mach-generic/numaq.c | 3 --- arch/x86/mach-generic/summit.c | 3 --- 16 files changed, 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index bd52d4d86f0..916451252b3 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -18,7 +18,6 @@ static inline const cpumask_t *bigsmp_target_cpus(void) } #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define NO_BALANCE_IRQ (0) static inline unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index cd888daa193..847008a7702 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -23,10 +23,8 @@ static inline const cpumask_t *es7000_target_cpus(void) #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) #define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) #define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ -#define NO_BALANCE_IRQ_CLUSTER (1) #define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define NO_BALANCE_IRQ (0) static inline unsigned long es7000_check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index f9d1ec018fd..661898c2229 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -34,9 +34,6 @@ struct genapic { unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); - int no_balance_irq; - int no_ioapic_check; - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); void (*init_apic_ldr)(void); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 064bc11a991..8adccf8ee47 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -17,8 +17,6 @@ static inline const struct cpumask *default_target_cpus(void) #endif } -#define NO_BALANCE_IRQ (0) - #ifdef CONFIG_X86_64 #include #define init_apic_ldr (apic->init_apic_ldr) diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index e035f88dfcd..4cb9e2b99e3 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define NO_BALANCE_IRQ (apic->no_balance_irq) #define init_apic_ldr (apic->init_apic_ldr) #define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing 
(apic->setup_apic_routing) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 571fdaeafaa..defee3496ad 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -12,8 +12,6 @@ static inline const cpumask_t *numaq_target_cpus(void) return &CPU_MASK_ALL; } -#define NO_BALANCE_IRQ (1) - static inline unsigned long numaq_check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 482038b244b..51df002ecf4 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -4,8 +4,6 @@ #include #include -#define NO_BALANCE_IRQ (0) - /* In clustered mode, the high nibble of APIC ID is a cluster number. * The low nibble is a 4-bit bitmap. */ #define XAPIC_DEST_CPUS_SHIFT 4 diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index d22cbdaee20..9446f372a16 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -189,9 +189,6 @@ struct genapic apic_flat = { .check_apicid_used = NULL, .check_apicid_present = NULL, - .no_balance_irq = 0, - .no_ioapic_check = 0, - .vector_allocation_domain = flat_vector_allocation_domain, .init_apic_ldr = flat_init_apic_ldr, @@ -335,9 +332,6 @@ struct genapic apic_physflat = { .check_apicid_used = NULL, .check_apicid_present = NULL, - .no_balance_irq = 0, - .no_ioapic_check = 0, - .vector_allocation_domain = physflat_vector_allocation_domain, /* not needed, but shouldn't hurt: */ .init_apic_ldr = flat_init_apic_ldr, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index b91a48eae52..2eeca6e744a 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -191,9 +191,6 @@ struct genapic apic_x2apic_cluster = { .check_apicid_used = NULL, .check_apicid_present = NULL, - .no_balance_irq = 0, - .no_ioapic_check = 0, - .vector_allocation_domain = x2apic_vector_allocation_domain, .init_apic_ldr = init_x2apic_ldr, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index f070e86af0f..be0ee3e56ef 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -187,9 +187,6 @@ struct genapic apic_x2apic_phys = { .check_apicid_used = NULL, .check_apicid_present = NULL, - .no_balance_irq = 0, - .no_ioapic_check = 0, - .vector_allocation_domain = x2apic_vector_allocation_domain, .init_apic_ldr = init_x2apic_ldr, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index c8a89158679..68b423f3da9 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -252,9 +252,6 @@ struct genapic apic_x2apic_uv_x = { .check_apicid_used = NULL, .check_apicid_present = NULL, - .no_balance_irq = 0, - .no_ioapic_check = 0, - .vector_allocation_domain = uv_vector_allocation_domain, .init_apic_ldr = uv_init_apic_ldr, diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index aa8443f6c0f..6da251aa9f4 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -74,9 +74,6 @@ struct genapic apic_bigsmp = { .check_apicid_used = bigsmp_check_apicid_used, .check_apicid_present = bigsmp_check_apicid_present, - .no_balance_irq = NO_BALANCE_IRQ, - .no_ioapic_check = 0, - .vector_allocation_domain = vector_allocation_domain, .init_apic_ldr = init_apic_ldr, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 
47f6b5b06ba..e89e8c9dd68 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -41,9 +41,6 @@ struct genapic apic_default = { .check_apicid_used = default_check_apicid_used, .check_apicid_present = default_check_apicid_present, - .no_balance_irq = NO_BALANCE_IRQ, - .no_ioapic_check = 0, - .vector_allocation_domain = vector_allocation_domain, .init_apic_ldr = init_apic_ldr, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 5633f3296e1..8e9eeecf7e2 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -23,7 +23,6 @@ void __init es7000_update_genapic_to_cluster(void) apic->target_cpus = target_cpus_cluster; apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; - apic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER; apic->init_apic_ldr = init_apic_ldr_cluster; @@ -117,9 +116,6 @@ struct genapic apic_es7000 = { .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, - .no_balance_irq = NO_BALANCE_IRQ, - .no_ioapic_check = 0, - .vector_allocation_domain = vector_allocation_domain, .init_apic_ldr = init_apic_ldr, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index d85206d8e4a..f909189fee3 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -61,9 +61,6 @@ struct genapic apic_numaq = { .check_apicid_used = numaq_check_apicid_used, .check_apicid_present = numaq_check_apicid_present, - .no_balance_irq = NO_BALANCE_IRQ, - .no_ioapic_check = 0, - .vector_allocation_domain = vector_allocation_domain, .init_apic_ldr = init_apic_ldr, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index f54cf73d3ed..99a9bea8d14 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -54,9 +54,6 @@ struct genapic apic_summit = { .check_apicid_used = summit_check_apicid_used, .check_apicid_present = summit_check_apicid_present, - .no_balance_irq = NO_BALANCE_IRQ, - .no_ioapic_check = 0, - .vector_allocation_domain = vector_allocation_domain, .init_apic_ldr = init_apic_ldr, -- cgit v1.2.3 From e2d40b1878bd13ca1028ddd299c48e4821ac3535 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->vector_allocation_domain() - separate the namespace - remove macros - move the default vector-allocation-domain to mach-generic - fix whitespace damage Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_apic.h | 13 ------------- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/kernel/io_apic.c | 2 +- arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 16 +++++++++++++++- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 8 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 8adccf8ee47..9c56542644c 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -23,7 +23,6 @@ static inline const struct cpumask *default_target_cpus(void) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define phys_pkg_id (apic->phys_pkg_id) -#define vector_allocation_domain (apic->vector_allocation_domain) #define read_apic_id() 
(GET_APIC_ID(apic_read(APIC_ID))) #define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) @@ -89,18 +88,6 @@ static inline int apicid_to_node(int logical_apicid) #endif } -static inline void vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; -} #endif static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 4cb9e2b99e3..e94881af962 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -15,7 +15,6 @@ #define check_phys_apicid_present (apic->check_phys_apicid_present) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) -#define vector_allocation_domain (apic->vector_allocation_domain) #define enable_apic_mode (apic->enable_apic_mode) #define phys_pkg_id (apic->phys_pkg_id) #define wakeup_secondary_cpu (apic->wakeup_cpu) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 733ecf17272..49899e06624 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1316,7 +1316,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) int new_cpu; int vector, offset; - vector_allocation_domain(cpu, tmp_mask); + apic->vector_allocation_domain(cpu, tmp_mask); vector = current_vector; offset = current_offset; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 6da251aa9f4..391cc99cd21 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -42,7 +42,7 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { { } }; -static void vector_allocation_domain(int cpu, cpumask_t *retmask) +static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) { cpus_clear(*retmask); cpu_set(cpu, *retmask); @@ -74,7 +74,7 @@ struct genapic apic_bigsmp = { .check_apicid_used = bigsmp_check_apicid_used, .check_apicid_present = bigsmp_check_apicid_present, - .vector_allocation_domain = vector_allocation_domain, + .vector_allocation_domain = bigsmp_vector_allocation_domain, .init_apic_ldr = init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index e89e8c9dd68..6adc3c69a3c 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -18,6 +18,20 @@ #include #include +static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* + * Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; +} + /* should be called last. 
*/ static int probe_default(void) { @@ -41,7 +55,7 @@ struct genapic apic_default = { .check_apicid_used = default_check_apicid_used, .check_apicid_present = default_check_apicid_present, - .vector_allocation_domain = vector_allocation_domain, + .vector_allocation_domain = default_vector_allocation_domain, .init_apic_ldr = init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 8e9eeecf7e2..bc1f21cd6a4 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -86,7 +86,7 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) } #endif -static void vector_allocation_domain(int cpu, cpumask_t *retmask) +static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -116,7 +116,7 @@ struct genapic apic_es7000 = { .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, - .vector_allocation_domain = vector_allocation_domain, + .vector_allocation_domain = es7000_vector_allocation_domain, .init_apic_ldr = init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index f909189fee3..712882f48c4 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -31,7 +31,7 @@ static int probe_numaq(void) return found_numaq; } -static void vector_allocation_domain(int cpu, cpumask_t *retmask) +static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -61,7 +61,7 @@ struct genapic apic_numaq = { .check_apicid_used = numaq_check_apicid_used, .check_apicid_present = numaq_check_apicid_present, - .vector_allocation_domain = vector_allocation_domain, + .vector_allocation_domain = numaq_vector_allocation_domain, .init_apic_ldr = init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 99a9bea8d14..1834887b94a 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -24,7 +24,7 @@ static int probe_summit(void) return 0; } -static void vector_allocation_domain(int cpu, cpumask_t *retmask) +static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -54,7 +54,7 @@ struct genapic apic_summit = { .check_apicid_used = summit_check_apicid_used, .check_apicid_present = summit_check_apicid_present, - .vector_allocation_domain = vector_allocation_domain, + .vector_allocation_domain = summit_vector_allocation_domain, .init_apic_ldr = init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, -- cgit v1.2.3 From a5c4329622a3437adef4b2a4288d127957743c97 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->init_apic_ldr() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 3 +-- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/apic.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 13 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 916451252b3..81941308299 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -46,7 +46,7 @@ static inline unsigned long calculate_ldr(int cpu) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ -static inline void init_apic_ldr(void) +static inline void bigsmp_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 847008a7702..06f5757bf7a 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -52,7 +52,7 @@ static inline unsigned long calculate_ldr(int cpu) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ -static inline void init_apic_ldr_cluster(void) +static inline void es7000_init_apic_ldr_cluster(void) { unsigned long val; int cpu = smp_processor_id(); @@ -62,7 +62,7 @@ static inline void init_apic_ldr_cluster(void) apic_write(APIC_LDR, val); } -static inline void init_apic_ldr(void) +static inline void es7000_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 9c56542644c..23e0a2da3a9 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -19,7 +19,6 @@ static inline const struct cpumask *default_target_cpus(void) #ifdef CONFIG_X86_64 #include -#define init_apic_ldr (apic->init_apic_ldr) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define phys_pkg_id (apic->phys_pkg_id) @@ -36,7 +35,7 @@ extern void setup_apic_routing(void); * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... 
*/ -static inline void init_apic_ldr(void) +static inline void default_init_apic_ldr(void) { unsigned long val; diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index e94881af962..8e51f416394 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define init_apic_ldr (apic->init_apic_ldr) #define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing (apic->setup_apic_routing) #define multi_timer_check (apic->multi_timer_check) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index defee3496ad..802297489a3 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -28,7 +28,7 @@ static inline int numaq_apic_id_registered(void) return 1; } -static inline void init_apic_ldr(void) +static inline void numaq_init_apic_ldr(void) { /* Already done in NUMA-Q firmware */ } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 51df002ecf4..9108c89fe88 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -37,7 +37,7 @@ static inline unsigned long summit_check_apicid_present(int bit) extern u8 cpu_2_logical_apicid[]; -static inline void init_apic_ldr(void) +static inline void summit_init_apic_ldr(void) { unsigned long val, id; int count = 0; diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 3853ed76c88..b7077936ac0 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1179,7 +1179,7 @@ void __cpuinit setup_local_APIC(void) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ - init_apic_ldr(); + apic->init_apic_ldr(); /* * Set Task Priority to 'accept all'. 
We never change this diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 391cc99cd21..7b7fc471a3f 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -75,7 +75,7 @@ struct genapic apic_bigsmp = { .check_apicid_present = bigsmp_check_apicid_present, .vector_allocation_domain = bigsmp_vector_allocation_domain, - .init_apic_ldr = init_apic_ldr, + .init_apic_ldr = bigsmp_init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 6adc3c69a3c..633e8482af2 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -56,7 +56,7 @@ struct genapic apic_default = { .check_apicid_present = default_check_apicid_present, .vector_allocation_domain = default_vector_allocation_domain, - .init_apic_ldr = init_apic_ldr, + .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index bc1f21cd6a4..b70833e3597 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -24,7 +24,7 @@ void __init es7000_update_genapic_to_cluster(void) apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; - apic->init_apic_ldr = init_apic_ldr_cluster; + apic->init_apic_ldr = es7000_init_apic_ldr_cluster; apic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster; } @@ -117,7 +117,7 @@ struct genapic apic_es7000 = { .check_apicid_present = es7000_check_apicid_present, .vector_allocation_domain = es7000_vector_allocation_domain, - .init_apic_ldr = init_apic_ldr, + .init_apic_ldr = es7000_init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 712882f48c4..a06fda57928 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -62,7 +62,7 @@ struct genapic apic_numaq = { .check_apicid_present = numaq_check_apicid_present, .vector_allocation_domain = numaq_vector_allocation_domain, - .init_apic_ldr = init_apic_ldr, + .init_apic_ldr = numaq_init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 1834887b94a..36c552fa427 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -55,7 +55,7 @@ struct genapic apic_summit = { .check_apicid_present = summit_check_apicid_present, .vector_allocation_domain = summit_vector_allocation_domain, - .init_apic_ldr = init_apic_ldr, + .init_apic_ldr = summit_init_apic_ldr, .ioapic_phys_id_map = ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, -- cgit v1.2.3 From d190cb87c4503014353f2310c4bfa2268fa7111d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->ioapic_phys_id_map() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h | 2 +- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 3 ++- arch/x86/kernel/io_apic.c | 4 ++-- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- 
arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 81941308299..05116d5487d 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -94,7 +94,7 @@ static inline int cpu_to_logical_apicid(int cpu) return cpu_physical_id(cpu); } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0xFFL); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 06f5757bf7a..db3e652f0f7 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -125,7 +125,7 @@ static inline int cpu_to_logical_apicid(int cpu) #endif } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0xff); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 23e0a2da3a9..7abdaae06f2 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -99,7 +99,7 @@ static inline unsigned long default_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) { return phys_map; } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 8e51f416394..c1c96e6bb18 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define ioapic_phys_id_map (apic->ioapic_phys_id_map) #define setup_apic_routing (apic->setup_apic_routing) #define multi_timer_check (apic->multi_timer_check) #define apicid_to_node (apic->apicid_to_node) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 802297489a3..dc7499b9262 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -48,7 +48,7 @@ static inline int multi_timer_check(int apic, int irq) return apic != 0 && irq == 0; } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) { /* We don't have a good way to do this yet - hack */ return physids_promote(0xFUL); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 9108c89fe88..4dafb58f930 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -109,7 +109,8 @@ static inline int cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) +static inline physid_mask_t + summit_ioapic_phys_id_map(physid_mask_t phys_id_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0x0F); diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 49899e06624..db79ad9a764 100644 --- a/arch/x86/kernel/io_apic.c +++ 
b/arch/x86/kernel/io_apic.c @@ -2108,7 +2108,7 @@ static void __init setup_ioapic_ids_from_mpc(void) * This is broken; anything with a real cpu count has to * circumvent this idiocy regardless. */ - phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); + phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map); /* * Set the IOAPIC ID to the value stored in the MPC table. @@ -3862,7 +3862,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) */ if (physids_empty(apic_id_map)) - apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); + apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 7b7fc471a3f..f2a3418d0cc 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -77,7 +77,7 @@ struct genapic apic_bigsmp = { .vector_allocation_domain = bigsmp_vector_allocation_domain, .init_apic_ldr = bigsmp_init_apic_ldr, - .ioapic_phys_id_map = ioapic_phys_id_map, + .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 633e8482af2..c403f3d9300 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -58,7 +58,7 @@ struct genapic apic_default = { .vector_allocation_domain = default_vector_allocation_domain, .init_apic_ldr = default_init_apic_ldr, - .ioapic_phys_id_map = ioapic_phys_id_map, + .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index b70833e3597..ce09baf0872 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -119,7 +119,7 @@ struct genapic apic_es7000 = { .vector_allocation_domain = es7000_vector_allocation_domain, .init_apic_ldr = es7000_init_apic_ldr, - .ioapic_phys_id_map = ioapic_phys_id_map, + .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index a06fda57928..5d98f18a0bd 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -64,7 +64,7 @@ struct genapic apic_numaq = { .vector_allocation_domain = numaq_vector_allocation_domain, .init_apic_ldr = numaq_init_apic_ldr, - .ioapic_phys_id_map = ioapic_phys_id_map, + .ioapic_phys_id_map = numaq_ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 36c552fa427..6abdd53a01c 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -57,7 +57,7 @@ struct genapic apic_summit = { .vector_allocation_domain = summit_vector_allocation_domain, .init_apic_ldr = summit_init_apic_ldr, - .ioapic_phys_id_map = ioapic_phys_id_map, + .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, -- cgit v1.2.3 From 72ce016583916fb7ffcbaa6a3e1f8f731b79a865 Mon Sep 17 00:00:00 2001 From: Ingo 
Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->setup_apic_routing() - separate the namespace - remove macros - remove namespace clash on 64-bit Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/genapic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h | 4 ++-- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/acpi/boot.c | 5 ++--- arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/genapic_64.c | 2 +- arch/x86/kernel/mpparse.c | 6 +++--- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 17 files changed, 20 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 05116d5487d..321ea47b5dd 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -56,7 +56,7 @@ static inline void bigsmp_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline void setup_apic_routing(void) +static inline void bigsmp_setup_apic_routing(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "Physflat", nr_ioapics); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index db3e652f0f7..f1183000a94 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -73,7 +73,7 @@ static inline void es7000_init_apic_ldr(void) } extern int apic_version [MAX_APICS]; -static inline void setup_apic_routing(void) +static inline void es7000_setup_apic_routing(void) { int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 661898c2229..38b1202316f 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -107,7 +107,7 @@ extern void apic_send_IPI_self(int vector); extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); -extern void setup_apic_routing(void); +extern void default_setup_apic_routing(void); #endif #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 7abdaae06f2..d4467746304 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -25,7 +25,7 @@ static inline const struct cpumask *default_target_cpus(void) #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) #define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) -extern void setup_apic_routing(void); +extern void default_setup_apic_routing(void); #else #define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* @@ -70,7 +70,7 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -static inline void setup_apic_routing(void) +static inline void default_setup_apic_routing(void) { #ifdef CONFIG_X86_IO_APIC printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index c1c96e6bb18..ddf369248ab 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define setup_apic_routing (apic->setup_apic_routing) #define multi_timer_check (apic->multi_timer_check) #define apicid_to_node (apic->apicid_to_node) #define cpu_to_logical_apicid (apic->cpu_to_logical_apicid) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index dc7499b9262..2feb7e72e9e 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -33,7 +33,7 @@ static inline void numaq_init_apic_ldr(void) /* Already done in NUMA-Q firmware */ } -static inline void setup_apic_routing(void) +static inline void numaq_setup_apic_routing(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "NUMA-Q", nr_ioapics); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 4dafb58f930..7ec2696bc9a 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -74,7 +74,7 @@ static inline int summit_apic_id_registered(void) return 1; } -static inline void setup_apic_routing(void) +static inline void summit_setup_apic_routing(void) { printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", nr_ioapics); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 314fe0dddef..539163161a4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1360,9 +1360,8 @@ static void __init acpi_process_madt(void) acpi_ioapic = 1; smp_found_config = 1; -#ifdef CONFIG_X86_32 - setup_apic_routing(); -#endif + if (apic->setup_apic_routing) + apic->setup_apic_routing(); } } if (error == -EINVAL) { diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index b7077936ac0..fcbcc03cd4b 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1625,7 +1625,7 @@ int __init APIC_init_uniprocessor(void) enable_IR_x2apic(); #endif #ifdef CONFIG_X86_64 - setup_apic_routing(); + default_setup_apic_routing(); #endif verify_local_APIC(); diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 060945b8eec..d57d2138f07 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -44,7 +44,7 @@ static struct genapic *apic_probe[] __initdata = { /* * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 
*/ -void __init setup_apic_routing(void) +void __init default_setup_apic_routing(void) { if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { if (!intr_remapping_enabled) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index fa6bb263892..c8a534a16d9 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -390,9 +390,9 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) generic_bigsmp_probe(); #endif -#ifdef CONFIG_X86_32 - setup_apic_routing(); -#endif + if (apic->setup_apic_routing) + apic->setup_apic_routing(); + if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 45c096f605f..3791b4ae567 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1128,7 +1128,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) #ifdef CONFIG_X86_64 enable_IR_x2apic(); - setup_apic_routing(); + default_setup_apic_routing(); #endif if (smp_sanity_check(max_cpus) < 0) { diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index f2a3418d0cc..ad3837a59bd 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -78,7 +78,7 @@ struct genapic apic_bigsmp = { .init_apic_ldr = bigsmp_init_apic_ldr, .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, - .setup_apic_routing = setup_apic_routing, + .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index c403f3d9300..67f287fc12d 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -59,7 +59,7 @@ struct genapic apic_default = { .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, - .setup_apic_routing = setup_apic_routing, + .setup_apic_routing = default_setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index ce09baf0872..f6117293946 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -120,7 +120,7 @@ struct genapic apic_es7000 = { .init_apic_ldr = es7000_init_apic_ldr, .ioapic_phys_id_map = es7000_ioapic_phys_id_map, - .setup_apic_routing = setup_apic_routing, + .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 5d98f18a0bd..8c137f41348 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -65,7 +65,7 @@ struct genapic apic_numaq = { .init_apic_ldr = numaq_init_apic_ldr, .ioapic_phys_id_map = numaq_ioapic_phys_id_map, - .setup_apic_routing = setup_apic_routing, + .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 6abdd53a01c..0698566dc7b 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -58,7 +58,7 @@ struct genapic apic_summit = { .init_apic_ldr = summit_init_apic_ldr, .ioapic_phys_id_map 
= summit_ioapic_phys_id_map, - .setup_apic_routing = setup_apic_routing, + .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, -- cgit v1.2.3 From 33a201fac698a93d9d1ffa77030ba2ff38d1a3d1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 07:17:26 +0100 Subject: x86, apic: streamline the ->multi_timer_check() quirk only NUMAQ uses this quirk: to prevent the timer IRQ from being added on secondary nodes. All other genapic templates can have a NULL ->multi_timer_check() callback. Also, extend the generic code to treat a NULL pointer accordingly. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 5 ----- arch/x86/include/asm/es7000/apic.h | 5 ----- arch/x86/include/asm/mach-default/mach_apic.h | 5 ----- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 5 ----- arch/x86/kernel/io_apic.c | 11 ++++++++--- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 14 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 321ea47b5dd..df59298086c 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -62,11 +62,6 @@ static inline void bigsmp_setup_apic_routing(void) "Physflat", nr_ioapics); } -static inline int multi_timer_check(int apic, int irq) -{ - return (0); -} - static inline int apicid_to_node(int logical_apicid) { return apicid_2_node[hard_smp_processor_id()]; diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index f1183000a94..632e4cd3f4f 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -82,11 +82,6 @@ static inline void es7000_setup_apic_routing(void) nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); } -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - static inline int apicid_to_node(int logical_apicid) { return 0; diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index d4467746304..f418d470cf4 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -104,11 +104,6 @@ static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) return phys_map; } -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index ddf369248ab..bdea0a759e8 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define multi_timer_check (apic->multi_timer_check) #define apicid_to_node (apic->apicid_to_node) #define cpu_to_logical_apicid (apic->cpu_to_logical_apicid) #define cpu_present_to_apicid (apic->cpu_present_to_apicid) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 2feb7e72e9e..22bdf3d4c0e 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -43,7 +43,7 @@ static inline void 
numaq_setup_apic_routing(void) * Skip adding the timer int on secondary nodes, which causes * a small but painful rift in the time-space continuum. */ -static inline int multi_timer_check(int apic, int irq) +static inline int numaq_multi_timer_check(int apic, int irq) { return apic != 0 && irq == 0; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 7ec2696bc9a..acb7bd1de84 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -64,11 +64,6 @@ static inline void summit_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - static inline int summit_apic_id_registered(void) { return 1; diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index db79ad9a764..282ea112f3c 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1618,10 +1618,15 @@ static void __init setup_IO_APIC_irqs(void) } irq = pin_2_irq(idx, apic_id, pin); -#ifdef CONFIG_X86_32 - if (multi_timer_check(apic_id, irq)) + + /* + * Skip the timer IRQ if there's a quirk handler + * installed and if it returns 1: + */ + if (apic->multi_timer_check && + apic->multi_timer_check(apic_id, irq)) continue; -#endif + desc = irq_to_desc_alloc_cpu(irq, cpu); if (!desc) { printk(KERN_INFO "can not get irq_desc for %d\n", irq); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index ad3837a59bd..d0749569cdf 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -79,7 +79,7 @@ struct genapic apic_bigsmp = { .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, - .multi_timer_check = multi_timer_check, + .multi_timer_check = NULL, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 67f287fc12d..6a21aa7c0c6 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -60,7 +60,7 @@ struct genapic apic_default = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = default_setup_apic_routing, - .multi_timer_check = multi_timer_check, + .multi_timer_check = NULL, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index f6117293946..0be59a51df2 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -121,7 +121,7 @@ struct genapic apic_es7000 = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, - .multi_timer_check = multi_timer_check, + .multi_timer_check = NULL, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 8c137f41348..da4ed653506 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -66,7 +66,7 @@ struct genapic apic_numaq = { .ioapic_phys_id_map = numaq_ioapic_phys_id_map, .setup_apic_routing = numaq_setup_apic_routing, - .multi_timer_check = multi_timer_check, + .multi_timer_check = numaq_multi_timer_check, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, diff 
--git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 0698566dc7b..b618a186f1e 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -59,7 +59,7 @@ struct genapic apic_summit = { .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = summit_setup_apic_routing, - .multi_timer_check = multi_timer_check, + .multi_timer_check = NULL, .apicid_to_node = apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, -- cgit v1.2.3 From 3f57a318c36e1f24070a18df8c4971ca08d33142 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->apicid_to_node() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h | 2 +- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 12 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index df59298086c..77f0b734875 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -62,7 +62,7 @@ static inline void bigsmp_setup_apic_routing(void) "Physflat", nr_ioapics); } -static inline int apicid_to_node(int logical_apicid) +static inline int bigsmp_apicid_to_node(int logical_apicid) { return apicid_2_node[hard_smp_processor_id()]; } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 632e4cd3f4f..bcdf31400df 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -82,7 +82,7 @@ static inline void es7000_setup_apic_routing(void) nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); } -static inline int apicid_to_node(int logical_apicid) +static inline int es7000_apicid_to_node(int logical_apicid) { return 0; } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index f418d470cf4..2f78209d972 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -78,7 +78,7 @@ static inline void default_setup_apic_routing(void) #endif } -static inline int apicid_to_node(int logical_apicid) +static inline int default_apicid_to_node(int logical_apicid) { #ifdef CONFIG_SMP return apicid_2_node[hard_smp_processor_id()]; diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index bdea0a759e8..b585a8e5f81 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define apicid_to_node (apic->apicid_to_node) #define cpu_to_logical_apicid (apic->cpu_to_logical_apicid) #define cpu_present_to_apicid (apic->cpu_present_to_apicid) #define apicid_to_cpu_present (apic->apicid_to_cpu_present) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 22bdf3d4c0e..a0e3b437118 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -76,14 +76,14 @@ static inline int 
cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline int apicid_to_node(int logical_apicid) +static inline int numaq_apicid_to_node(int logical_apicid) { return logical_apicid >> 4; } static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) { - int node = apicid_to_node(logical_apicid); + int node = numaq_apicid_to_node(logical_apicid); int cpu = __ffs(logical_apicid & 0xf); return physid_mask_of_physid(cpu + 4*node); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index acb7bd1de84..cfff2760e60 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -75,7 +75,7 @@ static inline void summit_setup_apic_routing(void) nr_ioapics); } -static inline int apicid_to_node(int logical_apicid) +static inline int summit_apicid_to_node(int logical_apicid) { #ifdef CONFIG_SMP return apicid_2_node[hard_smp_processor_id()]; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 3791b4ae567..1dd4cecd4bc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -163,7 +163,7 @@ static void map_cpu_to_logical_apicid(void) { int cpu = smp_processor_id(); int apicid = logical_smp_processor_id(); - int node = apicid_to_node(apicid); + int node = apic->apicid_to_node(apicid); if (!node_online(node)) node = first_online_node; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index d0749569cdf..2f4121499e5 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -80,7 +80,7 @@ struct genapic apic_bigsmp = { .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = apicid_to_node, + .apicid_to_node = bigsmp_apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 6a21aa7c0c6..d391c2dc819 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -61,7 +61,7 @@ struct genapic apic_default = { .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = default_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = apicid_to_node, + .apicid_to_node = default_apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 0be59a51df2..933f2a38599 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -122,7 +122,7 @@ struct genapic apic_es7000 = { .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = apicid_to_node, + .apicid_to_node = es7000_apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index da4ed653506..38344fb9979 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -67,7 +67,7 @@ struct genapic apic_numaq = { .ioapic_phys_id_map = numaq_ioapic_phys_id_map, .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = numaq_multi_timer_check, - .apicid_to_node = apicid_to_node, + 
.apicid_to_node = numaq_apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index b618a186f1e..6150604deaa 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -60,7 +60,7 @@ struct genapic apic_summit = { .ioapic_phys_id_map = summit_ioapic_phys_id_map, .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, - .apicid_to_node = apicid_to_node, + .apicid_to_node = summit_apicid_to_node, .cpu_to_logical_apicid = cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, -- cgit v1.2.3 From 5257c5111ca21c8e857b65a79ab986b313e1c362 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->cpu_to_logical_apicid() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 6 +++--- arch/x86/include/asm/es7000/apic.h | 16 ++++++++-------- arch/x86/include/asm/mach-default/mach_apic.h | 2 +- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 3 ++- arch/x86/include/asm/summit/apic.h | 8 ++++---- arch/x86/kernel/ipi.c | 4 ++-- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 25 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 77f0b734875..d0d894ff7d3 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -82,7 +82,7 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) +static inline int bigsmp_cpu_to_logical_apicid(int cpu) { if (cpu >= nr_cpu_ids) return BAD_APICID; @@ -115,7 +115,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) int apicid; cpu = first_cpu(*cpumask); - apicid = cpu_to_logical_apicid(cpu); + apicid = bigsmp_cpu_to_logical_apicid(cpu); return apicid; } @@ -132,7 +132,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, if (cpumask_test_cpu(cpu, cpu_online_mask)) break; if (cpu < nr_cpu_ids) - return cpu_to_logical_apicid(cpu); + return bigsmp_cpu_to_logical_apicid(cpu); return BAD_APICID; } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index bcdf31400df..e0cd07e74f9 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -109,7 +109,7 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) +static inline int es7000_cpu_to_logical_apicid(int cpu) { #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) @@ -155,10 +155,10 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) * on the same apicid cluster return default value of target_cpus(): */ cpu = cpumask_first(cpumask); - apicid = cpu_to_logical_apicid(cpu); + apicid = es7000_cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = 
cpu_to_logical_apicid(cpu); + int new_apicid = es7000_cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n", __func__); @@ -182,20 +182,20 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) num_bits_set = cpus_weight(*cpumask); /* Return id to all */ if (num_bits_set == nr_cpu_ids) - return cpu_to_logical_apicid(0); + return es7000_cpu_to_logical_apicid(0); /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of target_cpus(): */ cpu = first_cpu(*cpumask); - apicid = cpu_to_logical_apicid(cpu); + apicid = es7000_cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { if (cpu_isset(cpu, *cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); + int new_apicid = es7000_cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n", __func__); - return cpu_to_logical_apicid(0); + return es7000_cpu_to_logical_apicid(0); } apicid = new_apicid; cpus_found++; @@ -209,7 +209,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = cpu_to_logical_apicid(0); + int apicid = es7000_cpu_to_logical_apicid(0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 2f78209d972..eae3e4b6ed0 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -105,7 +105,7 @@ static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) } /* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) +static inline int default_cpu_to_logical_apicid(int cpu) { return 1 << cpu; } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index b585a8e5f81..2ea913e8e0d 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define cpu_to_logical_apicid (apic->cpu_to_logical_apicid) #define cpu_present_to_apicid (apic->cpu_present_to_apicid) #define apicid_to_cpu_present (apic->apicid_to_cpu_present) #define setup_portio_remap (apic->setup_portio_remap) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index a0e3b437118..6989abd3485 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -56,7 +56,8 @@ static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) /* Mapping from cpu number to logical apicid */ extern u8 cpu_2_logical_apicid[]; -static inline int cpu_to_logical_apicid(int cpu) + +static inline int numaq_cpu_to_logical_apicid(int cpu) { if (cpu >= nr_cpu_ids) return BAD_APICID; diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index cfff2760e60..d564d7ee3f6 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -85,7 +85,7 @@ static inline int summit_apicid_to_node(int logical_apicid) } /* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) +static inline int summit_cpu_to_logical_apicid(int cpu) { #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) @@ -145,10 +145,10 @@ static inline unsigned int 
cpu_mask_to_apicid(const cpumask_t *cpumask) * on the same apicid cluster return default value of target_cpus(): */ cpu = first_cpu(*cpumask); - apicid = cpu_to_logical_apicid(cpu); + apicid = summit_cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { if (cpu_isset(cpu, *cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); + int new_apicid = summit_cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n", __func__); @@ -165,7 +165,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int apicid = cpu_to_logical_apicid(0); + int apicid = summit_cpu_to_logical_apicid(0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index e2e4895ca69..367c5e684fa 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -140,7 +140,7 @@ void send_IPI_mask_sequence(const struct cpumask *mask, int vector) local_irq_save(flags); for_each_cpu(query_cpu, mask) - __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); + __send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector); local_irq_restore(flags); } @@ -155,7 +155,7 @@ void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) local_irq_save(flags); for_each_cpu(query_cpu, mask) if (query_cpu != this_cpu) - __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), + __send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector); local_irq_restore(flags); } diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 2f4121499e5..cd6f02ba88e 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -81,7 +81,7 @@ struct genapic apic_bigsmp = { .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = bigsmp_apicid_to_node, - .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d391c2dc819..ef9b936c41a 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -62,7 +62,7 @@ struct genapic apic_default = { .setup_apic_routing = default_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = default_apicid_to_node, - .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_to_logical_apicid = default_cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 933f2a38599..74bf2b6b751 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -123,7 +123,7 @@ struct genapic apic_es7000 = { .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 
38344fb9979..461f5beedb2 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -68,7 +68,7 @@ struct genapic apic_numaq = { .setup_apic_routing = numaq_setup_apic_routing, .multi_timer_check = numaq_multi_timer_check, .apicid_to_node = numaq_apicid_to_node, - .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 6150604deaa..d99be2d4efc 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -61,7 +61,7 @@ struct genapic apic_summit = { .setup_apic_routing = summit_setup_apic_routing, .multi_timer_check = NULL, .apicid_to_node = summit_apicid_to_node, - .cpu_to_logical_apicid = cpu_to_logical_apicid, + .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, .cpu_present_to_apicid = cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, -- cgit v1.2.3 From a21769a4461801454930a06bc18bd8249cd9e993 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->cpu_present_to_apicid() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/genapic.h | 2 ++ arch/x86/include/asm/mach-default/mach_apic.h | 11 ++++++++++- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/genapic_flat_64.c | 4 ++-- arch/x86/kernel/genx2apic_cluster.c | 2 +- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/kernel/smpboot.c | 9 ++++++++- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 17 files changed, 34 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index d0d894ff7d3..eea5e9788dd 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -67,7 +67,7 @@ static inline int bigsmp_apicid_to_node(int logical_apicid) return apicid_2_node[hard_smp_processor_id()]; } -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index e0cd07e74f9..7cdde3d9c5f 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -88,7 +88,7 @@ static inline int es7000_apicid_to_node(int logical_apicid) } -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int es7000_cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 38b1202316f..2cb14d51e45 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -108,6 +108,8 @@ extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); extern void default_setup_apic_routing(void); + +extern int default_cpu_present_to_apicid(int 
mps_cpu); #endif #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index eae3e4b6ed0..15d5627a9d6 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -110,7 +110,7 @@ static inline int default_cpu_to_logical_apicid(int cpu) return 1 << cpu; } -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int __default_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); @@ -118,6 +118,15 @@ static inline int cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } +#ifdef CONFIG_X86_32 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} +#else +extern int default_cpu_present_to_apicid(int mps_cpu); +#endif + static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 2ea913e8e0d..332fe93ab41 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define cpu_present_to_apicid (apic->cpu_present_to_apicid) #define apicid_to_cpu_present (apic->apicid_to_cpu_present) #define setup_portio_remap (apic->setup_portio_remap) #define check_phys_apicid_present (apic->check_phys_apicid_present) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 6989abd3485..f482b063447 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -69,7 +69,7 @@ static inline int numaq_cpu_to_logical_apicid(int cpu) * cpu to APIC ID relation to properly interact with the intelligent * mode of the cluster controller. 
*/ -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int numaq_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < 60) return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index d564d7ee3f6..fc127369188 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -96,7 +96,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu) #endif } -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 9446f372a16..f4a2c1c0a1a 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -197,7 +197,7 @@ struct genapic apic_flat = { .multi_timer_check = NULL, .apicid_to_node = NULL, .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = NULL, @@ -341,7 +341,7 @@ struct genapic apic_physflat = { .multi_timer_check = NULL, .apicid_to_node = NULL, .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 2eeca6e744a..710d612a964 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -199,7 +199,7 @@ struct genapic apic_x2apic_cluster = { .multi_timer_check = NULL, .apicid_to_node = NULL, .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index be0ee3e56ef..49a449178c3 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -195,7 +195,7 @@ struct genapic apic_x2apic_phys = { .multi_timer_check = NULL, .apicid_to_node = NULL, .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 68b423f3da9..a08a6359186 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -260,7 +260,7 @@ struct genapic apic_x2apic_uv_x = { .multi_timer_check = NULL, .apicid_to_node = NULL, .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = NULL, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 1dd4cecd4bc..812bf39de35 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -903,9 +903,16 @@ do_rest: return boot_error; } +#ifdef CONFIG_X86_64 +int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} +#endif + int __cpuinit native_cpu_up(unsigned int cpu) { - int apicid = cpu_present_to_apicid(cpu); + int apicid = 
apic->cpu_present_to_apicid(cpu); unsigned long flags; int err; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index cd6f02ba88e..1eaf18c801d 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -82,7 +82,7 @@ struct genapic apic_bigsmp = { .multi_timer_check = NULL, .apicid_to_node = bigsmp_apicid_to_node, .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, - .cpu_present_to_apicid = cpu_present_to_apicid, + .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index ef9b936c41a..2903657f420 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -63,7 +63,7 @@ struct genapic apic_default = { .multi_timer_check = NULL, .apicid_to_node = default_apicid_to_node, .cpu_to_logical_apicid = default_cpu_to_logical_apicid, - .cpu_present_to_apicid = cpu_present_to_apicid, + .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 74bf2b6b751..5a3a8ab4f8a 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -124,7 +124,7 @@ struct genapic apic_es7000 = { .multi_timer_check = NULL, .apicid_to_node = es7000_apicid_to_node, .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, - .cpu_present_to_apicid = cpu_present_to_apicid, + .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 461f5beedb2..d928cae211c 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -69,7 +69,7 @@ struct genapic apic_numaq = { .multi_timer_check = numaq_multi_timer_check, .apicid_to_node = numaq_apicid_to_node, .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, - .cpu_present_to_apicid = cpu_present_to_apicid, + .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index d99be2d4efc..e6bb34ee580 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -62,7 +62,7 @@ struct genapic apic_summit = { .multi_timer_check = NULL, .apicid_to_node = summit_apicid_to_node, .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, - .cpu_present_to_apicid = cpu_present_to_apicid, + .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, -- cgit v1.2.3 From 8058714a41afc4c983acb274b1adf7bd3cfe7f6e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 06:50:47 +0100 Subject: x86, apic: clean up ->apicid_to_cpu_present() - separate the namespace - remove macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h 
| 2 +- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 4 ++-- arch/x86/kernel/io_apic.c | 4 ++-- arch/x86/kernel/visws_quirks.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 13 files changed, 14 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index eea5e9788dd..080457450b2 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -75,7 +75,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 7cdde3d9c5f..a09e1133ced 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -98,7 +98,7 @@ static inline int es7000_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) { static int id = 0; physid_mask_t mask; diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 15d5627a9d6..22683e5b82b 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -127,7 +127,7 @@ static inline int default_cpu_present_to_apicid(int mps_cpu) extern int default_cpu_present_to_apicid(int mps_cpu); #endif -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 332fe93ab41..997618f2eb5 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define apicid_to_cpu_present (apic->apicid_to_cpu_present) #define setup_portio_remap (apic->setup_portio_remap) #define check_phys_apicid_present (apic->check_phys_apicid_present) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index f482b063447..8ac000f9928 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -82,7 +82,7 @@ static inline int numaq_apicid_to_node(int logical_apicid) return logical_apicid >> 4; } -static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) +static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) { int node = numaq_apicid_to_node(logical_apicid); int cpu = __ffs(logical_apicid & 0xf); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index fc127369188..79c1a45f886 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -105,13 +105,13 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu) } static inline physid_mask_t - summit_ioapic_phys_id_map(physid_mask_t phys_id_map) +summit_ioapic_phys_id_map(physid_mask_t phys_id_map) { /* For clustered we don't 
have a good way to do this yet - hack */ return physids_promote(0x0F); } -static inline physid_mask_t apicid_to_cpu_present(int apicid) +static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) { return physid_mask_of_physid(0); } diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 282ea112f3c..3d85d3d810b 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2155,7 +2155,7 @@ static void __init setup_ioapic_ids_from_mpc(void) mp_ioapics[apic_id].apicid = i; } else { physid_mask_t tmp; - tmp = apicid_to_cpu_present(mp_ioapics[apic_id].apicid); + tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid); apic_printk(APIC_VERBOSE, "Setting %d in the " "phys_id_present_map\n", mp_ioapics[apic_id].apicid); @@ -3899,7 +3899,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) apic_id = i; } - tmp = apicid_to_cpu_present(apic_id); + tmp = apic->apicid_to_cpu_present(apic_id); physids_or(apic_id_map, apic_id_map, tmp); if (reg_00.bits.ID != apic_id) { diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index d801d06af06..2ed5bdf15c9 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -200,7 +200,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) return; } - apic_cpus = apicid_to_cpu_present(m->apicid); + apic_cpus = apic->apicid_to_cpu_present(m->apicid); physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); /* * Validate version diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 1eaf18c801d..61396523074 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -83,7 +83,7 @@ struct genapic apic_bigsmp = { .apicid_to_node = bigsmp_apicid_to_node, .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, - .apicid_to_cpu_present = apicid_to_cpu_present, + .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 2903657f420..8fc704a3db7 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -64,7 +64,7 @@ struct genapic apic_default = { .apicid_to_node = default_apicid_to_node, .cpu_to_logical_apicid = default_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = apicid_to_cpu_present, + .apicid_to_cpu_present = default_apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 5a3a8ab4f8a..1e0e1627455 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -125,7 +125,7 @@ struct genapic apic_es7000 = { .apicid_to_node = es7000_apicid_to_node, .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, - .apicid_to_cpu_present = apicid_to_cpu_present, + .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index d928cae211c..839b86b765a 100644 --- a/arch/x86/mach-generic/numaq.c +++ 
b/arch/x86/mach-generic/numaq.c @@ -70,7 +70,7 @@ struct genapic apic_numaq = { .apicid_to_node = numaq_apicid_to_node, .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, .cpu_present_to_apicid = numaq_cpu_present_to_apicid, - .apicid_to_cpu_present = apicid_to_cpu_present, + .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index e6bb34ee580..b6e37607a52 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -63,7 +63,7 @@ struct genapic apic_summit = { .apicid_to_node = summit_apicid_to_node, .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, .cpu_present_to_apicid = summit_cpu_present_to_apicid, - .apicid_to_cpu_present = apicid_to_cpu_present, + .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, -- cgit v1.2.3 From d83093b50416f4ca59d3a84b2ddc217748214d64 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 12:43:18 +0100 Subject: x86: refactor ->setup_portio_remap() subarch methods Only NUMAQ has a real ->setup_portio_remap() method, the other subarchitectures define it but keep it empty. So mark the vector as NULL, extend the generic code to handle NULL ->setup_portio_remap() entries and remove all the empty handlers. Also move the NUMAQ method from the header file into the apic driver .c file. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 4 ---- arch/x86/include/asm/mach-default/mach_apic.h | 4 ---- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 13 ------------- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/smpboot.c | 3 ++- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 15 ++++++++++++++- arch/x86/mach-generic/summit.c | 2 +- 12 files changed, 22 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 080457450b2..2fa70032e3b 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -95,7 +95,7 @@ static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) return physids_promote(0xFFL); } -static inline void setup_portio_remap(void) +static inline void bigsmp_setup_portio_remap(void) { } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index a09e1133ced..c5b0eb51e53 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -127,10 +127,6 @@ static inline physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) } -static inline void setup_portio_remap(void) -{ -} - extern unsigned int boot_cpu_physical_apicid; static inline int check_phys_apicid_present(int cpu_physical_apicid) { diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 22683e5b82b..54c20e19e28 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -132,10 +132,6 @@ static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); }
-static inline void setup_portio_remap(void) -{ -} - static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 997618f2eb5..393a97c5685 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define setup_portio_remap (apic->setup_portio_remap) #define check_phys_apicid_present (apic->check_phys_apicid_present) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 8ac000f9928..6b626519cd7 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -92,19 +92,6 @@ static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) extern void *xquad_portio; -static inline void setup_portio_remap(void) -{ - int num_quads = num_online_nodes(); - - if (num_quads <= 1) - return; - - printk("Remapping cross-quad port I/O for %d quads\n", num_quads); - xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); - printk("xquad_portio vaddr 0x%08lx, len %08lx\n", - (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); -} - static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return (1); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 79c1a45f886..131343b0b75 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -116,7 +116,7 @@ static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) return physid_mask_of_physid(0); } -static inline void setup_portio_remap(void) +static inline void summit_setup_portio_remap(void) { } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 812bf39de35..0e7d26c01f9 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1170,7 +1170,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) map_cpu_to_logical_apicid(); - setup_portio_remap(); + if (apic->setup_portio_remap) + apic->setup_portio_remap(); smpboot_setup_io_apic(); /* diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 61396523074..424740554a3 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -84,7 +84,7 @@ struct genapic apic_bigsmp = { .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, - .setup_portio_remap = setup_portio_remap, + .setup_portio_remap = NULL, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 8fc704a3db7..b48a58daf71 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -65,7 +65,7 @@ struct genapic apic_default = { .cpu_to_logical_apicid = default_cpu_to_logical_apicid, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = default_apicid_to_cpu_present, - .setup_portio_remap = setup_portio_remap, + .setup_portio_remap = NULL, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, diff --git 
a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 1e0e1627455..449eca5b604 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -126,7 +126,7 @@ struct genapic apic_es7000 = { .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, - .setup_portio_remap = setup_portio_remap, + .setup_portio_remap = NULL, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 839b86b765a..e60361b0bf1 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -44,6 +44,19 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } +static void numaq_setup_portio_remap(void) +{ + int num_quads = num_online_nodes(); + + if (num_quads <= 1) + return; + + printk("Remapping cross-quad port I/O for %d quads\n", num_quads); + xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); + printk("xquad_portio vaddr 0x%08lx, len %08lx\n", + (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); +} + struct genapic apic_numaq = { .name = "NUMAQ", @@ -71,7 +84,7 @@ struct genapic apic_numaq = { .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = numaq_apicid_to_cpu_present, - .setup_portio_remap = setup_portio_remap, + .setup_portio_remap = numaq_setup_portio_remap, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index b6e37607a52..ffcf7ca2e8c 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -64,7 +64,7 @@ struct genapic apic_summit = { .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, - .setup_portio_remap = setup_portio_remap, + .setup_portio_remap = NULL, .check_phys_apicid_present = check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, -- cgit v1.2.3 From a27a621001f4c3e57caf47feff4b014577fd01c6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 12:43:18 +0100 Subject: x86: refactor ->check_phys_apicid_present() subarch methods - spread out the namespace to per driver methods - extend it to 64-bit as well so that we can use apic->check_phys_apicid_present() unconditionally Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 6 +++--- arch/x86/include/asm/es7000/apic.h | 4 ++-- arch/x86/include/asm/genapic.h | 1 + arch/x86/include/asm/mach-default/mach_apic.h | 18 +++++++++++++----- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/genapic_flat_64.c | 4 ++-- arch/x86/kernel/genx2apic_cluster.c | 2 +- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/kernel/smpboot.c | 7 ++++++- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 17 files changed, 38 insertions(+), 25 deletions(-) 
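The shape of this change, condensed from the hunks below (a sketch drawn from this same patch, not a drop-in copy of it): the real test lives in a double-underscored helper in mach_apic.h, 32-bit keeps a static inline wrapper, 64-bit declares it extern and gets an out-of-line copy in smpboot.c, and the call site goes through the apic-> vector so it works the same on both:

/* mach_apic.h: the common helper with the actual check */
static inline int __default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}

#ifdef CONFIG_X86_32
/* 32-bit: a plain inline wrapper is enough */
static inline int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
/* 64-bit: the out-of-line definition lives in smpboot.c */
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

/* smp_sanity_check(): callers now always use the driver vector */
if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
	printk(KERN_NOTICE
	       "weird, boot CPU (#%d) not listed by the BIOS.\n",
	       boot_cpu_physical_apicid);
}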
(limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 2fa70032e3b..5ba4118fcdf 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -99,13 +99,13 @@ static inline void bigsmp_setup_portio_remap(void) { } -static inline void enable_apic_mode(void) +static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) { + return 1; } -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) +static inline void enable_apic_mode(void) { - return (1); } /* As we are using single CPU as destination, pick only one CPU here */ diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index c5b0eb51e53..717c27f8da6 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -126,9 +126,9 @@ static inline physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) return physids_promote(0xff); } - extern unsigned int boot_cpu_physical_apicid; -static inline int check_phys_apicid_present(int cpu_physical_apicid) + +static inline int es7000_check_phys_apicid_present(int cpu_physical_apicid) { boot_cpu_physical_apicid = read_apic_id(); return (1); diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 2cb14d51e45..f292fd02eba 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -110,6 +110,7 @@ DECLARE_PER_CPU(int, x2apic_extra_bits); extern void default_setup_apic_routing(void); extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); #endif #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 54c20e19e28..0a824d3a223 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -118,13 +118,26 @@ static inline int __default_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } +static inline int +__default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); +} + #ifdef CONFIG_X86_32 static inline int default_cpu_present_to_apicid(int mps_cpu) { return __default_cpu_present_to_apicid(mps_cpu); } + +static inline int +default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return __default_check_phys_apicid_present(boot_cpu_physical_apicid); +} #else extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); #endif static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) @@ -132,11 +145,6 @@ static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - static inline void enable_apic_mode(void) { } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 393a97c5685..efd762d951a 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,7 +3,6 @@ #include -#define check_phys_apicid_present (apic->check_phys_apicid_present) #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define 
enable_apic_mode (apic->enable_apic_mode) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 6b626519cd7..3be735e2ab9 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -92,9 +92,9 @@ static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) extern void *xquad_portio; -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) +static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) { - return (1); + return 1; } static inline void enable_apic_mode(void) diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 131343b0b75..fe578f6df78 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -120,7 +120,7 @@ static inline void summit_setup_portio_remap(void) { } -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) +static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) { return 1; } diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index f4a2c1c0a1a..78adf71f7e5 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -200,7 +200,7 @@ struct genapic apic_flat = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, - .check_phys_apicid_present = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = NULL, @@ -344,7 +344,7 @@ struct genapic apic_physflat = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, - .check_phys_apicid_present = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 710d612a964..7062e24b18f 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -202,7 +202,7 @@ struct genapic apic_x2apic_cluster = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, - .check_phys_apicid_present = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 49a449178c3..7177a1110f0 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -198,7 +198,7 @@ struct genapic apic_x2apic_phys = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, - .check_phys_apicid_present = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index a08a6359186..debd721f0b4 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -263,7 +263,7 @@ struct genapic apic_x2apic_uv_x = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, - .check_phys_apicid_present = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, 
.phys_pkg_id = phys_pkg_id, .mps_oem_check = NULL, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0e7d26c01f9..ab83be2f8e0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -908,6 +908,11 @@ int default_cpu_present_to_apicid(int mps_cpu) { return __default_cpu_present_to_apicid(mps_cpu); } + +int default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return __default_check_phys_apicid_present(boot_cpu_physical_apicid); +} #endif int __cpuinit native_cpu_up(unsigned int cpu) @@ -1058,7 +1063,7 @@ static int __init smp_sanity_check(unsigned max_cpus) * Should not be necessary because the MP table should list the boot * CPU too, but we do it for the sake of robustness anyway. */ - if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { + if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n", boot_cpu_physical_apicid); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 424740554a3..82743d16c23 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -85,7 +85,7 @@ struct genapic apic_bigsmp = { .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, .setup_portio_remap = NULL, - .check_phys_apicid_present = check_phys_apicid_present, + .check_phys_apicid_present = bigsmp_check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index b48a58daf71..d0374c69ad0 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -66,7 +66,7 @@ struct genapic apic_default = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = default_apicid_to_cpu_present, .setup_portio_remap = NULL, - .check_phys_apicid_present = check_phys_apicid_present, + .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 449eca5b604..52b3eb5e645 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -127,7 +127,7 @@ struct genapic apic_es7000 = { .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, - .check_phys_apicid_present = check_phys_apicid_present, + .check_phys_apicid_present = es7000_check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index e60361b0bf1..7ec2ca43ca2 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -85,7 +85,7 @@ struct genapic apic_numaq = { .cpu_present_to_apicid = numaq_cpu_present_to_apicid, .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = numaq_setup_portio_remap, - .check_phys_apicid_present = check_phys_apicid_present, + .check_phys_apicid_present = numaq_check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index ffcf7ca2e8c..acf12de8916 100644 --- a/arch/x86/mach-generic/summit.c +++ 
b/arch/x86/mach-generic/summit.c @@ -65,7 +65,7 @@ struct genapic apic_summit = { .cpu_present_to_apicid = summit_cpu_present_to_apicid, .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, - .check_phys_apicid_present = check_phys_apicid_present, + .check_phys_apicid_present = summit_check_phys_apicid_present, .enable_apic_mode = enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, -- cgit v1.2.3 From 4904033302c745342e3b3a611881cdee57fbe06a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 12:43:18 +0100 Subject: x86: refactor ->enable_apic_mode() subarch methods Only ES7000 has a real ->enable_apic_mode() method, the other subarchitectures define it but keep it empty. So mark the vector as NULL, extend the generic code to handle NULL -setup_portio_remap() entries and remove all the empty handlers. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 4 ---- arch/x86/include/asm/mach-default/mach_apic.h | 3 --- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 4 ---- arch/x86/include/asm/summit/apic.h | 4 ---- arch/x86/kernel/apic.c | 3 ++- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 6 +++--- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 11 files changed, 9 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 5ba4118fcdf..f49d440862f 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -104,10 +104,6 @@ static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) return 1; } -static inline void enable_apic_mode(void) -{ -} - /* As we are using single CPU as destination, pick only one CPU here */ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 0a824d3a223..3647c92d45e 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -145,8 +145,5 @@ static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -static inline void enable_apic_mode(void) -{ -} #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index efd762d951a..6fed521585c 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -5,7 +5,6 @@ #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) -#define enable_apic_mode (apic->enable_apic_mode) #define phys_pkg_id (apic->phys_pkg_id) #define wakeup_secondary_cpu (apic->wakeup_cpu) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 3be735e2ab9..dc93c30972e 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -97,10 +97,6 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) return 1; } -static inline void enable_apic_mode(void) -{ -} - /* * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us diff --git a/arch/x86/include/asm/summit/apic.h 
b/arch/x86/include/asm/summit/apic.h index fe578f6df78..526d19e7944 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -125,10 +125,6 @@ static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) return 1; } -static inline void enable_apic_mode(void) -{ -} - static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { int num_bits_set; diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index fcbcc03cd4b..9d6374da478 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1763,7 +1763,8 @@ void __init connect_bsp_APIC(void) outb(0x01, 0x23); } #endif - enable_apic_mode(); + if (apic->enable_apic_mode) + apic->enable_apic_mode(); } /** diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 82743d16c23..e151b472456 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -86,7 +86,7 @@ struct genapic apic_bigsmp = { .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = bigsmp_check_phys_apicid_present, - .enable_apic_mode = enable_apic_mode, + .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d0374c69ad0..ac6be195b97 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -67,7 +67,7 @@ struct genapic apic_default = { .apicid_to_cpu_present = default_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = enable_apic_mode, + .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 52b3eb5e645..9acb71120ef 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -36,10 +36,10 @@ static int probe_es7000(void) } extern void es7000_sw_apic(void); -static void __init enable_apic_mode(void) + +static void __init es7000_enable_apic_mode(void) { es7000_sw_apic(); - return; } static __init int @@ -128,7 +128,7 @@ struct genapic apic_es7000 = { .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = es7000_check_phys_apicid_present, - .enable_apic_mode = enable_apic_mode, + .enable_apic_mode = es7000_enable_apic_mode, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 7ec2ca43ca2..8d3358de3fe 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -86,7 +86,7 @@ struct genapic apic_numaq = { .apicid_to_cpu_present = numaq_apicid_to_cpu_present, .setup_portio_remap = numaq_setup_portio_remap, .check_phys_apicid_present = numaq_check_phys_apicid_present, - .enable_apic_mode = enable_apic_mode, + .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, .mps_oem_check = mps_oem_check, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index acf12de8916..cb83bcbb2de 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -66,7 +66,7 @@ struct genapic apic_summit = { .apicid_to_cpu_present = summit_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = summit_check_phys_apicid_present, - .enable_apic_mode = enable_apic_mode, + .enable_apic_mode = NULL, .phys_pkg_id = phys_pkg_id, 
.mps_oem_check = mps_oem_check, -- cgit v1.2.3 From b0b20e5a3a6615ae750804523aeedd32911bb9d6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:15:06 +0100 Subject: x86, es7000: clean up es7000_enable_apic_mode() - eliminate the needless es7000_enable_apic_mode() complication which was not apparent prior the namespace cleanups - clean up the control flow in es7000_enable_apic_mode() - other cleanups Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 2 ++ arch/x86/kernel/es7000_32.c | 27 ++++++++++++++------------- arch/x86/mach-generic/es7000.c | 7 ------- 3 files changed, 16 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 717c27f8da6..038c4f0e480 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -36,6 +36,8 @@ static inline unsigned long es7000_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } +extern void es7000_enable_apic_mode(void); + #define apicid_cluster(apicid) (apicid & 0xF0) static inline unsigned long calculate_ldr(int cpu) diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 20a2a43c2a9..e73fe18488a 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -359,20 +359,21 @@ es7000_mip_write(struct mip_reg *mip_reg) return status; } -void __init -es7000_sw_apic(void) +void __init es7000_enable_apic_mode(void) { - if (es7000_plat) { - int mip_status; - struct mip_reg es7000_mip_reg; - - printk("ES7000: Enabling APIC mode.\n"); - memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); - es7000_mip_reg.off_0 = MIP_SW_APIC; - es7000_mip_reg.off_38 = (MIP_VALID); - while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) - printk("es7000_sw_apic: command failed, status = %x\n", - mip_status); + struct mip_reg es7000_mip_reg; + int mip_status; + + if (!es7000_plat) return; + + printk("ES7000: Enabling APIC mode.\n"); + memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); + es7000_mip_reg.off_0 = MIP_SW_APIC; + es7000_mip_reg.off_38 = MIP_VALID; + + while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { + printk("es7000_enable_apic_mode: command failed, status = %x\n", + mip_status); } } diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 9acb71120ef..1185964b7a3 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -35,13 +35,6 @@ static int probe_es7000(void) return 0; } -extern void es7000_sw_apic(void); - -static void __init es7000_enable_apic_mode(void) -{ - es7000_sw_apic(); -} - static __init int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { -- cgit v1.2.3 From d4c9a9f3d416cfa1f5ffbe09d864d069467fe693 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:31:22 +0100 Subject: x86, apic: unify phys_pkg_id() - unify the call signature of 64-bit to that of 32-bit - clean up the types all around - clean up namespace contamination Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/genapic.h | 6 +----- arch/x86/include/asm/mach-default/mach_apic.h | 2 +- arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 5 +++-- arch/x86/kernel/cpu/addon_cpuid_features.c | 10 +--------- arch/x86/kernel/cpu/common.c | 11 +---------- arch/x86/kernel/genapic_flat_64.c | 6 +++--- arch/x86/kernel/genx2apic_cluster.c | 4 ++-- 
arch/x86/kernel/genx2apic_phys.c | 4 ++-- arch/x86/kernel/genx2apic_uv_x.c | 4 ++-- 12 files changed, 19 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index f49d440862f..b7cba5b5635 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -133,7 +133,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +static inline int phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 038c4f0e480..d2c6c202e8b 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -221,7 +221,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, return apicid; } -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +static inline int phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index f292fd02eba..14b19de8cd0 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -48,11 +48,7 @@ struct genapic { void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); -#ifdef CONFIG_X86_32 - u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); -#else - unsigned int (*phys_pkg_id)(int index_msb); -#endif + int (*phys_pkg_id)(int cpuid_apic, int index_msb); /* * When one of the next two hooks returns 1 the genapic diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 3647c92d45e..55797a35150 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -65,7 +65,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, return (unsigned int)(mask1 & mask2 & mask3); } -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +static inline int phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index dc93c30972e..bc2c8a425c0 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -113,7 +113,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, } /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +static inline int phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 526d19e7944..64cd441ae00 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -175,13 +175,14 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, return apicid; } -/* cpuid returns the value latched in the HW at reset, not the APIC ID +/* + * cpuid returns the value latched in the HW at reset, not the APIC ID * register's value. For any box whose BIOS changes APIC IDs, like * clustered APIC systems, we must use hard_smp_processor_id. * * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 
*/ -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +static inline int phys_pkg_id(int cpuid_apic, int index_msb) { return hard_smp_processor_id() >> index_msb; } diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 4e581fdc0a5..84f8e4a5aef 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -116,7 +116,6 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; -#ifdef CONFIG_X86_32 c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); @@ -124,14 +123,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) * Reinit the apicid, now that we have extended initial_apicid. */ c->apicid = phys_pkg_id(c->initial_apicid, 0); -#else - c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask; - c->phys_proc_id = phys_pkg_id(core_plus_mask_width); - /* - * Reinit the apicid, now that we have extended initial_apicid. - */ - c->apicid = phys_pkg_id(0); -#endif + c->x86_max_cores = (core_level_siblings / smp_num_siblings); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 275e2cb43b9..93c491c4fe7 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -442,11 +442,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) } index_msb = get_count_order(smp_num_siblings); -#ifdef CONFIG_X86_64 - c->phys_proc_id = phys_pkg_id(index_msb); -#else c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); -#endif smp_num_siblings = smp_num_siblings / c->x86_max_cores; @@ -454,13 +450,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) core_bits = get_count_order(c->x86_max_cores); -#ifdef CONFIG_X86_64 - c->cpu_core_id = phys_pkg_id(index_msb) & - ((1 << core_bits) - 1); -#else c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); -#endif } out: @@ -742,7 +733,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) this_cpu->c_identify(c); #ifdef CONFIG_X86_64 - c->apicid = phys_pkg_id(0); + c->apicid = phys_pkg_id(c->initial_apicid, 0); #endif /* diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 78adf71f7e5..cc9e07b9609 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -169,7 +169,7 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return mask1 & mask2; } -static unsigned int phys_pkg_id(int index_msb) +static int flat_phys_pkg_id(int initial_apic_id, int index_msb) { return hard_smp_processor_id() >> index_msb; } @@ -202,7 +202,7 @@ struct genapic apic_flat = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = get_apic_id, @@ -346,7 +346,7 @@ struct genapic apic_physflat = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = get_apic_id, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 7062e24b18f..18b6f14376e 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -157,7 +157,7 @@ 
static unsigned long set_apic_id(unsigned int id) return x; } -static unsigned int phys_pkg_id(int index_msb) +static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) { return current_cpu_data.initial_apicid >> index_msb; } @@ -204,7 +204,7 @@ struct genapic apic_x2apic_cluster = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = x2apic_cluster_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = get_apic_id, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 7177a1110f0..2cb6f49e4c5 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -156,7 +156,7 @@ static unsigned long set_apic_id(unsigned int id) return x; } -static unsigned int phys_pkg_id(int index_msb) +static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) { return current_cpu_data.initial_apicid >> index_msb; } @@ -200,7 +200,7 @@ struct genapic apic_x2apic_phys = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = x2apic_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = get_apic_id, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index debd721f0b4..67e7658775e 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -226,7 +226,7 @@ static unsigned int uv_read_apic_id(void) return get_apic_id(apic_read(APIC_ID)); } -static unsigned int phys_pkg_id(int index_msb) +static int uv_phys_pkg_id(int initial_apicid, int index_msb) { return uv_read_apic_id() >> index_msb; } @@ -265,7 +265,7 @@ struct genapic apic_x2apic_uv_x = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = uv_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = get_apic_id, -- cgit v1.2.3 From cb8cc442dc7e07cb5438b357843ab4095ad73933 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:24:54 +0100 Subject: x86, apic: refactor ->phys_pkg_id() Refactor the ->phys_pkg_id() methods: - namespace separation - macro wrapper removal - open-coded calls to the methods in the generic code Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 2 +- arch/x86/include/asm/es7000/apic.h | 2 +- arch/x86/include/asm/mach-default/mach_apic.h | 3 +-- arch/x86/include/asm/mach-generic/mach_apic.h | 1 - arch/x86/include/asm/numaq/apic.h | 2 +- arch/x86/include/asm/summit/apic.h | 2 +- arch/x86/kernel/cpu/addon_cpuid_features.c | 6 +++--- arch/x86/kernel/cpu/common.c | 8 ++++---- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 13 files changed, 17 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index b7cba5b5635..1230f5d7a38 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -133,7 +133,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static inline int phys_pkg_id(int cpuid_apic, int index_msb) +static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git 
a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index d2c6c202e8b..f183dfb4de4 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -221,7 +221,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, return apicid; } -static inline int phys_pkg_id(int cpuid_apic, int index_msb) +static inline int es7000_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 55797a35150..d0605281a6b 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -21,7 +21,6 @@ static inline const struct cpumask *default_target_cpus(void) #include #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) -#define phys_pkg_id (apic->phys_pkg_id) #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) #define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) @@ -65,7 +64,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, return (unsigned int)(mask1 & mask2 & mask3); } -static inline int phys_pkg_id(int cpuid_apic, int index_msb) +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 6fed521585c..1eeb5b61e48 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -5,7 +5,6 @@ #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) -#define phys_pkg_id (apic->phys_pkg_id) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void generic_bigsmp_probe(void); diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index bc2c8a425c0..765c4d5124c 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -113,7 +113,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, } /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline int phys_pkg_id(int cpuid_apic, int index_msb) +static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 64cd441ae00..fa6b3b45290 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -182,7 +182,7 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, * * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 
*/ -static inline int phys_pkg_id(int cpuid_apic, int index_msb) +static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) { return hard_smp_processor_id() >> index_msb; } diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 84f8e4a5aef..e8bb892c09f 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -116,13 +116,13 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; - c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) + c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; - c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. */ - c->apicid = phys_pkg_id(c->initial_apicid, 0); + c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 93c491c4fe7..055b9c3a660 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -442,7 +442,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) } index_msb = get_count_order(smp_num_siblings); - c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); smp_num_siblings = smp_num_siblings / c->x86_max_cores; @@ -450,7 +450,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) core_bits = get_count_order(c->x86_max_cores); - c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & + c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); } @@ -686,7 +686,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_HT - c->apicid = phys_pkg_id(c->initial_apicid, 0); + c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); # else c->apicid = c->initial_apicid; # endif @@ -733,7 +733,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) this_cpu->c_identify(c); #ifdef CONFIG_X86_64 - c->apicid = phys_pkg_id(c->initial_apicid, 0); + c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif /* diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index e151b472456..d04b38954df 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -87,7 +87,7 @@ struct genapic apic_bigsmp = { .setup_portio_remap = NULL, .check_phys_apicid_present = bigsmp_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = bigsmp_phys_pkg_id, .mps_oem_check = mps_oem_check, .get_apic_id = get_apic_id, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index ac6be195b97..5c9266f756e 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -68,7 +68,7 @@ struct genapic apic_default = { .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = default_phys_pkg_id, .mps_oem_check = mps_oem_check, .get_apic_id = get_apic_id, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 1185964b7a3..52787e34c9c 100644 --- 
a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -122,7 +122,7 @@ struct genapic apic_es7000 = { .setup_portio_remap = NULL, .check_phys_apicid_present = es7000_check_phys_apicid_present, .enable_apic_mode = es7000_enable_apic_mode, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = es7000_phys_pkg_id, .mps_oem_check = mps_oem_check, .get_apic_id = get_apic_id, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 8d3358de3fe..6a1134e6d72 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -87,7 +87,7 @@ struct genapic apic_numaq = { .setup_portio_remap = numaq_setup_portio_remap, .check_phys_apicid_present = numaq_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = numaq_phys_pkg_id, .mps_oem_check = mps_oem_check, .get_apic_id = get_apic_id, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index cb83bcbb2de..2d6843a61d9 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -67,7 +67,7 @@ struct genapic apic_summit = { .setup_portio_remap = NULL, .check_phys_apicid_present = summit_check_phys_apicid_present, .enable_apic_mode = NULL, - .phys_pkg_id = phys_pkg_id, + .phys_pkg_id = summit_phys_pkg_id, .mps_oem_check = mps_oem_check, .get_apic_id = get_apic_id, -- cgit v1.2.3 From 5f836405ef632ba82f4a5261ff2be4198e53b51b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:47:42 +0100 Subject: x86, smp: clean up mps_oem_check() Impact: cleanup - allow NULL ->mps_oem_check() entries - clean up the code flow Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 3 +-- arch/x86/mach-generic/probe.c | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 14b19de8cd0..8bb1c73c55b 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -55,8 +55,7 @@ struct genapic { * is switched to this. 
Essentially they are additional * probe functions: */ - int (*mps_oem_check)(struct mpc_table *mpc, char *oem, - char *productid); + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); unsigned int (*get_apic_id)(unsigned long x); unsigned long (*set_apic_id)(unsigned int id); diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index a21e2b1a701..799a70f4d90 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -113,17 +113,21 @@ void __init generic_apic_probe(void) int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { int i; + for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; + if (!apic_probe[i]->mps_oem_check) + continue; + if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); } + return 1; } return 0; } -- cgit v1.2.3 From 1322a2e2db87c938d8381f8501af9a4d0eab8bc7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:54:56 +0100 Subject: x86, mpparse: call the generic quirk handlers early Call all the registered MPS quirk handlers early. These methods scan low RAM typically for specific signatures so are safe to be called early. Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c8a534a16d9..f6fb1928439 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -292,16 +292,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) return 0; #ifdef CONFIG_X86_32 - /* - * need to make sure summit and es7000's mps_oem_check is safe to be - * called early via genericarch 's mps_oem_check - */ - if (early) { -#ifdef CONFIG_X86_NUMAQ - numaq_mps_oem_check(mpc, oem, str); -#endif - } else - mps_oem_check(mpc, oem, str); + mps_oem_check(mpc, oem, str); #endif /* save the local APIC address, it might be non-default */ if (!acpi_lapic) -- cgit v1.2.3 From 9c7642470ecf03d8b4946a2addc8fe631b8426dd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 13:44:32 +0100 Subject: x86: consolidate the ->mps_oem_check() code - spread out the mps_oem_check() namespace on a per APIC driver basis Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/mpparse.h | 6 ------ arch/x86/include/asm/mach-default/mach_mpparse.h | 2 +- arch/x86/include/asm/mach-generic/mach_mpparse.h | 3 +-- arch/x86/include/asm/summit/mpparse.h | 4 ++-- arch/x86/kernel/mpparse.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 5 +++-- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/probe.c | 3 ++- arch/x86/mach-generic/summit.c | 2 +- 11 files changed, 15 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h index 30692c4ae85..662eb1e574d 100644 --- a/arch/x86/include/asm/es7000/mpparse.h +++ b/arch/x86/include/asm/es7000/mpparse.h @@ -8,13 +8,7 @@ extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern 
void unmap_unisys_acpi_oem_table(unsigned long oem_addr); extern void setup_unisys(void); -#ifndef CONFIG_X86_GENERICARCH -extern int default_acpi_madt_oem_check(char *oem_id, char *oem_table_id); -extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid); -#endif - #ifdef CONFIG_ACPI - static inline int es7000_check_dsdt(void) { struct acpi_table_header header; diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h index 8fa01770ba6..af0da140df9 100644 --- a/arch/x86/include/asm/mach-default/mach_mpparse.h +++ b/arch/x86/include/asm/mach-default/mach_mpparse.h @@ -2,7 +2,7 @@ #define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H static inline int -mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { return 0; } diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h index f497d96c76b..22bfb56f8fb 100644 --- a/arch/x86/include/asm/mach-generic/mach_mpparse.h +++ b/arch/x86/include/asm/mach-generic/mach_mpparse.h @@ -1,8 +1,7 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H #define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H - -extern int mps_oem_check(struct mpc_table *, char *, char *); +extern int generic_mps_oem_check(struct mpc_table *, char *, char *); extern int default_acpi_madt_oem_check(char *, char *); diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h index 555ed8238e9..4bbcce39acb 100644 --- a/arch/x86/include/asm/summit/mpparse.h +++ b/arch/x86/include/asm/summit/mpparse.h @@ -11,8 +11,8 @@ extern void setup_summit(void); #define setup_summit() {} #endif -static inline int mps_oem_check(struct mpc_table *mpc, char *oem, - char *productid) +static inline int +summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (!strncmp(oem, "IBM ENSW", 8) && (!strncmp(productid, "VIGIL SMP", 9) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index f6fb1928439..b12fa5ce6f5 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -292,7 +292,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) return 0; #ifdef CONFIG_X86_32 - mps_oem_check(mpc, oem, str); + generic_mps_oem_check(mpc, oem, str); #endif /* save the local APIC address, it might be non-default */ if (!acpi_lapic) diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index d04b38954df..6bf6aafeb2c 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -88,7 +88,7 @@ struct genapic apic_bigsmp = { .check_phys_apicid_present = bigsmp_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = bigsmp_phys_pkg_id, - .mps_oem_check = mps_oem_check, + .mps_oem_check = NULL, .get_apic_id = get_apic_id, .set_apic_id = NULL, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 5c9266f756e..e5f85cd75b4 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -69,7 +69,7 @@ struct genapic apic_default = { .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = default_phys_pkg_id, - .mps_oem_check = mps_oem_check, + .mps_oem_check = NULL, .get_apic_id = get_apic_id, .set_apic_id = NULL, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 52787e34c9c..f861163cd39 100644 --- a/arch/x86/mach-generic/es7000.c +++ 
b/arch/x86/mach-generic/es7000.c @@ -36,11 +36,12 @@ static int probe_es7000(void) } static __init int -mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (mpc->oemptr) { struct mpc_oemtable *oem_table = (struct mpc_oemtable *)mpc->oemptr; + if (!strncmp(oem, "UNISYS", 6)) return parse_unisys_oem((char *)oem_table); } @@ -123,7 +124,7 @@ struct genapic apic_es7000 = { .check_phys_apicid_present = es7000_check_phys_apicid_present, .enable_apic_mode = es7000_enable_apic_mode, .phys_pkg_id = es7000_phys_pkg_id, - .mps_oem_check = mps_oem_check, + .mps_oem_check = es7000_mps_oem_check, .get_apic_id = get_apic_id, .set_apic_id = NULL, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 6a1134e6d72..517882c9c15 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -19,7 +19,7 @@ #include #include -static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { numaq_mps_oem_check(mpc, oem, productid); return found_numaq; @@ -88,7 +88,7 @@ struct genapic apic_numaq = { .check_phys_apicid_present = numaq_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = numaq_phys_pkg_id, - .mps_oem_check = mps_oem_check, + .mps_oem_check = __numaq_mps_oem_check, .get_apic_id = get_apic_id, .set_apic_id = NULL, diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 799a70f4d90..ab68c6e5c48 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -110,7 +110,8 @@ void __init generic_apic_probe(void) /* These functions can switch the APIC even after the initial ->probe() */ -int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +int __init +generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { int i; diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 2d6843a61d9..719e944ff30 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -68,7 +68,7 @@ struct genapic apic_summit = { .check_phys_apicid_present = summit_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = summit_phys_pkg_id, - .mps_oem_check = mps_oem_check, + .mps_oem_check = summit_mps_oem_check, .get_apic_id = get_apic_id, .set_apic_id = NULL, -- cgit v1.2.3 From ca6c8ed4646f8ccaa4f7db618bf69b8b8fb49767 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 14:08:38 +0100 Subject: x86, apic: refactor ->get_apic_id() & GET_APIC_ID() - spread out the namespace on a per driver basis - get rid of macro wrappers - small cleanups Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apicdef.h | 6 ++---- arch/x86/include/asm/es7000/apicdef.h | 6 ++---- arch/x86/include/asm/mach-default/mach_apic.h | 2 +- arch/x86/include/asm/mach-default/mach_apicdef.h | 10 +++++----- arch/x86/include/asm/mach-generic/mach_apicdef.h | 1 - arch/x86/include/asm/numaq/apicdef.h | 7 ++----- arch/x86/include/asm/smp.h | 2 +- arch/x86/include/asm/summit/apicdef.h | 6 ++---- arch/x86/kernel/genapic_flat_64.c | 9 +++++---- arch/x86/kernel/genx2apic_cluster.c | 4 ++-- arch/x86/kernel/genx2apic_phys.c | 4 ++-- arch/x86/kernel/genx2apic_uv_x.c | 6 +++--- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 17 
files changed, 32 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h index 392c3f5ef2f..ed25dd6503b 100644 --- a/arch/x86/include/asm/bigsmp/apicdef.h +++ b/arch/x86/include/asm/bigsmp/apicdef.h @@ -3,11 +3,9 @@ #define APIC_ID_MASK (0xFF<<24) -static inline unsigned get_apic_id(unsigned long x) +static inline unsigned bigsmp_get_apic_id(unsigned long x) { - return (((x)>>24)&0xFF); + return (x >> 24) & 0xFF; } -#define GET_APIC_ID(x) get_apic_id(x) - #endif diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h index 8b234a3cb85..e23791762a1 100644 --- a/arch/x86/include/asm/es7000/apicdef.h +++ b/arch/x86/include/asm/es7000/apicdef.h @@ -3,11 +3,9 @@ #define APIC_ID_MASK (0xFF<<24) -static inline unsigned get_apic_id(unsigned long x) +static inline unsigned int es7000_get_apic_id(unsigned long x) { - return (((x)>>24)&0xFF); + return (x >> 24) & 0xFF; } -#define GET_APIC_ID(x) get_apic_id(x) - #endif diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index d0605281a6b..8719208f273 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -21,7 +21,7 @@ static inline const struct cpumask *default_target_cpus(void) #include #define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) #define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) -#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) +#define read_apic_id() (apic->get_apic_id(apic_read(APIC_ID))) #define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void default_setup_apic_routing(void); diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h index b4dcc0971c7..e84d437ba2b 100644 --- a/arch/x86/include/asm/mach-default/mach_apicdef.h +++ b/arch/x86/include/asm/mach-default/mach_apicdef.h @@ -5,20 +5,20 @@ #ifdef CONFIG_X86_64 #define APIC_ID_MASK (apic->apic_id_mask) -#define GET_APIC_ID(x) (apic->get_apic_id(x)) #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else #define APIC_ID_MASK (0xF<<24) -static inline unsigned get_apic_id(unsigned long x) + +static inline unsigned default_get_apic_id(unsigned long x) { unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + if (APIC_XAPIC(ver)) - return (((x)>>24)&0xFF); + return (x >> 24) & 0xFF; else - return (((x)>>24)&0xF); + return (x >> 24) & 0x0F; } -#define GET_APIC_ID(x) get_apic_id(x) #endif #endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h index acc9adddb34..645520bcd2c 100644 --- a/arch/x86/include/asm/mach-generic/mach_apicdef.h +++ b/arch/x86/include/asm/mach-generic/mach_apicdef.h @@ -4,7 +4,6 @@ #ifndef APIC_DEFINITION #include -#define GET_APIC_ID (apic->get_apic_id) #define APIC_ID_MASK (apic->apic_id_mask) #endif diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h index e012a46cc22..29f5e3d34e5 100644 --- a/arch/x86/include/asm/numaq/apicdef.h +++ b/arch/x86/include/asm/numaq/apicdef.h @@ -1,14 +1,11 @@ #ifndef __ASM_NUMAQ_APICDEF_H #define __ASM_NUMAQ_APICDEF_H - #define APIC_ID_MASK (0xF<<24) -static inline unsigned get_apic_id(unsigned long x) +static inline unsigned int numaq_get_apic_id(unsigned long x) { - return (((x)>>24)&0x0F); + return (x >> 24) & 0x0F; } -#define 
GET_APIC_ID(x) get_apic_id(x) - #endif diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 45ef8a1b9d7..c63d480802a 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -189,7 +189,7 @@ static inline unsigned int read_apic_id(void) reg = *(u32 *)(APIC_BASE + APIC_ID); - return GET_APIC_ID(reg); + return apic->get_apic_id(reg); } #endif diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h index f3fbca1f61c..4286528af7c 100644 --- a/arch/x86/include/asm/summit/apicdef.h +++ b/arch/x86/include/asm/summit/apicdef.h @@ -3,11 +3,9 @@ #define APIC_ID_MASK (0xFF<<24) -static inline unsigned get_apic_id(unsigned long x) +static inline unsigned summit_get_apic_id(unsigned long x) { - return (x>>24)&0xFF; + return (x >> 24) & 0xFF; } -#define GET_APIC_ID(x) get_apic_id(x) - #endif diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index cc9e07b9609..ab47091dac2 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -126,11 +126,12 @@ static void flat_send_IPI_all(int vector) __send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); } -static unsigned int get_apic_id(unsigned long x) +static unsigned int flat_get_apic_id(unsigned long x) { unsigned int id; id = (((x)>>24) & 0xFFu); + return id; } @@ -146,7 +147,7 @@ static unsigned int read_xapic_id(void) { unsigned int id; - id = get_apic_id(apic_read(APIC_ID)); + id = flat_get_apic_id(apic_read(APIC_ID)); return id; } @@ -205,7 +206,7 @@ struct genapic apic_flat = { .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu << 24, @@ -349,7 +350,7 @@ struct genapic apic_physflat = { .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu<<24, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 18b6f14376e..c7557e05184 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -141,7 +141,7 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static unsigned int get_apic_id(unsigned long x) +static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x) { unsigned int id; @@ -207,7 +207,7 @@ struct genapic apic_x2apic_cluster = { .phys_pkg_id = x2apic_cluster_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = x2apic_cluster_phys_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFFFFFFFu, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 2cb6f49e4c5..80cba49cfd8 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -140,7 +140,7 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static unsigned int get_apic_id(unsigned long x) +static unsigned int x2apic_phys_get_apic_id(unsigned long x) { unsigned int id; @@ -203,7 +203,7 @@ struct genapic apic_x2apic_phys = { .phys_pkg_id = x2apic_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = x2apic_phys_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFFFFFFFu, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 67e7658775e..50310b96adc 100644 --- 
a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -201,7 +201,7 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static unsigned int get_apic_id(unsigned long x) +static unsigned int x2apic_get_apic_id(unsigned long x) { unsigned int id; @@ -223,7 +223,7 @@ static unsigned long set_apic_id(unsigned int id) static unsigned int uv_read_apic_id(void) { - return get_apic_id(apic_read(APIC_ID)); + return x2apic_get_apic_id(apic_read(APIC_ID)); } static int uv_phys_pkg_id(int initial_apicid, int index_msb) @@ -268,7 +268,7 @@ struct genapic apic_x2apic_uv_x = { .phys_pkg_id = uv_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = x2apic_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFFFFFFFu, diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 6bf6aafeb2c..9eca977227c 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -90,7 +90,7 @@ struct genapic apic_bigsmp = { .phys_pkg_id = bigsmp_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, .apic_id_mask = APIC_ID_MASK, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index e5f85cd75b4..d51a3f0335a 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -71,7 +71,7 @@ struct genapic apic_default = { .phys_pkg_id = default_phys_pkg_id, .mps_oem_check = NULL, - .get_apic_id = get_apic_id, + .get_apic_id = default_get_apic_id, .set_apic_id = NULL, .apic_id_mask = APIC_ID_MASK, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index f861163cd39..1944675db62 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -126,7 +126,7 @@ struct genapic apic_es7000 = { .phys_pkg_id = es7000_phys_pkg_id, .mps_oem_check = es7000_mps_oem_check, - .get_apic_id = get_apic_id, + .get_apic_id = es7000_get_apic_id, .set_apic_id = NULL, .apic_id_mask = APIC_ID_MASK, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 517882c9c15..fcbba84c090 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -90,7 +90,7 @@ struct genapic apic_numaq = { .phys_pkg_id = numaq_phys_pkg_id, .mps_oem_check = __numaq_mps_oem_check, - .get_apic_id = get_apic_id, + .get_apic_id = numaq_get_apic_id, .set_apic_id = NULL, .apic_id_mask = APIC_ID_MASK, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 719e944ff30..5650eaf9061 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -70,7 +70,7 @@ struct genapic apic_summit = { .phys_pkg_id = summit_phys_pkg_id, .mps_oem_check = summit_mps_oem_check, - .get_apic_id = get_apic_id, + .get_apic_id = summit_get_apic_id, .set_apic_id = NULL, .apic_id_mask = APIC_ID_MASK, -- cgit v1.2.3 From 5b8127277bc4cdca78eda5ee900a314642822ace Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 14:59:17 +0100 Subject: x86, apic: refactor ->apic_id_mask & APIC_ID_MASK - spread out the namespace on a per driver basis - get rid of wrapper macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apicdef.h | 2 +- arch/x86/include/asm/es7000/apicdef.h | 2 +- arch/x86/include/asm/mach-default/mach_apicdef.h | 3 +-- arch/x86/include/asm/mach-generic/mach_apicdef.h | 2 -- arch/x86/include/asm/numaq/apicdef.h | 2 +- arch/x86/include/asm/summit/apicdef.h | 2 +- 
arch/x86/kernel/apic.c | 4 ++-- arch/x86/kernel/genapic_flat_64.c | 2 +- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 13 files changed, 13 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h index ed25dd6503b..6e587818c7e 100644 --- a/arch/x86/include/asm/bigsmp/apicdef.h +++ b/arch/x86/include/asm/bigsmp/apicdef.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H -#define APIC_ID_MASK (0xFF<<24) +#define BIGSMP_APIC_ID_MASK (0xFF<<24) static inline unsigned bigsmp_get_apic_id(unsigned long x) { diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h index e23791762a1..476da0c7f5c 100644 --- a/arch/x86/include/asm/es7000/apicdef.h +++ b/arch/x86/include/asm/es7000/apicdef.h @@ -1,7 +1,7 @@ #ifndef __ASM_ES7000_APICDEF_H #define __ASM_ES7000_APICDEF_H -#define APIC_ID_MASK (0xFF<<24) +#define ES7000_APIC_ID_MASK (0xFF<<24) static inline unsigned int es7000_get_apic_id(unsigned long x) { diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h index e84d437ba2b..8318d121ea6 100644 --- a/arch/x86/include/asm/mach-default/mach_apicdef.h +++ b/arch/x86/include/asm/mach-default/mach_apicdef.h @@ -4,10 +4,9 @@ #include #ifdef CONFIG_X86_64 -#define APIC_ID_MASK (apic->apic_id_mask) #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else -#define APIC_ID_MASK (0xF<<24) +#define DEFAULT_APIC_ID_MASK (0x0F<<24) static inline unsigned default_get_apic_id(unsigned long x) { diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h index 645520bcd2c..61caa65b13f 100644 --- a/arch/x86/include/asm/mach-generic/mach_apicdef.h +++ b/arch/x86/include/asm/mach-generic/mach_apicdef.h @@ -3,8 +3,6 @@ #ifndef APIC_DEFINITION #include - -#define APIC_ID_MASK (apic->apic_id_mask) #endif #endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */ diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h index 29f5e3d34e5..6f2cc5df0b1 100644 --- a/arch/x86/include/asm/numaq/apicdef.h +++ b/arch/x86/include/asm/numaq/apicdef.h @@ -1,7 +1,7 @@ #ifndef __ASM_NUMAQ_APICDEF_H #define __ASM_NUMAQ_APICDEF_H -#define APIC_ID_MASK (0xF<<24) +#define NUMAQ_APIC_ID_MASK (0xF<<24) static inline unsigned int numaq_get_apic_id(unsigned long x) { diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h index 4286528af7c..0373f0c7b5d 100644 --- a/arch/x86/include/asm/summit/apicdef.h +++ b/arch/x86/include/asm/summit/apicdef.h @@ -1,7 +1,7 @@ #ifndef __ASM_SUMMIT_APICDEF_H #define __ASM_SUMMIT_APICDEF_H -#define APIC_ID_MASK (0xFF<<24) +#define SUMMIT_APIC_ID_MASK (0xFF<<24) static inline unsigned summit_get_apic_id(unsigned long x) { diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 9d6374da478..5f7f3a9a47a 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1009,11 +1009,11 @@ int __init verify_local_APIC(void) */ reg0 = apic_read(APIC_ID); apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); - apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); + apic_write(APIC_ID, reg0 ^ apic->apic_id_mask); reg1 = apic_read(APIC_ID); apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); apic_write(APIC_ID, reg0); - if (reg1 != (reg0 ^ APIC_ID_MASK)) + if (reg1 != (reg0 ^ 
apic->apic_id_mask)) return 0; /* diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index ab47091dac2..78baa55cd0e 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -352,7 +352,7 @@ struct genapic apic_physflat = { .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFu<<24, + .apic_id_mask = 0xFFu << 24, .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 9eca977227c..1f4ad4f7702 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -92,7 +92,7 @@ struct genapic apic_bigsmp = { .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = APIC_ID_MASK, + .apic_id_mask = BIGSMP_APIC_ID_MASK, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index d51a3f0335a..239af25615b 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -73,7 +73,7 @@ struct genapic apic_default = { .get_apic_id = default_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = APIC_ID_MASK, + .apic_id_mask = DEFAULT_APIC_ID_MASK, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 1944675db62..21fb33eea91 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -128,7 +128,7 @@ struct genapic apic_es7000 = { .get_apic_id = es7000_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = APIC_ID_MASK, + .apic_id_mask = ES7000_APIC_ID_MASK, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index fcbba84c090..27d2d1f2d6f 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -92,7 +92,7 @@ struct genapic apic_numaq = { .get_apic_id = numaq_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = APIC_ID_MASK, + .apic_id_mask = NUMAQ_APIC_ID_MASK, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 5650eaf9061..f24cba1b29d 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -72,7 +72,7 @@ struct genapic apic_summit = { .get_apic_id = summit_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = APIC_ID_MASK, + .apic_id_mask = SUMMIT_APIC_ID_MASK, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, -- cgit v1.2.3 From 94af18755266edf46803564414d74f9621aaded8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 15:08:53 +0100 Subject: x86, apic: get rid of *_APIC_ID_MASK definitions Impact: cleanup Remove the *_APIC_ID_MASK subarch definitions and move them straight to the genapic driver initialization code. 
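[Editor's aside — illustrative sketch, not kernel code.] The commits in this series all apply the same refactoring shape: per-subarch macros (APIC_ID_MASK, GET_APIC_ID(), enable_apic_mode(), ...) become per-driver fields or callbacks in the genapic ops structure, and the generic code learns to tolerate NULL callbacks instead of requiring empty stubs. The standalone C sketch below shows that pattern in miniature; the names (my_apic_ops, flat_ops, connect_bsp) are invented for illustration and do not exist in the kernel tree.

#include <stdio.h>

/* Illustrative stand-in for struct genapic: per-driver data and
 * optional callbacks replace the old per-subarch macros. */
struct my_apic_ops {
	const char *name;
	unsigned int apic_id_mask;                  /* was a #define per subarch */
	unsigned int (*get_apic_id)(unsigned long); /* was the GET_APIC_ID() macro */
	void (*enable_apic_mode)(void);             /* optional hook, may be NULL */
};

static unsigned int flat_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static struct my_apic_ops flat_ops = {
	.name             = "flat",
	.apic_id_mask     = 0xFFu << 24,
	.get_apic_id      = flat_get_apic_id,
	.enable_apic_mode = NULL,   /* generic code must check before calling */
};

/* Generic code calls through the ops pointer and skips NULL hooks,
 * analogous to the "if (apic->enable_apic_mode)" check added above. */
static void connect_bsp(struct my_apic_ops *apic, unsigned long raw_id_reg)
{
	unsigned int id = apic->get_apic_id(raw_id_reg);

	if (apic->enable_apic_mode)
		apic->enable_apic_mode();

	printf("%s: APIC id %u (mask 0x%x)\n", apic->name, id, apic->apic_id_mask);
}

int main(void)
{
	connect_bsp(&flat_ops, 0x05000000UL);
	return 0;
}

The design point the sketch mirrors is that a NULL entry now means "no subarch-specific behaviour", so the many empty stub handlers and wrapper macros removed in these patches are no longer needed.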
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apicdef.h | 2 -- arch/x86/include/asm/es7000/apicdef.h | 2 -- arch/x86/include/asm/mach-default/mach_apicdef.h | 1 - arch/x86/include/asm/numaq/apicdef.h | 2 -- arch/x86/include/asm/summit/apicdef.h | 2 -- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 10 files changed, 5 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h index 6e587818c7e..e58dee84757 100644 --- a/arch/x86/include/asm/bigsmp/apicdef.h +++ b/arch/x86/include/asm/bigsmp/apicdef.h @@ -1,8 +1,6 @@ #ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H -#define BIGSMP_APIC_ID_MASK (0xFF<<24) - static inline unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h index 476da0c7f5c..c74881a7b3d 100644 --- a/arch/x86/include/asm/es7000/apicdef.h +++ b/arch/x86/include/asm/es7000/apicdef.h @@ -1,8 +1,6 @@ #ifndef __ASM_ES7000_APICDEF_H #define __ASM_ES7000_APICDEF_H -#define ES7000_APIC_ID_MASK (0xFF<<24) - static inline unsigned int es7000_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h index 8318d121ea6..5141085962d 100644 --- a/arch/x86/include/asm/mach-default/mach_apicdef.h +++ b/arch/x86/include/asm/mach-default/mach_apicdef.h @@ -6,7 +6,6 @@ #ifdef CONFIG_X86_64 #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else -#define DEFAULT_APIC_ID_MASK (0x0F<<24) static inline unsigned default_get_apic_id(unsigned long x) { diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h index 6f2cc5df0b1..cd927d5bd50 100644 --- a/arch/x86/include/asm/numaq/apicdef.h +++ b/arch/x86/include/asm/numaq/apicdef.h @@ -1,8 +1,6 @@ #ifndef __ASM_NUMAQ_APICDEF_H #define __ASM_NUMAQ_APICDEF_H -#define NUMAQ_APIC_ID_MASK (0xF<<24) - static inline unsigned int numaq_get_apic_id(unsigned long x) { return (x >> 24) & 0x0F; diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h index 0373f0c7b5d..c24b0df2dec 100644 --- a/arch/x86/include/asm/summit/apicdef.h +++ b/arch/x86/include/asm/summit/apicdef.h @@ -1,8 +1,6 @@ #ifndef __ASM_SUMMIT_APICDEF_H #define __ASM_SUMMIT_APICDEF_H -#define SUMMIT_APIC_ID_MASK (0xFF<<24) - static inline unsigned summit_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 1f4ad4f7702..ee52c59aa3a 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -92,7 +92,7 @@ struct genapic apic_bigsmp = { .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = BIGSMP_APIC_ID_MASK, + .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 239af25615b..e4ed7e6d626 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -73,7 +73,7 @@ struct genapic apic_default = { .get_apic_id = default_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = DEFAULT_APIC_ID_MASK, + .apic_id_mask = 0x0F << 24, .cpu_mask_to_apicid = cpu_mask_to_apicid, 
.cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 21fb33eea91..3d046dec0c9 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -128,7 +128,7 @@ struct genapic apic_es7000 = { .get_apic_id = es7000_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = ES7000_APIC_ID_MASK, + .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 27d2d1f2d6f..a7bf1aa02e1 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -92,7 +92,7 @@ struct genapic apic_numaq = { .get_apic_id = numaq_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = NUMAQ_APIC_ID_MASK, + .apic_id_mask = 0x0F << 24, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index f24cba1b29d..a0ae6b91048 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -72,7 +72,7 @@ struct genapic apic_summit = { .get_apic_id = summit_get_apic_id, .set_apic_id = NULL, - .apic_id_mask = SUMMIT_APIC_ID_MASK, + .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid = cpu_mask_to_apicid, .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, -- cgit v1.2.3 From debccb3e77be52cfc26c5a99e123c114c5c72aeb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 15:20:18 +0100 Subject: x86, apic: refactor ->cpu_mask_to_apicid*() - spread out the namespace on a per driver basis - clean up the functions - get rid of macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 17 +++++++--------- arch/x86/include/asm/es7000/apic.h | 29 +++++++++++++++++---------- arch/x86/include/asm/mach-default/mach_apic.h | 10 ++++----- arch/x86/include/asm/mach-generic/mach_apic.h | 2 -- arch/x86/include/asm/numaq/apic.h | 11 +++++----- arch/x86/include/asm/summit/apic.h | 21 +++++++++++-------- arch/x86/kernel/genapic_flat_64.c | 4 +++- arch/x86/kernel/genx2apic_cluster.c | 15 ++++++++------ arch/x86/kernel/genx2apic_phys.c | 15 ++++++++------ arch/x86/kernel/genx2apic_uv_x.c | 14 +++++++------ arch/x86/kernel/io_apic.c | 21 ++++++++++--------- arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 6 +++--- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 16 files changed, 101 insertions(+), 80 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 1230f5d7a38..ee29d66cd30 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -105,18 +105,14 @@ static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) } /* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) +static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) { - int cpu; - int apicid; - - cpu = first_cpu(*cpumask); - apicid = bigsmp_cpu_to_logical_apicid(cpu); - return apicid; + return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static inline unsigned int +bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask 
*andmask) { int cpu; @@ -124,9 +120,10 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - for_each_cpu_and(cpu, cpumask, andmask) + for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; + } if (cpu < nr_cpu_ids) return bigsmp_cpu_to_logical_apicid(cpu); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index f183dfb4de4..b89b45db735 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -137,12 +137,12 @@ static inline int es7000_check_phys_apicid_present(int cpu_physical_apicid) } static inline unsigned int -cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) +es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) { - int num_bits_set; int cpus_found = 0; - int cpu; + int num_bits_set; int apicid; + int cpu; num_bits_set = cpumask_weight(cpumask); /* Return id to all */ @@ -154,12 +154,15 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) */ cpu = cpumask_first(cpumask); apicid = es7000_cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = es7000_cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ + apicid_cluster(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); + return 0xFF; } apicid = new_apicid; @@ -170,12 +173,12 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) +static inline unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) { - int num_bits_set; int cpus_found = 0; - int cpu; + int num_bits_set; int apicid; + int cpu; num_bits_set = cpus_weight(*cpumask); /* Return id to all */ @@ -190,9 +193,11 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) while (cpus_found < num_bits_set) { if (cpu_isset(cpu, *cpumask)) { int new_apicid = es7000_cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ + apicid_cluster(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); + return es7000_cpu_to_logical_apicid(0); } apicid = new_apicid; @@ -204,8 +209,9 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) +static inline unsigned int +es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) { int apicid = es7000_cpu_to_logical_apicid(0); cpumask_var_t cpumask; @@ -215,9 +221,10 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, cpumask_and(cpumask, inmask, andmask); cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = cpu_mask_to_apicid(cpumask); + apicid = es7000_cpu_mask_to_apicid(cpumask); free_cpumask_var(cpumask); + return apicid; } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 8719208f273..8972f843414 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -19,8 +19,6 @@ static inline const struct cpumask *default_target_cpus(void) #ifdef CONFIG_X86_64 #include -#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) -#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define read_apic_id() 
(apic->get_apic_id(apic_read(APIC_ID))) #define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) @@ -49,13 +47,15 @@ static inline int default_apic_id_registered(void) return physid_isset(read_apic_id(), phys_cpu_present_map); } -static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask) +static inline unsigned int +default_cpu_mask_to_apicid(const struct cpumask *cpumask) { return cpumask_bits(cpumask)[0]; } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static inline unsigned int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { unsigned long mask1 = cpumask_bits(cpumask)[0]; unsigned long mask2 = cpumask_bits(andmask)[0]; diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 1eeb5b61e48..ca460e45991 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,8 +3,6 @@ #include -#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid) -#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void generic_bigsmp_probe(void); diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 765c4d5124c..ce95e79f723 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -101,15 +101,16 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us */ -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) +static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) { - return (int) 0xF; + return 0x0F; } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static inline unsigned int +numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { - return (int) 0xF; + return 0x0F; } /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index fa6b3b45290..15b8dbd19e1 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -125,29 +125,32 @@ static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) return 1; } -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) +static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) { - int num_bits_set; int cpus_found = 0; - int cpu; + int num_bits_set; int apicid; + int cpu; num_bits_set = cpus_weight(*cpumask); /* Return id to all */ if (num_bits_set >= nr_cpu_ids) - return (int) 0xFF; + return 0xFF; /* * The cpus in the mask must all be on the apic cluster. 
If are not * on the same apicid cluster return default value of target_cpus(): */ cpu = first_cpu(*cpumask); apicid = summit_cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { if (cpu_isset(cpu, *cpumask)) { int new_apicid = summit_cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ + apicid_cluster(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); + return 0xFF; } apicid = apicid | new_apicid; @@ -158,8 +161,9 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) +static inline unsigned int +summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) { int apicid = summit_cpu_to_logical_apicid(0); cpumask_var_t cpumask; @@ -169,9 +173,10 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, cpumask_and(cpumask, inmask, andmask); cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = cpu_mask_to_apicid(cpumask); + apicid = summit_cpu_mask_to_apicid(cpumask); free_cpumask_var(cpumask); + return apicid; } diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 78baa55cd0e..b941b112caf 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -309,11 +309,13 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - for_each_cpu_and(cpu, cpumask, andmask) + for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; + } if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index c7557e05184..62f9fccf01d 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -111,21 +111,21 @@ static int x2apic_apic_id_registered(void) static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) { - int cpu; - /* * We're using fixed IRQ delivery, can only return one logical APIC ID. * May as well be the first. */ - cpu = cpumask_first(cpumask); + int cpu = cpumask_first(cpumask); + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_logical_apicid, cpu); else return BAD_APICID; } -static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -133,11 +133,14 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one logical APIC ID. * May as well be the first. 
*/ - for_each_cpu_and(cpu, cpumask, andmask) + for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; + } + if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_logical_apicid, cpu); + return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 80cba49cfd8..3da1675b260 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -110,21 +110,21 @@ static int x2apic_apic_id_registered(void) static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) { - int cpu; - /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_first(cpumask); + int cpu = cpumask_first(cpumask); + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; } -static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -132,11 +132,14 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - for_each_cpu_and(cpu, cpumask, andmask) + for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; + } + if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 50310b96adc..f957878c21e 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -171,21 +171,21 @@ static void uv_init_apic_ldr(void) static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) { - int cpu; - /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_first(cpumask); + int cpu = cpumask_first(cpumask); + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; } -static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) +static unsigned int +uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -193,11 +193,13 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - for_each_cpu_and(cpu, cpumask, andmask) + for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; + } if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; } diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 3d85d3d810b..01a2505d727 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -563,8 +563,9 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); /* - * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid - * of that, or returns BAD_APICID and leaves desc->affinity untouched. + * Either sets desc->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that, or returns BAD_APICID and + * leaves desc->affinity untouched. 
*/ static unsigned int set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) @@ -582,7 +583,8 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) cpumask_and(desc->affinity, cfg->domain, mask); set_extra_move_desc(desc, mask); - return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask); + + return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask); } static void @@ -1562,7 +1564,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; - dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " @@ -1666,7 +1668,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, */ entry.dest_mode = apic->irq_dest_mode; entry.mask = 1; /* mask IRQ now */ - entry.dest = cpu_mask_to_apicid(apic->target_cpus()); + entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); entry.delivery_mode = apic->irq_delivery_mode; entry.polarity = 0; entry.trigger = 0; @@ -2367,7 +2369,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) set_extra_move_desc(desc, mask); - dest = cpu_mask_to_apicid_and(cfg->domain, mask); + dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); modify_ioapic_rte = desc->status & IRQ_LEVEL; if (modify_ioapic_rte) { @@ -3270,7 +3272,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms if (err) return err; - dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); #ifdef CONFIG_INTR_REMAP if (irq_remapped(irq)) { @@ -3708,7 +3710,8 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) struct ht_irq_msg msg; unsigned dest; - dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + dest = apic->cpu_mask_to_apicid_and(cfg->domain, + apic->target_cpus()); msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); @@ -3773,7 +3776,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, entry->polarity = 0; entry->trigger = 0; entry->mask = 0; - entry->dest = cpu_mask_to_apicid(eligible_cpu); + entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index ee52c59aa3a..22c2c7b8e4a 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -94,8 +94,8 @@ struct genapic apic_bigsmp = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, - .cpu_mask_to_apicid = cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, .send_IPI_mask = send_IPI_mask, .send_IPI_mask_allbutself = NULL, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index e4ed7e6d626..477ebec1674 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -75,8 +75,8 @@ struct genapic apic_default = { .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, - .cpu_mask_to_apicid = cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, 
.send_IPI_mask = send_IPI_mask, .send_IPI_mask_allbutself = NULL, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 3d046dec0c9..d758cf65d73 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -26,7 +26,7 @@ void __init es7000_update_genapic_to_cluster(void) apic->init_apic_ldr = es7000_init_apic_ldr_cluster; - apic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster; + apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; } static int probe_es7000(void) @@ -130,8 +130,8 @@ struct genapic apic_es7000 = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, - .cpu_mask_to_apicid = cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = send_IPI_mask, .send_IPI_mask_allbutself = NULL, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index a7bf1aa02e1..eb7d56a521d 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -94,8 +94,8 @@ struct genapic apic_numaq = { .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, - .cpu_mask_to_apicid = cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, .send_IPI_mask = send_IPI_mask, .send_IPI_mask_allbutself = NULL, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index a0ae6b91048..8c293055bdf 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -74,8 +74,8 @@ struct genapic apic_summit = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, - .cpu_mask_to_apicid = cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, .send_IPI_mask = send_IPI_mask, .send_IPI_mask_allbutself = NULL, -- cgit v1.2.3 From dac5f4121df3c39fdb2ea57acd669a0ae19e46f8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 15:42:24 +0100 Subject: x86, apic: untangle the send_IPI_*() jungle Our send_IPI_*() methods and definitions are a twisted mess: the same symbol is defined to different things depending on .config details, in a non-transparent way. 
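(For orientation only, a toy model of the obfuscation being untangled and of the explicit dispatch that replaces it, as itemized below. The struct, field and driver names are simplified stand-ins; the real hooks live in struct genapic and the per-driver ipi.h headers.)

#include <stdio.h>

/* Toy method table: one send_IPI_mask hook per APIC driver. */
struct ipi_ops_sketch {
	void (*send_IPI_mask)(unsigned long mask, int vector);
};

/*
 * The standard PC implementation, carrying the default_ prefix that this
 * series gives to the generic methods.
 */
static void default_send_IPI_mask_sketch(unsigned long mask, int vector)
{
	printf("default driver: vector 0x%02x to CPU mask 0x%lx\n", vector, mask);
}

static const struct ipi_ops_sketch apic_default_sketch = {
	.send_IPI_mask = default_send_IPI_mask_sketch,
};

int main(void)
{
	const struct ipi_ops_sketch *apic = &apic_default_sketch;

	/*
	 * Old style: a bare send_IPI_mask() call that is secretly a macro
	 * for either a plain function or apic->send_IPI_mask, depending on
	 * the .config. New style: the indirection is written out at the
	 * call site, so the reader sees exactly which driver method runs.
	 */
	apic->send_IPI_mask(0x3UL, 0xfd);
	return 0;
}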
- spread out the quirks into separately named per apic driver methods - prefix the standard PC methods with default_ - get rid of wrapper macro obfuscation - clean up various details Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/ipi.h | 16 +++++------ arch/x86/include/asm/es7000/ipi.h | 16 +++++------ arch/x86/include/asm/hw_irq.h | 4 +-- arch/x86/include/asm/ipi.h | 38 +++++++++++++------------ arch/x86/include/asm/mach-default/mach_apic.h | 1 - arch/x86/include/asm/mach-default/mach_ipi.h | 40 ++++++++++++--------------- arch/x86/include/asm/mach-generic/mach_ipi.h | 4 --- arch/x86/include/asm/numaq/ipi.h | 16 +++++------ arch/x86/include/asm/summit/ipi.h | 16 +++++------ arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/genapic_64.c | 2 +- arch/x86/kernel/genapic_flat_64.c | 24 +++++++++------- arch/x86/kernel/genx2apic_cluster.c | 38 +++++++++++++------------ arch/x86/kernel/genx2apic_phys.c | 36 ++++++++++-------------- arch/x86/kernel/genx2apic_uv_x.c | 21 ++++++++------ arch/x86/kernel/io_apic.c | 10 +++---- arch/x86/kernel/ipi.c | 28 ++++++++++--------- arch/x86/kernel/kgdb.c | 2 +- arch/x86/kernel/reboot.c | 2 +- arch/x86/kernel/smp.c | 10 +++---- arch/x86/mach-generic/bigsmp.c | 6 ++-- arch/x86/mach-generic/default.c | 6 ++-- arch/x86/mach-generic/es7000.c | 6 ++-- arch/x86/mach-generic/numaq.c | 6 ++-- arch/x86/mach-generic/summit.c | 6 ++-- arch/x86/mm/tlb.c | 2 +- 26 files changed, 178 insertions(+), 180 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h index 27fcd01b3ae..a91db69cda6 100644 --- a/arch/x86/include/asm/bigsmp/ipi.h +++ b/arch/x86/include/asm/bigsmp/ipi.h @@ -1,22 +1,22 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -void send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const struct cpumask *mask, int vector) +static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) { - send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence(mask, vector); } -static inline void send_IPI_allbutself(int vector) +static inline void bigsmp_send_IPI_allbutself(int vector) { - send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself(cpu_online_mask, vector); } -static inline void send_IPI_all(int vector) +static inline void bigsmp_send_IPI_all(int vector) { - send_IPI_mask(cpu_online_mask, vector); + default_send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_MACH_IPI_H */ diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h index 7e8ed24d4b8..81e77c812ba 100644 --- a/arch/x86/include/asm/es7000/ipi.h +++ b/arch/x86/include/asm/es7000/ipi.h @@ -1,22 +1,22 @@ #ifndef __ASM_ES7000_IPI_H #define __ASM_ES7000_IPI_H -void send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const struct cpumask *mask, int vector) +static inline void es7000_send_IPI_mask(const struct cpumask *mask, int vector) { - send_IPI_mask_sequence(mask, vector); + 
default_send_IPI_mask_sequence(mask, vector); } -static inline void send_IPI_allbutself(int vector) +static inline void es7000_send_IPI_allbutself(int vector) { - send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself(cpu_online_mask, vector); } -static inline void send_IPI_all(int vector) +static inline void es7000_send_IPI_all(int vector) { - send_IPI_mask(cpu_online_mask, vector); + es7000_send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_ES7000_IPI_H */ diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 8de644b6b95..bfa921fad13 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -73,9 +73,9 @@ extern void enable_IO_APIC(void); /* IPI functions */ #ifdef CONFIG_X86_32 -extern void send_IPI_self(int vector); +extern void default_send_IPI_self(int vector); #endif -extern void send_IPI(int dest, int vector); +extern void default_send_IPI(int dest, int vector); /* Statistics */ extern atomic_t irq_err_count; diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index c745a306f7d..a8d717f2c7e 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -55,8 +55,9 @@ static inline void __xapic_wait_icr_idle(void) cpu_relax(); } -static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, - unsigned int dest) +static inline void +__default_send_IPI_shortcut(unsigned int shortcut, + int vector, unsigned int dest) { /* * Subtle. In the case of the 'never do double writes' workaround @@ -87,8 +88,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ -static inline void __send_IPI_dest_field(unsigned int mask, int vector, - unsigned int dest) +static inline void + __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) { unsigned long cfg; @@ -117,11 +118,11 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, native_apic_mem_write(APIC_ICR, cfg); } -static inline void send_IPI_mask_sequence(const struct cpumask *mask, - int vector) +static inline void +default_send_IPI_mask_sequence(const struct cpumask *mask, int vector) { - unsigned long flags; unsigned long query_cpu; + unsigned long flags; /* * Hack. 
The clustered APIC addressing mode doesn't allow us to send @@ -130,27 +131,28 @@ static inline void send_IPI_mask_sequence(const struct cpumask *mask, */ local_irq_save(flags); for_each_cpu(query_cpu, mask) { - __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } -static inline void send_IPI_mask_allbutself(const struct cpumask *mask, - int vector) +static inline void +default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned long flags; - unsigned int query_cpu; unsigned int this_cpu = smp_processor_id(); + unsigned int query_cpu; + unsigned long flags; /* See Hack comment above */ local_irq_save(flags); - for_each_cpu(query_cpu, mask) - if (query_cpu != this_cpu) - __send_IPI_dest_field( - per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } local_irq_restore(flags); } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 8972f843414..2e4104cf348 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -20,7 +20,6 @@ static inline const struct cpumask *default_target_cpus(void) #ifdef CONFIG_X86_64 #include #define read_apic_id() (apic->get_apic_id(apic_read(APIC_ID))) -#define send_IPI_self (apic->send_IPI_self) #define wakeup_secondary_cpu (apic->wakeup_cpu) extern void default_setup_apic_routing(void); #else diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h index 089399643df..85dec630c69 100644 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ b/arch/x86/include/asm/mach-default/mach_ipi.h @@ -4,45 +4,40 @@ /* Avoid include hell */ #define NMI_VECTOR 0x02 -void send_IPI_mask_bitmask(const struct cpumask *mask, int vector); -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -void __send_IPI_shortcut(unsigned int shortcut, int vector); +void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +void __default_send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; #ifdef CONFIG_X86_64 #include -#define send_IPI_mask (apic->send_IPI_mask) -#define send_IPI_mask_allbutself (apic->send_IPI_mask_allbutself) #else -static inline void send_IPI_mask(const struct cpumask *mask, int vector) +static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) { - send_IPI_mask_bitmask(mask, vector); + default_send_IPI_mask_bitmask(mask, vector); } -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); #endif -static inline void __local_send_IPI_allbutself(int vector) +static inline void __default_local_send_IPI_allbutself(int vector) { if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask_allbutself(cpu_online_mask, vector); + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); else - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector); } -static inline void __local_send_IPI_all(int vector) +static inline void 
__default_local_send_IPI_all(int vector) { if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask(cpu_online_mask, vector); + apic->send_IPI_mask(cpu_online_mask, vector); else - __send_IPI_shortcut(APIC_DEST_ALLINC, vector); + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector); } -#ifdef CONFIG_X86_64 -#define send_IPI_allbutself (apic->send_IPI_allbutself) -#define send_IPI_all (apic->send_IPI_all) -#else -static inline void send_IPI_allbutself(int vector) +#ifdef CONFIG_X86_32 +static inline void default_send_IPI_allbutself(int vector) { /* * if there are no other CPUs in the system then we get an APIC send @@ -51,13 +46,12 @@ static inline void send_IPI_allbutself(int vector) if (!(num_online_cpus() > 1)) return; - __local_send_IPI_allbutself(vector); - return; + __default_local_send_IPI_allbutself(vector); } -static inline void send_IPI_all(int vector) +static inline void default_send_IPI_all(int vector) { - __local_send_IPI_all(vector); + __default_local_send_IPI_all(vector); } #endif diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h index 75e54bd6cbd..5691c09645c 100644 --- a/arch/x86/include/asm/mach-generic/mach_ipi.h +++ b/arch/x86/include/asm/mach-generic/mach_ipi.h @@ -3,8 +3,4 @@ #include -#define send_IPI_mask (apic->send_IPI_mask) -#define send_IPI_allbutself (apic->send_IPI_allbutself) -#define send_IPI_all (apic->send_IPI_all) - #endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */ diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h index a8374c65277..5dbc4b4cd5e 100644 --- a/arch/x86/include/asm/numaq/ipi.h +++ b/arch/x86/include/asm/numaq/ipi.h @@ -1,22 +1,22 @@ #ifndef __ASM_NUMAQ_IPI_H #define __ASM_NUMAQ_IPI_H -void send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const struct cpumask *mask, int vector) +static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) { - send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence(mask, vector); } -static inline void send_IPI_allbutself(int vector) +static inline void numaq_send_IPI_allbutself(int vector) { - send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself(cpu_online_mask, vector); } -static inline void send_IPI_all(int vector) +static inline void numaq_send_IPI_all(int vector) { - send_IPI_mask(cpu_online_mask, vector); + numaq_send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_NUMAQ_IPI_H */ diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h index a8a2c24f50c..f87a43fe0ae 100644 --- a/arch/x86/include/asm/summit/ipi.h +++ b/arch/x86/include/asm/summit/ipi.h @@ -1,26 +1,26 @@ #ifndef __ASM_SUMMIT_IPI_H #define __ASM_SUMMIT_IPI_H -void send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector); -static inline void send_IPI_mask(const cpumask_t *mask, int vector) +static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) { - send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence(mask, vector); } -static inline void 
send_IPI_allbutself(int vector) +static inline void summit_send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) - send_IPI_mask(&mask, vector); + summit_send_IPI_mask(&mask, vector); } -static inline void send_IPI_all(int vector) +static inline void summit_send_IPI_all(int vector) { - send_IPI_mask(&cpu_online_map, vector); + summit_send_IPI_mask(&cpu_online_map, vector); } #endif /* __ASM_SUMMIT_IPI_H */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 5f7f3a9a47a..84b8e7c57ab 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -475,7 +475,7 @@ static void lapic_timer_setup(enum clock_event_mode mode, static void lapic_timer_broadcast(const struct cpumask *mask) { #ifdef CONFIG_SMP - send_IPI_mask(mask, LOCAL_TIMER_VECTOR); + apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR); #endif } diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index d57d2138f07..820dea5d0eb 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -65,7 +65,7 @@ void __init default_setup_apic_routing(void) void apic_send_IPI_self(int vector) { - __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); } int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index b941b112caf..7c648ccea51 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -74,7 +74,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector) unsigned long flags; local_irq_save(flags); - __send_IPI_dest_field(mask, vector, apic->dest_logical); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); local_irq_restore(flags); } @@ -85,14 +85,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) _flat_send_IPI_mask(mask, vector); } -static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, - int vector) +static void + flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; int cpu = smp_processor_id(); if (cpu < BITS_PER_LONG) clear_bit(cpu, &mask); + _flat_send_IPI_mask(mask, vector); } @@ -114,16 +115,19 @@ static void flat_send_IPI_allbutself(int vector) _flat_send_IPI_mask(mask, vector); } } else if (num_online_cpus() > 1) { - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, + vector, apic->dest_logical); } } static void flat_send_IPI_all(int vector) { - if (vector == NMI_VECTOR) + if (vector == NMI_VECTOR) { flat_send_IPI_mask(cpu_online_mask, vector); - else - __send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); + } else { + __default_send_IPI_shortcut(APIC_DEST_ALLINC, + vector, apic->dest_logical); + } } static unsigned int flat_get_apic_id(unsigned long x) @@ -265,18 +269,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) { - send_IPI_mask_sequence(cpumask, vector); + default_send_IPI_mask_sequence(cpumask, vector); } static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { - send_IPI_mask_allbutself(cpumask, vector); + default_send_IPI_mask_allbutself(cpumask, vector); } static void physflat_send_IPI_allbutself(int vector) { 
- send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself(cpu_online_mask, vector); } static void physflat_send_IPI_all(int vector) diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 62f9fccf01d..2d97726a973 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -36,8 +36,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) cpumask_set_cpu(cpu, retmask); } -static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, - unsigned int dest) +static void + __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) { unsigned long cfg; @@ -57,45 +57,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, */ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { - unsigned long flags; unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); - for_each_cpu(query_cpu, mask) + for_each_cpu(query_cpu, mask) { __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, apic->dest_logical); + } local_irq_restore(flags); } -static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, - int vector) +static void + x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned long flags; - unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); - for_each_cpu(query_cpu, mask) - if (query_cpu != this_cpu) - __x2apic_send_IPI_dest( + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, apic->dest_logical); + } local_irq_restore(flags); } static void x2apic_send_IPI_allbutself(int vector) { - unsigned long flags; - unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); - for_each_online_cpu(query_cpu) - if (query_cpu != this_cpu) - __x2apic_send_IPI_dest( + for_each_online_cpu(query_cpu) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, apic->dest_logical); + } local_irq_restore(flags); } @@ -175,7 +180,6 @@ static void init_x2apic_ldr(void) int cpu = smp_processor_id(); per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); - return; } struct genapic apic_x2apic_cluster = { diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 3da1675b260..74777c27669 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -55,8 +55,8 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { - unsigned long flags; unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); for_each_cpu(query_cpu, mask) { @@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) local_irq_restore(flags); } -static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, - int vector) +static void + x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned long flags; - unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); for_each_cpu(query_cpu, mask) { @@ -85,16 +85,17 @@ static void 
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, static void x2apic_send_IPI_allbutself(int vector) { - unsigned long flags; - unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; local_irq_save(flags); - for_each_online_cpu(query_cpu) - if (query_cpu != this_cpu) - __x2apic_send_IPI_dest( - per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); + for_each_online_cpu(query_cpu) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + } local_irq_restore(flags); } @@ -145,18 +146,12 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, static unsigned int x2apic_phys_get_apic_id(unsigned long x) { - unsigned int id; - - id = x; - return id; + return x; } static unsigned long set_apic_id(unsigned int id) { - unsigned long x; - - x = id; - return x; + return id; } static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) @@ -171,7 +166,6 @@ static void x2apic_send_IPI_self(int vector) static void init_x2apic_ldr(void) { - return; } struct genapic apic_x2apic_phys = { diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index f957878c21e..24b9f42db9b 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -118,12 +118,13 @@ static void uv_send_IPI_one(int cpu, int vector) int pnode; apicid = per_cpu(x86_cpu_to_apicid, cpu); - lapicid = apicid & 0x3f; /* ZZZ macro needed */ + lapicid = apicid & 0x3f; /* ZZZ macro needed */ pnode = uv_apicid_to_pnode(apicid); - val = - (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << - UVH_IPI_INT_APIC_ID_SHFT) | - (vector << UVH_IPI_INT_VECTOR_SHFT); + + val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) | + ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) | + ( vector << UVH_IPI_INT_VECTOR_SHFT ); + uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } @@ -137,22 +138,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector) static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned int cpu; unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; - for_each_cpu(cpu, mask) + for_each_cpu(cpu, mask) { if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); + } } static void uv_send_IPI_allbutself(int vector) { - unsigned int cpu; unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; - for_each_online_cpu(cpu) + for_each_online_cpu(cpu) { if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); + } } static void uv_send_IPI_all(int vector) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 01a2505d727..e90970ce21a 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -514,11 +514,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg) for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) cfg->move_cleanup_count++; for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) - send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); + apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); } else { cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); cfg->move_cleanup_count = cpumask_weight(cleanup_mask); - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); free_cpumask_var(cleanup_mask); } cfg->move_in_progress = 0; @@ -800,7 +800,7 @@ static void clear_IO_APIC (void) } #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32) -void send_IPI_self(int vector) +void 
default_send_IPI_self(int vector) { unsigned int cfg; @@ -2297,7 +2297,7 @@ static int ioapic_retrigger_irq(unsigned int irq) unsigned long flags; spin_lock_irqsave(&vector_lock, flags); - send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); + apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); spin_unlock_irqrestore(&vector_lock, flags); return 1; @@ -2305,7 +2305,7 @@ static int ioapic_retrigger_irq(unsigned int irq) #else static int ioapic_retrigger_irq(unsigned int irq) { - send_IPI_self(irq_cfg(irq)->vector); + apic->send_IPI_self(irq_cfg(irq)->vector); return 1; } diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 367c5e684fa..e16c41b2e4e 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -48,7 +48,7 @@ static inline int __prepare_ICR2(unsigned int mask) return SET_APIC_DEST_FIELD(mask); } -void __send_IPI_shortcut(unsigned int shortcut, int vector) +void __default_send_IPI_shortcut(unsigned int shortcut, int vector) { /* * Subtle. In the case of the 'never do double writes' workaround @@ -75,16 +75,16 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector) apic_write(APIC_ICR, cfg); } -void send_IPI_self(int vector) +void default_send_IPI_self(int vector) { - __send_IPI_shortcut(APIC_DEST_SELF, vector); + __default_send_IPI_shortcut(APIC_DEST_SELF, vector); } /* * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ -static inline void __send_IPI_dest_field(unsigned long mask, int vector) +static inline void __default_send_IPI_dest_field(unsigned long mask, int vector) { unsigned long cfg; @@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) /* * This is only used on smaller machines. 
*/ -void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) +void default_send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; unsigned long flags; local_irq_save(flags); WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); - __send_IPI_dest_field(mask, vector); + __default_send_IPI_dest_field(mask, vector); local_irq_restore(flags); } -void send_IPI_mask_sequence(const struct cpumask *mask, int vector) +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector) { unsigned long flags; unsigned int query_cpu; @@ -140,11 +140,11 @@ void send_IPI_mask_sequence(const struct cpumask *mask, int vector) local_irq_save(flags); for_each_cpu(query_cpu, mask) - __send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector); + __default_send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector); local_irq_restore(flags); } -void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { unsigned long flags; unsigned int query_cpu; @@ -153,10 +153,12 @@ void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) /* See Hack comment above */ local_irq_save(flags); - for_each_cpu(query_cpu, mask) - if (query_cpu != this_cpu) - __send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), - vector); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector); + } local_irq_restore(flags); } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 10435a120d2..b62a3811e01 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code) */ void kgdb_roundup_cpus(unsigned long flags) { - send_IPI_allbutself(APIC_DM_NMI); + apic->send_IPI_allbutself(APIC_DM_NMI); } #endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index f8536fee5c1..38dace28d62 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -651,7 +651,7 @@ static int crash_nmi_callback(struct notifier_block *self, static void smp_send_nmi_allbutself(void) { - send_IPI_allbutself(NMI_VECTOR); + apic->send_IPI_allbutself(NMI_VECTOR); } static struct notifier_block crash_nmi_nb = { diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index e6faa3316bd..c48ba6cc32a 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -118,12 +118,12 @@ static void native_smp_send_reschedule(int cpu) WARN_ON(1); return; } - send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); + apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); } void native_send_call_func_single_ipi(int cpu) { - send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); + apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); } void native_send_call_func_ipi(const struct cpumask *mask) @@ -131,7 +131,7 @@ void native_send_call_func_ipi(const struct cpumask *mask) cpumask_var_t allbutself; if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { - send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); return; } @@ -140,9 +140,9 @@ void native_send_call_func_ipi(const struct cpumask *mask) if (cpumask_equal(mask, allbutself) && cpumask_equal(cpu_online_mask, cpu_callout_mask)) - send_IPI_allbutself(CALL_FUNCTION_VECTOR); + apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR); else - send_IPI_mask(mask, 
CALL_FUNCTION_VECTOR); + apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); free_cpumask_var(allbutself); } diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 22c2c7b8e4a..4782b554788 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -97,10 +97,10 @@ struct genapic apic_bigsmp = { .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, - .send_IPI_mask = send_IPI_mask, + .send_IPI_mask = default_send_IPI_mask, .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = send_IPI_allbutself, - .send_IPI_all = send_IPI_all, + .send_IPI_allbutself = bigsmp_send_IPI_allbutself, + .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = NULL, .wakeup_cpu = NULL, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 477ebec1674..bf4670db25f 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -78,10 +78,10 @@ struct genapic apic_default = { .cpu_mask_to_apicid = default_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, - .send_IPI_mask = send_IPI_mask, + .send_IPI_mask = default_send_IPI_mask, .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = send_IPI_allbutself, - .send_IPI_all = send_IPI_all, + .send_IPI_allbutself = default_send_IPI_allbutself, + .send_IPI_all = default_send_IPI_all, .send_IPI_self = NULL, .wakeup_cpu = NULL, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index d758cf65d73..d36642e6d90 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -133,10 +133,10 @@ struct genapic apic_es7000 = { .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, - .send_IPI_mask = send_IPI_mask, + .send_IPI_mask = es7000_send_IPI_mask, .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = send_IPI_allbutself, - .send_IPI_all = send_IPI_all, + .send_IPI_allbutself = es7000_send_IPI_allbutself, + .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = NULL, .wakeup_cpu = NULL, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index eb7d56a521d..135b1832ad8 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -97,10 +97,10 @@ struct genapic apic_numaq = { .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, - .send_IPI_mask = send_IPI_mask, + .send_IPI_mask = numaq_send_IPI_mask, .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = send_IPI_allbutself, - .send_IPI_all = send_IPI_all, + .send_IPI_allbutself = numaq_send_IPI_allbutself, + .send_IPI_all = numaq_send_IPI_all, .send_IPI_self = NULL, .wakeup_cpu = NULL, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 8c293055bdf..77196a4a9d2 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -77,10 +77,10 @@ struct genapic apic_summit = { .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, - .send_IPI_mask = send_IPI_mask, + .send_IPI_mask = summit_send_IPI_mask, .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = send_IPI_allbutself, - .send_IPI_all = send_IPI_all, + .send_IPI_allbutself = summit_send_IPI_allbutself, + .send_IPI_all = summit_send_IPI_all, .send_IPI_self = NULL, .wakeup_cpu = NULL, diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 72a6d4ebe34..6348e114692 100644 --- 
a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -196,7 +196,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, * We have to send the IPI only to * CPUs affected. */ - send_IPI_mask(to_cpumask(f->flush_cpumask), + apic->send_IPI_mask(to_cpumask(f->flush_cpumask), INVALIDATE_TLB_VECTOR_START + sender); while (!cpumask_empty(to_cpumask(f->flush_cpumask))) -- cgit v1.2.3 From 6f177c01db6b865181fbc6c948381b290ee09718 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:09:23 +0100 Subject: x86, smp: clean up ->trampoline_phys_low/high handling - spread out the namespace on a per apic driver basis - remove wrapper macros Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/wakecpu.h | 4 ++-- arch/x86/include/asm/mach-default/mach_wakecpu.h | 4 ++-- arch/x86/include/asm/mach-default/smpboot_hooks.h | 6 +++--- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 2 -- arch/x86/include/asm/numaq/wakecpu.h | 12 ++++++------ arch/x86/mach-generic/bigsmp.c | 4 ++-- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 4 ++-- 10 files changed, 23 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index 78f0daaee43..4c01be6ff80 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -1,8 +1,8 @@ #ifndef __ASM_ES7000_WAKECPU_H #define __ASM_ES7000_WAKECPU_H -#define TRAMPOLINE_PHYS_LOW 0x467 -#define TRAMPOLINE_PHYS_HIGH 0x469 +#define ES7000_TRAMPOLINE_PHYS_LOW 0x467 +#define ES7000_TRAMPOLINE_PHYS_HIGH 0x469 static inline void wait_for_init_deassert(atomic_t *deassert) { diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index 89897a6a65b..0a8d7860e44 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -1,8 +1,8 @@ #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H -#define TRAMPOLINE_PHYS_LOW (0x467) -#define TRAMPOLINE_PHYS_HIGH (0x469) +#define DEFAULT_TRAMPOLINE_PHYS_LOW (0x467) +#define DEFAULT_TRAMPOLINE_PHYS_HIGH (0x469) static inline void wait_for_init_deassert(atomic_t *deassert) { diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/mach-default/smpboot_hooks.h index 23bf52103b8..1def6011490 100644 --- a/arch/x86/include/asm/mach-default/smpboot_hooks.h +++ b/arch/x86/include/asm/mach-default/smpboot_hooks.h @@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) CMOS_WRITE(0xa, 0xf); local_flush_tlb(); pr_debug("1.\n"); - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = + *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) = start_eip >> 4; pr_debug("2.\n"); - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = + *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) = start_eip & 0xf; pr_debug("3.\n"); } @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; + *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 22006bbee61..2031377a954 
100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,8 +1,6 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define TRAMPOLINE_PHYS_LOW (apic->trampoline_phys_low) -#define TRAMPOLINE_PHYS_HIGH (apic->trampoline_phys_high) #define wait_for_init_deassert (apic->wait_for_init_deassert) #define smp_callin_clear_local_apic (apic->smp_callin_clear_local_apic) #define store_NMI_vector (apic->store_NMI_vector) diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 6f499df8edd..8b6c16d8558 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -3,8 +3,8 @@ /* This file copes with machines that wakeup secondary CPUs by NMIs */ -#define TRAMPOLINE_PHYS_LOW (0x8) -#define TRAMPOLINE_PHYS_HIGH (0xa) +#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) +#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) /* We don't do anything here because we use NMI's to boot instead */ static inline void wait_for_init_deassert(atomic_t *deassert) @@ -24,17 +24,17 @@ static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { printk("Storing NMI vector\n"); *high = - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)); + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); *low = - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)); + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); } static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { printk("Restoring NMI vector\n"); - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)) = *high; - *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)) = *low; } diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 4782b554788..a317fbe07fd 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -104,8 +104,8 @@ struct genapic apic_bigsmp = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = wait_for_init_deassert, .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index bf4670db25f..17d8f9c2218 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -85,8 +85,8 @@ struct genapic apic_default = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = wait_for_init_deassert, .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index d36642e6d90..871e85445e2 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -140,8 +140,8 @@ struct genapic apic_es7000 = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = 
TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = wait_for_init_deassert, .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 135b1832ad8..0b496ab5450 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -104,8 +104,8 @@ struct genapic apic_numaq = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = wait_for_init_deassert, .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 77196a4a9d2..c4799cd3459 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -84,8 +84,8 @@ struct genapic apic_summit = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = wait_for_init_deassert, .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, -- cgit v1.2.3 From abfa584c8df8b691cf18f51c7d4af27e5b32be4a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:15:16 +0100 Subject: x86: set ->trampoline_phys_low/high on 64-bit too 64-bit x86 has zero for ->trampoline_phys_low/high, but the smpboot code can use these values - so it's better to set them up to their correct values. 
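For reference, these fields describe the warm-reset vector words at physical 0x467/0x469 that smpboot_setup_warm_reset_vector() fills in: the trampoline start address is split into a segment word (stored at the "high" address) and an offset word (stored at the "low" address). Below is a minimal userspace sketch of that split - illustration only, not kernel code, and the 0x9a000 trampoline address is a made-up example:

    /* Model of the segment:offset split used for the warm-reset vector. */
    #include <stdio.h>

    #define DEFAULT_TRAMPOLINE_PHYS_LOW  0x467   /* offset word  */
    #define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469   /* segment word */

    int main(void)
    {
            unsigned long start_eip = 0x9a000;   /* hypothetical trampoline address */

            unsigned short segment = start_eip >> 4;   /* written to ...PHYS_HIGH */
            unsigned short offset  = start_eip & 0xf;  /* written to ...PHYS_LOW  */

            printf("[0x%x] = 0x%04x (segment)\n", DEFAULT_TRAMPOLINE_PHYS_HIGH, segment);
            printf("[0x%x] = 0x%04x (offset)\n",  DEFAULT_TRAMPOLINE_PHYS_LOW,  offset);
            return 0;
    }

Segment 0x9a00 with offset 0x0000 points back at physical 0x9a000, which is why zero placeholders for these fields were only harmless as long as nothing on 64-bit touched them.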
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 6 ++++++ arch/x86/include/asm/mach-default/mach_wakecpu.h | 3 --- arch/x86/kernel/genapic_flat_64.c | 8 ++++---- arch/x86/kernel/genx2apic_cluster.c | 4 ++-- arch/x86/kernel/genx2apic_phys.c | 4 ++-- arch/x86/kernel/genx2apic_uv_x.c | 4 ++-- 6 files changed, 16 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 8bb1c73c55b..90e83a769a1 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -88,6 +88,12 @@ struct genapic { extern struct genapic *apic; +/* + * Warm reset vector default position: + */ +#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 +#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 + #ifdef CONFIG_X86_32 extern void es7000_update_genapic_to_cluster(void); #else diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index 0a8d7860e44..a327a675e47 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -1,9 +1,6 @@ #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H -#define DEFAULT_TRAMPOLINE_PHYS_LOW (0x467) -#define DEFAULT_TRAMPOLINE_PHYS_HIGH (0x469) - static inline void wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)) diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 7c648ccea51..3a28d6a8c49 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -224,8 +224,8 @@ struct genapic apic_flat = { .send_IPI_self = apic_send_IPI_self, .wakeup_cpu = NULL, - .trampoline_phys_low = 0, - .trampoline_phys_high = 0, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, @@ -370,8 +370,8 @@ struct genapic apic_physflat = { .send_IPI_self = apic_send_IPI_self, .wakeup_cpu = NULL, - .trampoline_phys_low = 0, - .trampoline_phys_high = 0, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 2d97726a973..abc5ee329f2 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -228,8 +228,8 @@ struct genapic apic_x2apic_cluster = { .send_IPI_self = x2apic_send_IPI_self, .wakeup_cpu = NULL, - .trampoline_phys_low = 0, - .trampoline_phys_high = 0, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 74777c27669..dc815ef22d8 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -214,8 +214,8 @@ struct genapic apic_x2apic_phys = { .send_IPI_self = x2apic_send_IPI_self, .wakeup_cpu = NULL, - .trampoline_phys_low = 0, - .trampoline_phys_high = 0, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c 
b/arch/x86/kernel/genx2apic_uv_x.c index 24b9f42db9b..b5908735ca5 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -287,8 +287,8 @@ struct genapic apic_x2apic_uv_x = { .send_IPI_self = uv_send_IPI_self, .wakeup_cpu = NULL, - .trampoline_phys_low = 0, - .trampoline_phys_high = 0, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, -- cgit v1.2.3 From a965936643e28af8152d9e960b966baa1a5588a2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:21:32 +0100 Subject: x86, smp: refactor ->wait_for_init_deassert() - spread out the namespace on a per APIC driver basis - handle a NULL ->wait_for_init_deassert() as a 'dont wait' default method - remove NUMAQ and Summit handlers Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/wakecpu.h | 2 +- arch/x86/include/asm/mach-default/mach_wakecpu.h | 2 +- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 1 - arch/x86/include/asm/numaq/wakecpu.h | 5 ----- arch/x86/kernel/es7000_32.c | 6 +----- arch/x86/kernel/smpboot.c | 3 ++- arch/x86/mach-generic/bigsmp.c | 4 +++- arch/x86/mach-generic/default.c | 4 +++- arch/x86/mach-generic/es7000.c | 4 +++- arch/x86/mach-generic/numaq.c | 5 ++++- arch/x86/mach-generic/summit.c | 4 +++- 11 files changed, 21 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index 4c01be6ff80..5c4d05f46d2 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -4,7 +4,7 @@ #define ES7000_TRAMPOLINE_PHYS_LOW 0x467 #define ES7000_TRAMPOLINE_PHYS_HIGH 0x469 -static inline void wait_for_init_deassert(atomic_t *deassert) +static inline void es7000_wait_for_init_deassert(atomic_t *deassert) { #ifndef CONFIG_ES7000_CLUSTERED_APIC while (!atomic_read(deassert)) diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index a327a675e47..1d34c69a758 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H -static inline void wait_for_init_deassert(atomic_t *deassert) +static inline void default_wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)) cpu_relax(); diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 2031377a954..58e54122f73 100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define wait_for_init_deassert (apic->wait_for_init_deassert) #define smp_callin_clear_local_apic (apic->smp_callin_clear_local_apic) #define store_NMI_vector (apic->store_NMI_vector) #define restore_NMI_vector (apic->restore_NMI_vector) diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 8b6c16d8558..884b95cf584 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -6,11 +6,6 @@ #define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) #define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) -/* We don't do anything here because we use NMI's to boot instead */ -static inline void 
wait_for_init_deassert(atomic_t *deassert) -{ -} - /* * Because we use NMIs rather than the INIT-STARTUP sequence to * bootstrap the CPUs, the APIC may be in a weird state. Kick it. diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index e73fe18488a..d7f433ee602 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -182,10 +182,6 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) return 0; } -static void noop_wait_for_deassert(atomic_t *deassert_not_used) -{ -} - static int __init es7000_update_genapic(void) { apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; @@ -194,7 +190,7 @@ static int __init es7000_update_genapic(void) if (boot_cpu_data.x86 == 6 && (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { es7000_update_genapic_to_cluster(); - apic->wait_for_init_deassert = noop_wait_for_deassert; + apic->wait_for_init_deassert = NULL; apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ab83be2f8e0..558af378a61 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -196,7 +196,8 @@ static void __cpuinit smp_callin(void) * our local APIC. We have to wait for the IPI or we'll * lock up on an APIC access. */ - wait_for_init_deassert(&init_deasserted); + if (apic->wait_for_init_deassert) + apic->wait_for_init_deassert(&init_deasserted); /* * (This works even if the APIC is not enabled.) diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index a317fbe07fd..40910bfd1b4 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -106,7 +106,9 @@ struct genapic apic_bigsmp = { .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = wait_for_init_deassert, + + .wait_for_init_deassert = default_wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 17d8f9c2218..c2464843df9 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -87,7 +87,9 @@ struct genapic apic_default = { .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = wait_for_init_deassert, + + .wait_for_init_deassert = default_wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 871e85445e2..4cb3984834e 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -142,7 +142,9 @@ struct genapic apic_es7000 = { .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = wait_for_init_deassert, + + .wait_for_init_deassert = default_wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 0b496ab5450..fb03867e7c0 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -106,7 +106,10 @@ struct 
genapic apic_numaq = { .wakeup_cpu = NULL, .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = wait_for_init_deassert, + + /* We don't do anything here because we use NMI's to boot instead */ + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index c4799cd3459..fdca78b96b6 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -86,7 +86,9 @@ struct genapic apic_summit = { .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = wait_for_init_deassert, + + .wait_for_init_deassert = default_wait_for_init_deassert, + .smp_callin_clear_local_apic = smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, -- cgit v1.2.3 From 333344d94300500e401cffb4eea10a5ab6e5a41d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:31:52 +0100 Subject: x86, smp: refactor ->smp_callin_clear_local_apic() methods Only NUMAQ does something substantial here, because it initializes via NMIs (not via INIT as standard SMP startup) - so it needs to reset the APIC. - extend the generic code to handle NULL methods - clear out dummy methods and replace them with NULL - clean up: remove wrapper macros, etc. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/wakecpu.h | 5 ----- arch/x86/include/asm/mach-default/mach_wakecpu.h | 5 ----- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 1 - arch/x86/include/asm/numaq/wakecpu.h | 4 ++-- arch/x86/kernel/smpboot.c | 3 ++- arch/x86/mach-generic/bigsmp.c | 3 ++- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 4 +++- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 3 ++- 10 files changed, 13 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index 5c4d05f46d2..e8e03962633 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -13,11 +13,6 @@ static inline void es7000_wait_for_init_deassert(atomic_t *deassert) return; } -/* Nothing to do for most platforms, since cleared by the INIT cycle */ -static inline void smp_callin_clear_local_apic(void) -{ -} - static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { } diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index 1d34c69a758..d059807cd7e 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -8,11 +8,6 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert) return; } -/* Nothing to do for most platforms, since cleared by the INIT cycle */ -static inline void smp_callin_clear_local_apic(void) -{ -} - static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { } diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 58e54122f73..30515a154d8 100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define 
_ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define smp_callin_clear_local_apic (apic->smp_callin_clear_local_apic) #define store_NMI_vector (apic->store_NMI_vector) #define restore_NMI_vector (apic->restore_NMI_vector) #define inquire_remote_apic (apic->inquire_remote_apic) diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 884b95cf584..61d0a7d9613 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -8,9 +8,9 @@ /* * Because we use NMIs rather than the INIT-STARTUP sequence to - * bootstrap the CPUs, the APIC may be in a weird state. Kick it. + * bootstrap the CPUs, the APIC may be in a weird state. Kick it: */ -static inline void smp_callin_clear_local_apic(void) +static inline void numaq_smp_callin_clear_local_apic(void) { clear_local_APIC(); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 558af378a61..10873a46b29 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -244,7 +244,8 @@ static void __cpuinit smp_callin(void) */ pr_debug("CALLIN, before setup_local_APIC().\n"); - smp_callin_clear_local_apic(); + if (apic->smp_callin_clear_local_apic) + apic->smp_callin_clear_local_apic(); setup_local_APIC(); end_local_APIC_setup(); map_cpu_to_logical_apicid(); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 40910bfd1b4..bd069e7b521 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -109,7 +109,8 @@ struct genapic apic_bigsmp = { .wait_for_init_deassert = default_wait_for_init_deassert, - .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index c2464843df9..a25e6eff048 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -90,7 +90,7 @@ struct genapic apic_default = { .wait_for_init_deassert = default_wait_for_init_deassert, - .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .smp_callin_clear_local_apic = NULL, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 4cb3984834e..ab41b543914 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -145,7 +145,9 @@ struct genapic apic_es7000 = { .wait_for_init_deassert = default_wait_for_init_deassert, - .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + /* Nothing to do for most platforms, since cleared by the INIT cycle: */ + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index fb03867e7c0..4d3924f8cd0 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -110,7 +110,7 @@ struct genapic apic_numaq = { /* We don't do anything here because we use NMI's to boot instead */ .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, diff 
--git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index fdca78b96b6..2595baa7997 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -89,7 +89,8 @@ struct genapic apic_summit = { .wait_for_init_deassert = default_wait_for_init_deassert, - .smp_callin_clear_local_apic = smp_callin_clear_local_apic, + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = store_NMI_vector, .restore_NMI_vector = restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, -- cgit v1.2.3 From 7bd06ec63a1204ca44b9f1dc487b8632016162d1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:31:52 +0100 Subject: x86, smp: refactor ->store/restore_NMI_vector() methods Only NUMAQ does something substantial here, because it initializes via NMIs (not via INIT as standard SMP startup) - so it needs to store and restore the NMI vector. - extend the generic code to handle NULL methods - clear out dummy methods and replace them with NULL - clean up: remove wrapper macros, etc. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/wakecpu.h | 8 -------- arch/x86/include/asm/mach-default/mach_wakecpu.h | 8 -------- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 2 -- arch/x86/include/asm/numaq/wakecpu.h | 6 ++++-- arch/x86/kernel/smpboot.c | 3 ++- arch/x86/mach-generic/bigsmp.c | 5 ++--- arch/x86/mach-generic/default.c | 4 ++-- arch/x86/mach-generic/es7000.c | 5 ++--- arch/x86/mach-generic/numaq.c | 4 ++-- arch/x86/mach-generic/summit.c | 5 ++--- 10 files changed, 16 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index e8e03962633..71a3a412d0e 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -13,14 +13,6 @@ static inline void es7000_wait_for_init_deassert(atomic_t *deassert) return; } -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - extern void __inquire_remote_apic(int apicid); static inline void inquire_remote_apic(int apicid) diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index d059807cd7e..656bb5e52bc 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -8,14 +8,6 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert) return; } -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - #ifdef CONFIG_SMP extern void __inquire_remote_apic(int apicid); #else /* CONFIG_SMP */ diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 30515a154d8..93207dfe8f5 100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,8 +1,6 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define store_NMI_vector (apic->store_NMI_vector) -#define restore_NMI_vector (apic->restore_NMI_vector) #define inquire_remote_apic (apic->inquire_remote_apic) #endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 61d0a7d9613..123201712a9 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ 
b/arch/x86/include/asm/numaq/wakecpu.h @@ -15,7 +15,8 @@ static inline void numaq_smp_callin_clear_local_apic(void) clear_local_APIC(); } -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) +static inline void +numaq_store_NMI_vector(unsigned short *high, unsigned short *low) { printk("Storing NMI vector\n"); *high = @@ -24,7 +25,8 @@ static inline void store_NMI_vector(unsigned short *high, unsigned short *low) *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); } -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) +static inline void +numaq_restore_NMI_vector(unsigned short *high, unsigned short *low) { printk("Restoring NMI vector\n"); *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)) = diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 10873a46b29..1492024592f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -826,7 +826,8 @@ do_rest: pr_debug("Setting warm reset code and vector.\n"); - store_NMI_vector(&nmi_high, &nmi_low); + if (apic->store_NMI_vector) + apic->store_NMI_vector(&nmi_high, &nmi_low); smpboot_setup_warm_reset_vector(start_ip); /* diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index bd069e7b521..ecdb230d0f2 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -110,8 +110,7 @@ struct genapic apic_bigsmp = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - - .store_NMI_vector = store_NMI_vector, - .restore_NMI_vector = restore_NMI_vector, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index a25e6eff048..950925615a9 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -91,7 +91,7 @@ struct genapic apic_default = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = store_NMI_vector, - .restore_NMI_vector = restore_NMI_vector, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index ab41b543914..13190709138 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -147,8 +147,7 @@ struct genapic apic_es7000 = { /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, - - .store_NMI_vector = store_NMI_vector, - .restore_NMI_vector = restore_NMI_vector, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 4d3924f8cd0..d7f7fcf21c3 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -111,7 +111,7 @@ struct genapic apic_numaq = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, - .store_NMI_vector = store_NMI_vector, - .restore_NMI_vector = restore_NMI_vector, + .store_NMI_vector = numaq_store_NMI_vector, + .restore_NMI_vector = numaq_restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 2595baa7997..46fca79f831 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c 
@@ -90,8 +90,7 @@ struct genapic apic_summit = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - - .store_NMI_vector = store_NMI_vector, - .restore_NMI_vector = restore_NMI_vector, + .store_NMI_vector = NULL, + .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; -- cgit v1.2.3 From 3d5f597e938c425554cb7668fd3c9d6a536a984a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:43:47 +0100 Subject: x86, smp: remove ->restore_NMI_vector() Nothing actually restores the NMI vector - so remove this logic altogether. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 1 - arch/x86/include/asm/numaq/wakecpu.h | 10 ---------- arch/x86/kernel/genapic_flat_64.c | 2 -- arch/x86/kernel/genx2apic_cluster.c | 1 - arch/x86/kernel/genx2apic_phys.c | 1 - arch/x86/kernel/genx2apic_uv_x.c | 1 - arch/x86/mach-generic/bigsmp.c | 1 - arch/x86/mach-generic/default.c | 1 - arch/x86/mach-generic/es7000.c | 1 - arch/x86/mach-generic/numaq.c | 1 - arch/x86/mach-generic/summit.c | 1 - 11 files changed, 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 90e83a769a1..e5f9c5696fb 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -82,7 +82,6 @@ struct genapic { void (*wait_for_init_deassert)(atomic_t *deassert); void (*smp_callin_clear_local_apic)(void); void (*store_NMI_vector)(unsigned short *high, unsigned short *low); - void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); void (*inquire_remote_apic)(int apicid); }; diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 123201712a9..920dcfefa83 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -25,16 +25,6 @@ numaq_store_NMI_vector(unsigned short *high, unsigned short *low) *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); } -static inline void -numaq_restore_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk("Restoring NMI vector\n"); - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)) = - *high; - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)) = - *low; -} - static inline void inquire_remote_apic(int apicid) { } diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 3a28d6a8c49..e9237f59928 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -229,7 +229,6 @@ struct genapic apic_flat = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = NULL, }; @@ -375,6 +374,5 @@ struct genapic apic_physflat = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = NULL, }; diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index abc5ee329f2..7c87156b641 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -233,6 +233,5 @@ struct genapic apic_x2apic_cluster = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = NULL, }; diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index dc815ef22d8..5cbae8aa040 100644 --- a/arch/x86/kernel/genx2apic_phys.c 
+++ b/arch/x86/kernel/genx2apic_phys.c @@ -219,6 +219,5 @@ struct genapic apic_x2apic_phys = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = NULL, }; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index b5908735ca5..6adb5e6f4d9 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -292,7 +292,6 @@ struct genapic apic_x2apic_uv_x = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = NULL, }; diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index ecdb230d0f2..d9377af88cb 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -111,6 +111,5 @@ struct genapic apic_bigsmp = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 950925615a9..b004257035c 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -92,6 +92,5 @@ struct genapic apic_default = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 13190709138..62673a8002f 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -148,6 +148,5 @@ struct genapic apic_es7000 = { /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index d7f7fcf21c3..2c3341564d1 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -112,6 +112,5 @@ struct genapic apic_numaq = { .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, .store_NMI_vector = numaq_store_NMI_vector, - .restore_NMI_vector = numaq_restore_NMI_vector, .inquire_remote_apic = inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 46fca79f831..c2471a9fa8f 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -91,6 +91,5 @@ struct genapic apic_summit = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .restore_NMI_vector = NULL, .inquire_remote_apic = inquire_remote_apic, }; -- cgit v1.2.3 From 25dc004903a38f0b6f6626dbbab058c8709c5398 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 16:31:52 +0100 Subject: x86, smp: refactor ->inquire_remote_apic() methods Nothing exciting - a few subarches dont want APIC remote reads to be performed - the others are content with the default method. - extend the generic code to handle NULL methods - clear out dummy methods and replace them with NULL - clean up: remove wrapper macros, etc. 
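The convention used here (and in the preceding methods) is that a NULL pointer in the driver ops table simply means "skip this step", with the check done at the call site instead of via dummy stubs. A self-contained userspace sketch of that pattern, using hypothetical driver names rather than the real genapic tables:

    #include <stdio.h>
    #include <stddef.h>

    /* Cut-down model of an ops table: optional hooks may be NULL. */
    struct demo_apic {
            const char *name;
            void (*inquire_remote_apic)(int apicid);
    };

    static void default_inquire_remote_apic(int apicid)
    {
            printf("inquiring remote APIC %d\n", apicid);
    }

    static struct demo_apic apic_default_demo = {
            .name                   = "default-demo",
            .inquire_remote_apic    = default_inquire_remote_apic,
    };

    static struct demo_apic apic_quiet_demo = {
            .name                   = "quiet-demo",
            .inquire_remote_apic    = NULL,   /* this driver skips remote reads */
    };

    static void boot_secondary(struct demo_apic *apic, int apicid)
    {
            /* Caller checks for NULL instead of relying on dummy stubs: */
            if (apic->inquire_remote_apic)
                    apic->inquire_remote_apic(apicid);
            else
                    printf("%s: no remote-APIC inquiry\n", apic->name);
    }

    int main(void)
    {
            boot_secondary(&apic_default_demo, 1);
            boot_secondary(&apic_quiet_demo, 1);
            return 0;
    }

The payoff is that drivers which do nothing for a hook carry a NULL entry rather than a per-driver empty wrapper, which is exactly what the diffs below do for ->inquire_remote_apic().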
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/wakecpu.h | 8 -------- arch/x86/include/asm/mach-default/mach_wakecpu.h | 2 +- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 2 -- arch/x86/include/asm/numaq/wakecpu.h | 4 ---- arch/x86/kernel/smpboot.c | 4 ++-- arch/x86/mach-generic/bigsmp.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/es7000.c | 2 +- arch/x86/mach-generic/numaq.c | 2 +- arch/x86/mach-generic/summit.c | 2 +- 10 files changed, 8 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index 71a3a412d0e..99c72be1840 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -13,12 +13,4 @@ static inline void es7000_wait_for_init_deassert(atomic_t *deassert) return; } -extern void __inquire_remote_apic(int apicid); - -static inline void inquire_remote_apic(int apicid) -{ - if (apic_verbosity >= APIC_DEBUG) - __inquire_remote_apic(apicid); -} - #endif /* __ASM_MACH_WAKECPU_H */ diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index 656bb5e52bc..b1cde560e4c 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -16,7 +16,7 @@ static inline void __inquire_remote_apic(int apicid) } #endif /* CONFIG_SMP */ -static inline void inquire_remote_apic(int apicid) +static inline void default_inquire_remote_apic(int apicid) { if (apic_verbosity >= APIC_DEBUG) __inquire_remote_apic(apicid); diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h index 93207dfe8f5..0b884c03a3f 100644 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -1,6 +1,4 @@ #ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H #define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define inquire_remote_apic (apic->inquire_remote_apic) - #endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index 920dcfefa83..afe81439c7d 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -25,8 +25,4 @@ numaq_store_NMI_vector(unsigned short *high, unsigned short *low) *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); } -static inline void inquire_remote_apic(int apicid) -{ -} - #endif /* __ASM_NUMAQ_WAKECPU_H */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 1492024592f..170adc5b6cb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -876,8 +876,8 @@ do_rest: else /* trampoline code not run */ printk(KERN_ERR "Not responding.\n"); - if (get_uv_system_type() != UV_NON_UNIQUE_APIC) - inquire_remote_apic(apicid); + if (apic->inquire_remote_apic) + apic->inquire_remote_apic(apicid); } } diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index d9377af88cb..4d8b2d442ba 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -111,5 +111,5 @@ struct genapic apic_bigsmp = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .inquire_remote_apic = inquire_remote_apic, + .inquire_remote_apic = default_inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index b004257035c..c12dd2300a5 100644 --- a/arch/x86/mach-generic/default.c +++ 
b/arch/x86/mach-generic/default.c @@ -92,5 +92,5 @@ struct genapic apic_default = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .inquire_remote_apic = inquire_remote_apic, + .inquire_remote_apic = default_inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 62673a8002f..be090b2037c 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -148,5 +148,5 @@ struct genapic apic_es7000 = { /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .inquire_remote_apic = inquire_remote_apic, + .inquire_remote_apic = default_inquire_remote_apic, }; diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 2c3341564d1..ddb50fba286 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -112,5 +112,5 @@ struct genapic apic_numaq = { .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, .store_NMI_vector = numaq_store_NMI_vector, - .inquire_remote_apic = inquire_remote_apic, + .inquire_remote_apic = NULL, }; diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index c2471a9fa8f..d5db3045437 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -91,5 +91,5 @@ struct genapic apic_summit = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, - .inquire_remote_apic = inquire_remote_apic, + .inquire_remote_apic = default_inquire_remote_apic, }; -- cgit v1.2.3 From 018e047f3a98bd8d9e9d78b19bc38415f0c34dd7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:09:58 +0100 Subject: x86, ES7000: consolidate the APIC code Consolidate all the ES7000 APIC code into arch/x86/mach-generic/es7000.c. With this ES7000 ceases to rely on any subarchitecture include files. 
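The target layout is: every helper becomes a static function inside the driver's own translation unit, and only the ops table is visible to the rest of the kernel. A stripped-down sketch of that shape, with hypothetical names rather than the actual es7000.c contents:

    /* demo_driver.c - everything private to the driver lives in this file. */
    #include <stdio.h>

    struct demo_ops {
            int  (*id_registered)(void);
            void (*setup_routing)(void);
    };

    /* Formerly "static inline" helpers in a shared header; now file-local. */
    static int demo_id_registered(void)
    {
            return 1;
    }

    static void demo_setup_routing(void)
    {
            printf("demo: routing configured\n");
    }

    /* The one externally visible symbol: the ops table. */
    struct demo_ops demo_apic_ops = {
            .id_registered  = demo_id_registered,
            .setup_routing  = demo_setup_routing,
    };

    int main(void)   /* stands in for the generic code that consumes the table */
    {
            if (demo_apic_ops.id_registered())
                    demo_apic_ops.setup_routing();
            return 0;
    }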
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 236 ------------------------- arch/x86/include/asm/es7000/apicdef.h | 9 - arch/x86/include/asm/es7000/ipi.h | 22 --- arch/x86/include/asm/es7000/mpparse.h | 23 --- arch/x86/include/asm/es7000/wakecpu.h | 16 -- arch/x86/mach-generic/es7000.c | 314 ++++++++++++++++++++++++++++++++-- 6 files changed, 295 insertions(+), 325 deletions(-) delete mode 100644 arch/x86/include/asm/es7000/apic.h delete mode 100644 arch/x86/include/asm/es7000/apicdef.h delete mode 100644 arch/x86/include/asm/es7000/ipi.h delete mode 100644 arch/x86/include/asm/es7000/mpparse.h delete mode 100644 arch/x86/include/asm/es7000/wakecpu.h (limited to 'arch') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h deleted file mode 100644 index b89b45db735..00000000000 --- a/arch/x86/include/asm/es7000/apic.h +++ /dev/null @@ -1,236 +0,0 @@ -#ifndef __ASM_ES7000_APIC_H -#define __ASM_ES7000_APIC_H - -#include - -#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) - -static inline int es7000_apic_id_registered(void) -{ - return 1; -} - -static inline const cpumask_t *target_cpus_cluster(void) -{ - return &CPU_MASK_ALL; -} - -static inline const cpumask_t *es7000_target_cpus(void) -{ - return &cpumask_of_cpu(smp_processor_id()); -} - -#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) -#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline unsigned long -es7000_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} -static inline unsigned long es7000_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -extern void es7000_enable_apic_mode(void); - -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long id; - id = xapic_phys_to_log_apicid(cpu); - return (SET_APIC_LOGICAL_ID(id)); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LdR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void es7000_init_apic_ldr_cluster(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static inline void es7000_init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -extern int apic_version [MAX_APICS]; -static inline void es7000_setup_apic_routing(void) -{ - int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", - (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", - nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); -} - -static inline int es7000_apicid_to_node(int logical_apicid) -{ - return 0; -} - - -static inline int es7000_cpu_present_to_apicid(int mps_cpu) -{ - if (!mps_cpu) - return boot_cpu_physical_apicid; - else if (mps_cpu < nr_cpu_ids) - return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static inline physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) -{ - static int id = 0; - physid_mask_t mask; - mask = physid_mask_of_physid(id); - ++id; - return mask; -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int es7000_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static inline physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xff); -} - -extern unsigned int boot_cpu_physical_apicid; - -static inline int es7000_check_phys_apicid_present(int cpu_physical_apicid) -{ - boot_cpu_physical_apicid = read_apic_id(); - return (1); -} - -static inline unsigned int -es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = cpumask_first(cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return 0xFF; - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static inline unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return es7000_cpu_to_logical_apicid(0); - /* - * The cpus in the mask must all be on the apic cluster. 
If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return es7000_cpu_to_logical_apicid(0); - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - - -static inline unsigned int -es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) -{ - int apicid = es7000_cpu_to_logical_apicid(0); - cpumask_var_t cpumask; - - if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return apicid; - - cpumask_and(cpumask, inmask, andmask); - cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = es7000_cpu_mask_to_apicid(cpumask); - - free_cpumask_var(cpumask); - - return apicid; -} - -static inline int es7000_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_ES7000_APIC_H */ diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h deleted file mode 100644 index c74881a7b3d..00000000000 --- a/arch/x86/include/asm/es7000/apicdef.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_ES7000_APICDEF_H -#define __ASM_ES7000_APICDEF_H - -static inline unsigned int es7000_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -#endif diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h deleted file mode 100644 index 81e77c812ba..00000000000 --- a/arch/x86/include/asm/es7000/ipi.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_ES7000_IPI_H -#define __ASM_ES7000_IPI_H - -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - -static inline void es7000_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static inline void es7000_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself(cpu_online_mask, vector); -} - -static inline void es7000_send_IPI_all(int vector) -{ - es7000_send_IPI_mask(cpu_online_mask, vector); -} - -#endif /* __ASM_ES7000_IPI_H */ diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h deleted file mode 100644 index 662eb1e574d..00000000000 --- a/arch/x86/include/asm/es7000/mpparse.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_ES7000_MPPARSE_H -#define __ASM_ES7000_MPPARSE_H - -#include - -extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); -extern void setup_unisys(void); - -#ifdef CONFIG_ACPI -static inline int es7000_check_dsdt(void) -{ - struct acpi_table_header header; - - if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && - !strncmp(header.oem_id, "UNISYS", 6)) - return 1; - return 0; -} -#endif - -#endif /* __ASM_MACH_MPPARSE_H */ diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h deleted file mode 100644 index 99c72be1840..00000000000 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_ES7000_WAKECPU_H -#define __ASM_ES7000_WAKECPU_H - -#define ES7000_TRAMPOLINE_PHYS_LOW 0x467 -#define ES7000_TRAMPOLINE_PHYS_HIGH 0x469 - -static inline 
void es7000_wait_for_init_deassert(atomic_t *deassert) -{ -#ifndef CONFIG_ES7000_CLUSTERED_APIC - while (!atomic_read(deassert)) - cpu_relax(); -#endif - return; -} - -#endif /* __ASM_MACH_WAKECPU_H */ diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index be090b2037c..8b6113ec380 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -11,13 +11,300 @@ #include #include #include -#include +#include #include -#include -#include -#include +#include #include +#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) +#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) +#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +extern void es7000_enable_apic_mode(void); +extern int apic_version [MAX_APICS]; +extern u8 cpu_2_logical_apicid[]; +extern unsigned int boot_cpu_physical_apicid; + +extern int parse_unisys_oem (char *oemptr); +extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); +extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); +extern void setup_unisys(void); + +#define apicid_cluster(apicid) (apicid & 0xF0) +#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) + +static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + + +static void es7000_wait_for_init_deassert(atomic_t *deassert) +{ +#ifndef CONFIG_ES7000_CLUSTERED_APIC + while (!atomic_read(deassert)) + cpu_relax(); +#endif + return; +} + +static unsigned int es7000_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +#ifdef CONFIG_ACPI +static int es7000_check_dsdt(void) +{ + struct acpi_table_header header; + + if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && + !strncmp(header.oem_id, "UNISYS", 6)) + return 1; + return 0; +} +#endif + +static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static void es7000_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static void es7000_send_IPI_all(int vector) +{ + es7000_send_IPI_mask(cpu_online_mask, vector); +} + +static int es7000_apic_id_registered(void) +{ + return 1; +} + +static const cpumask_t *target_cpus_cluster(void) +{ + return &CPU_MASK_ALL; +} + +static const cpumask_t *es7000_target_cpus(void) +{ + return &cpumask_of_cpu(smp_processor_id()); +} + +static unsigned long +es7000_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} +static unsigned long es7000_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static unsigned long calculate_ldr(int cpu) +{ + unsigned long id = xapic_phys_to_log_apicid(cpu); + + return (SET_APIC_LOGICAL_ID(id)); +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LdR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... 
+ */ +static void es7000_init_apic_ldr_cluster(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_init_apic_ldr(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_setup_apic_routing(void) +{ + int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); + printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", + (apic_version[apic] == 0x14) ? + "Physical Cluster" : "Logical Cluster", + nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); +} + +static int es7000_apicid_to_node(int logical_apicid) +{ + return 0; +} + + +static int es7000_cpu_present_to_apicid(int mps_cpu) +{ + if (!mps_cpu) + return boot_cpu_physical_apicid; + else if (mps_cpu < nr_cpu_ids) + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) +{ + static int id = 0; + physid_mask_t mask; + + mask = physid_mask_of_physid(id); + ++id; + + return mask; +} + +/* Mapping from cpu number to logical apicid */ +static int es7000_cpu_to_logical_apicid(int cpu) +{ +#ifdef CONFIG_SMP + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif +} + +static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0xff); +} + +static int es7000_check_phys_apicid_present(int cpu_physical_apicid) +{ + boot_cpu_physical_apicid = read_apic_id(); + return (1); +} + +static unsigned int +es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpumask_weight(cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = cpumask_first(cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + + while (cpus_found < num_bits_set) { + if (cpumask_test_cpu(cpu, cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return 0xFF; + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpus_weight(*cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return es7000_cpu_to_logical_apicid(0); + /* + * The cpus in the mask must all be on the apic cluster. 
If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = first_cpu(*cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return es7000_cpu_to_logical_apicid(0); + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int +es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) +{ + int apicid = es7000_cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = es7000_cpu_mask_to_apicid(cpumask); + + free_cpumask_var(cpumask); + + return apicid; +} + +static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + void __init es7000_update_genapic_to_cluster(void) { apic->target_cpus = target_cpus_cluster; @@ -80,18 +367,6 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) } #endif -static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} struct genapic apic_es7000 = { @@ -140,10 +415,11 @@ struct genapic apic_es7000 = { .send_IPI_self = NULL, .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = default_wait_for_init_deassert, + .trampoline_phys_low = 0x467, + .trampoline_phys_high = 0x469, + + .wait_for_init_deassert = es7000_wait_for_init_deassert, /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, -- cgit v1.2.3 From 0939e4fd351c58d08d25650797749f18904461af Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:16:25 +0100 Subject: x86, smp: eliminate asm/mach-default/mach_wakecpu.h Spread mach_wakecpu.h's definitions into apic.h and genapic.h and remove mach_wakecpu.h. 
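For reference, the helpers are moved verbatim; after this patch they read roughly as below, copied from the hunks that follow. The INIT-deassert spin loop ends up in genapic.h, the remote-APIC inquiry wrapper in apic.h:

  /* arch/x86/include/asm/genapic.h */
  static inline void default_wait_for_init_deassert(atomic_t *deassert)
  {
          while (!atomic_read(deassert))
                  cpu_relax();
  }

  /* arch/x86/include/asm/apic.h */
  static inline void default_inquire_remote_apic(int apicid)
  {
          if (apic_verbosity >= APIC_DEBUG)
                  __inquire_remote_apic(apicid);
  }

The files that used to include mach_wakecpu.h (smpboot.c and the mach-generic drivers) simply drop that include.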
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 15 ++++++++++++++ arch/x86/include/asm/genapic.h | 7 +++++++ arch/x86/include/asm/mach-default/mach_wakecpu.h | 25 ------------------------ arch/x86/kernel/smpboot.c | 1 - arch/x86/mach-generic/bigsmp.c | 1 - arch/x86/mach-generic/default.c | 1 - arch/x86/mach-generic/es7000.c | 1 - arch/x86/mach-generic/summit.c | 1 - 8 files changed, 22 insertions(+), 30 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_wakecpu.h (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index ab1d51a8855..e8f030440bc 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -41,6 +41,21 @@ extern unsigned int apic_verbosity; extern int local_apic_timer_c2_ok; extern int disable_apic; + +#ifdef CONFIG_SMP +extern void __inquire_remote_apic(int apicid); +#else /* CONFIG_SMP */ +static inline void __inquire_remote_apic(int apicid) +{ +} +#endif /* CONFIG_SMP */ + +static inline void default_inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} + /* * Basic functions accessing APICs. */ diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index e5f9c5696fb..1772dad01b1 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -113,4 +113,11 @@ extern int default_cpu_present_to_apicid(int mps_cpu); extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); #endif +static inline void default_wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h deleted file mode 100644 index b1cde560e4c..00000000000 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H -#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H - -static inline void default_wait_for_init_deassert(atomic_t *deassert) -{ - while (!atomic_read(deassert)) - cpu_relax(); - return; -} - -#ifdef CONFIG_SMP -extern void __inquire_remote_apic(int apicid); -#else /* CONFIG_SMP */ -static inline void __inquire_remote_apic(int apicid) -{ -} -#endif /* CONFIG_SMP */ - -static inline void default_inquire_remote_apic(int apicid) -{ - if (apic_verbosity >= APIC_DEBUG) - __inquire_remote_apic(apicid); -} - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 170adc5b6cb..1fdc1a7e7b5 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -66,7 +66,6 @@ #include #include -#include #include #ifdef CONFIG_X86_32 diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 4d8b2d442ba..6fcccfb5918 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -17,7 +17,6 @@ #include #include #include -#include static int dmi_bigsmp; /* can be set by dmi scanners */ diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index c12dd2300a5..e3c5114fd91 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -16,7 +16,6 @@ #include #include #include -#include static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) { diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 8b6113ec380..bb11166b7c3 100644 --- 
a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -14,7 +14,6 @@ #include #include #include -#include #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) #define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index d5db3045437..673a64f8b46 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -16,7 +16,6 @@ #include #include #include -#include static int probe_summit(void) { -- cgit v1.2.3 From fb5b33c9f62ca9222c11841d61ddb7dc1a6552e9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:29:27 +0100 Subject: x86: eliminate asm/mach-*/mach_mpparse.h Move the definition to mpparse.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_mpparse.h | 17 ----------------- arch/x86/include/asm/mach-generic/mach_mpparse.h | 8 -------- arch/x86/include/asm/mpspec.h | 4 ++++ arch/x86/kernel/acpi/boot.c | 1 - arch/x86/kernel/es7000_32.c | 1 - arch/x86/kernel/mpparse.c | 1 - arch/x86/mach-generic/bigsmp.c | 1 - arch/x86/mach-generic/default.c | 1 - 8 files changed, 4 insertions(+), 30 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_mpparse.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_mpparse.h (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h deleted file mode 100644 index af0da140df9..00000000000 --- a/arch/x86/include/asm/mach-default/mach_mpparse.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H -#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H - -static inline int -generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - return 0; -} - -/* Hook from generic ACPI tables.c */ -static inline int default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} - - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h deleted file mode 100644 index 22bfb56f8fb..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_mpparse.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H -#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H - -extern int generic_mps_oem_check(struct mpc_table *, char *, char *); - -extern int default_acpi_madt_oem_check(char *, char *); - -#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */ diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 62d14ce3cd0..432e9cbc607 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -142,4 +142,8 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map) extern physid_mask_t phys_cpu_present_map; +extern int generic_mps_oem_check(struct mpc_table *, char *, char *); + +extern int default_acpi_madt_oem_check(char *, char *); + #endif /* _ASM_X86_MPSPEC_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 539163161a4..7b02a1cedca 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -63,7 +63,6 @@ EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_LOCAL_APIC #include -#include #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* X86 */ diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index d7f433ee602..8faea13c8fa 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -40,7 +40,6 @@ #include #include #include -#include #include 
#include diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index b12fa5ce6f5..c6930162b3b 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -32,7 +32,6 @@ #include #ifdef CONFIG_X86_32 #include -#include #endif /* diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 6fcccfb5918..626f45ca4e7 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -16,7 +16,6 @@ #include #include #include -#include static int dmi_bigsmp; /* can be set by dmi scanners */ diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index e3c5114fd91..6485e57e29b 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -15,7 +15,6 @@ #include #include #include -#include static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) { -- cgit v1.2.3 From b2af018ff26f1a2a026f548f7f0e552589905689 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:36:56 +0100 Subject: x86: remove mach_mpspec.h Move its definitions into mpspec.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_mpspec.h | 12 ------------ arch/x86/include/asm/mach-generic/mach_mpspec.h | 12 ------------ arch/x86/include/asm/mpspec.h | 25 +++++++++++++++++++------ 3 files changed, 19 insertions(+), 30 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_mpspec.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_mpspec.h (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h deleted file mode 100644 index e85ede686be..00000000000 --- a/arch/x86/include/asm/mach-default/mach_mpspec.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H -#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -#if CONFIG_BASE_SMALL == 0 -#define MAX_MP_BUSSES 256 -#else -#define MAX_MP_BUSSES 32 -#endif - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h deleted file mode 100644 index 3bc40722657..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_mpspec.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H -#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -/* Summit or generic (i.e. installer) kernels need lots of bus entries. */ -/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ -#define MAX_MP_BUSSES 260 - -extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); - -#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */ diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 432e9cbc607..03fb0d39654 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS]; extern int pic_mode; #ifdef CONFIG_X86_32 -#include + +/* + * Summit or generic (i.e. installer) kernels need lots of bus entries. + * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. 
+ */ +#if CONFIG_BASE_SMALL == 0 +# define MAX_MP_BUSSES 260 +#else +# define MAX_MP_BUSSES 32 +#endif + +#define MAX_IRQ_SOURCES 256 extern unsigned int def_to_bigsmp; extern u8 apicid_2_node[]; @@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES]; extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #endif -#define MAX_APICID 256 +#define MAX_APICID 256 -#else +#else /* CONFIG_X86_64: */ -#define MAX_MP_BUSSES 256 +#define MAX_MP_BUSSES 256 /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ -#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) +#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) -#endif +#endif /* CONFIG_X86_64 */ extern void early_find_smp_config(void); extern void early_get_smp_config(void); @@ -146,4 +157,6 @@ extern int generic_mps_oem_check(struct mpc_table *, char *, char *); extern int default_acpi_madt_oem_check(char *, char *); +extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); + #endif /* _ASM_X86_MPSPEC_H */ -- cgit v1.2.3 From 1f75ed0c1311a50ed393bcac258de65680d360e5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:36:56 +0100 Subject: x86: remove mach_apicdef.h Move its definitions into apic.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 16 ++++++++++++++++ arch/x86/include/asm/mach-default/mach_apic.h | 1 - arch/x86/include/asm/mach-default/mach_apicdef.h | 22 ---------------------- arch/x86/include/asm/mach-generic/mach_apicdef.h | 8 -------- arch/x86/include/asm/smp.h | 4 ++-- arch/x86/kernel/apic.c | 1 - arch/x86/kernel/genapic_flat_64.c | 1 - arch/x86/kernel/io_apic.c | 1 - arch/x86/kernel/mpparse.c | 3 --- arch/x86/mach-generic/default.c | 1 - 10 files changed, 18 insertions(+), 40 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_apicdef.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_apicdef.h (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index e8f030440bc..3a3202074c6 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -211,4 +211,20 @@ static inline void disable_local_APIC(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ +#ifdef CONFIG_X86_64 +#define SET_APIC_ID(x) (apic->set_apic_id(x)) +#else + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver)) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + +#endif + #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 2e4104cf348..b60b767d5be 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -3,7 +3,6 @@ #ifdef CONFIG_X86_LOCAL_APIC -#include #include #define APIC_DFR_VALUE (APIC_DFR_FLAT) diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h deleted file mode 100644 index 5141085962d..00000000000 --- a/arch/x86/include/asm/mach-default/mach_apicdef.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H -#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H - -#include - -#ifdef CONFIG_X86_64 -#define SET_APIC_ID(x) (apic->set_apic_id(x)) -#else - -static inline unsigned default_get_apic_id(unsigned long x) -{ - unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); - - if (APIC_XAPIC(ver)) - return (x >> 24) & 0xFF; - else - return (x >> 24) & 0x0F; -} - -#endif - -#endif /* 
_ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h deleted file mode 100644 index 61caa65b13f..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_apicdef.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H -#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H - -#ifndef APIC_DEFINITION -#include -#endif - -#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c63d480802a..d4ac4de4bce 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -173,6 +173,8 @@ extern int safe_smp_processor_id(void); #endif +#include + #ifdef CONFIG_X86_LOCAL_APIC #ifndef CONFIG_X86_64 @@ -182,7 +184,6 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } -#include static inline unsigned int read_apic_id(void) { unsigned int reg; @@ -197,7 +198,6 @@ static inline unsigned int read_apic_id(void) # if defined(APIC_DEFINITION) || defined(CONFIG_X86_64) extern int hard_smp_processor_id(void); # else -#include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 84b8e7c57ab..e6220809ca1 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -50,7 +50,6 @@ #include #include -#include #include /* diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index e9237f59928..19bffb3f732 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -19,7 +19,6 @@ #include #include #include -#include #ifdef CONFIG_ACPI #include diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index e90970ce21a..abae81989c2 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -64,7 +64,6 @@ #include #include -#include #define __apicdebuginit(type) static type __init diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c6930162b3b..a1452a53d14 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -30,9 +30,6 @@ #include #include -#ifdef CONFIG_X86_32 -#include -#endif /* * Checksum an MP configuration block. 
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 6485e57e29b..07817b2a787 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 328386d7ab600aa0993a1226f5817ac30a735724 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:50:18 +0100 Subject: x86, smp: refactor ->wake_cpu - remove macro wrappers Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_apic.h | 2 -- arch/x86/include/asm/mach-generic/mach_apic.h | 2 -- arch/x86/kernel/setup.c | 5 ++--- arch/x86/kernel/smpboot.c | 4 ++-- 4 files changed, 4 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index b60b767d5be..bae053cdcde 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -19,10 +19,8 @@ static inline const struct cpumask *default_target_cpus(void) #ifdef CONFIG_X86_64 #include #define read_apic_id() (apic->get_apic_id(apic_read(APIC_ID))) -#define wakeup_secondary_cpu (apic->wakeup_cpu) extern void default_setup_apic_routing(void); #else -#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* * Set up the logical destination ID. * diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index ca460e45991..96f217f819e 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -3,8 +3,6 @@ #include -#define wakeup_secondary_cpu (apic->wakeup_cpu) - extern void generic_bigsmp_probe(void); #endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a58e9f5e603..6b27f6dc7bf 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -589,9 +589,8 @@ early_param("elfcorehdr", setup_elfcorehdr); static int __init default_update_genapic(void) { #ifdef CONFIG_X86_SMP -# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64) - apic->wakeup_cpu = wakeup_secondary_cpu_via_init; -# endif + if (!apic->wakeup_cpu) + apic->wakeup_cpu = wakeup_secondary_cpu_via_init; #endif return 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 1fdc1a7e7b5..3fed177f345 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -750,7 +750,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad * (ie clustered apic addressing mode), this is a LOGICAL apic ID. - * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. + * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. */ { unsigned long boot_error = 0; @@ -841,7 +841,7 @@ do_rest: /* * Starting actual IPI sequence... */ - boot_error = wakeup_secondary_cpu(apicid, start_ip); + boot_error = apic->wakeup_cpu(apicid, start_ip); if (!boot_error) { /* -- cgit v1.2.3 From 5a44632f77a9c867621f7bf80c233eac75fea672 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 18:47:24 +0100 Subject: x86, numaq: consolidate code Move all the NUMAQ subarch definitions into numaq.c. With this it ceases to depend on build-time subarch features. 
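The conversion is mechanical: each inline that used to live in the asm/numaq/*.h headers becomes a file-local definition in arch/x86/mach-generic/numaq.c, e.g. (copied from the hunks below):

  /* formerly asm/numaq/apicdef.h */
  static inline unsigned int numaq_get_apic_id(unsigned long x)
  {
          return (x >> 24) & 0x0F;
  }

  /* formerly asm/numaq/wakecpu.h: NUMA-Q wakes secondaries by NMI, so kick the APIC */
  static inline void numaq_smp_callin_clear_local_apic(void)
  {
          clear_local_APIC();
  }

The one outside user touched here, arch/x86/pci/numaq_32.c, only needs its include line adjusted.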
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/numaq/apic.h | 122 ------------------------- arch/x86/include/asm/numaq/apicdef.h | 9 -- arch/x86/include/asm/numaq/ipi.h | 22 ----- arch/x86/include/asm/numaq/mpparse.h | 6 -- arch/x86/include/asm/numaq/wakecpu.h | 28 ------ arch/x86/mach-generic/numaq.c | 171 ++++++++++++++++++++++++++++++++++- arch/x86/pci/numaq_32.c | 2 +- 7 files changed, 167 insertions(+), 193 deletions(-) delete mode 100644 arch/x86/include/asm/numaq/apic.h delete mode 100644 arch/x86/include/asm/numaq/apicdef.h delete mode 100644 arch/x86/include/asm/numaq/ipi.h delete mode 100644 arch/x86/include/asm/numaq/mpparse.h delete mode 100644 arch/x86/include/asm/numaq/wakecpu.h (limited to 'arch') diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h deleted file mode 100644 index ce95e79f723..00000000000 --- a/arch/x86/include/asm/numaq/apic.h +++ /dev/null @@ -1,122 +0,0 @@ -#ifndef __ASM_NUMAQ_APIC_H -#define __ASM_NUMAQ_APIC_H - -#include -#include -#include - -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline const cpumask_t *numaq_target_cpus(void) -{ - return &CPU_MASK_ALL; -} - -static inline unsigned long -numaq_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} -static inline unsigned long numaq_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline int numaq_apic_id_registered(void) -{ - return 1; -} - -static inline void numaq_init_apic_ldr(void) -{ - /* Already done in NUMA-Q firmware */ -} - -static inline void numaq_setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "NUMA-Q", nr_ioapics); -} - -/* - * Skip adding the timer int on secondary nodes, which causes - * a small but painful rift in the time-space continuum. - */ -static inline int numaq_multi_timer_check(int apic, int irq) -{ - return apic != 0 && irq == 0; -} - -static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* We don't have a good way to do this yet - hack */ - return physids_promote(0xFUL); -} - -/* Mapping from cpu number to logical apicid */ -extern u8 cpu_2_logical_apicid[]; - -static inline int numaq_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -} - -/* - * Supporting over 60 cpus on NUMA-Q requires a locality-dependent - * cpu to APIC ID relation to properly interact with the intelligent - * mode of the cluster controller. 
- */ -static inline int numaq_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < 60) - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); - else - return BAD_APICID; -} - -static inline int numaq_apicid_to_node(int logical_apicid) -{ - return logical_apicid >> 4; -} - -static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) -{ - int node = numaq_apicid_to_node(logical_apicid); - int cpu = __ffs(logical_apicid & 0xf); - - return physid_mask_of_physid(cpu + 4*node); -} - -extern void *xquad_portio; - -static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -/* - * We use physical apicids here, not logical, so just return the default - * physical broadcast to stop people from breaking us - */ -static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - return 0x0F; -} - -static inline unsigned int -numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - return 0x0F; -} - -/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_NUMAQ_APIC_H */ diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h deleted file mode 100644 index cd927d5bd50..00000000000 --- a/arch/x86/include/asm/numaq/apicdef.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_NUMAQ_APICDEF_H -#define __ASM_NUMAQ_APICDEF_H - -static inline unsigned int numaq_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0x0F; -} - -#endif diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h deleted file mode 100644 index 5dbc4b4cd5e..00000000000 --- a/arch/x86/include/asm/numaq/ipi.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_NUMAQ_IPI_H -#define __ASM_NUMAQ_IPI_H - -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - -static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static inline void numaq_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself(cpu_online_mask, vector); -} - -static inline void numaq_send_IPI_all(int vector) -{ - numaq_send_IPI_mask(cpu_online_mask, vector); -} - -#endif /* __ASM_NUMAQ_IPI_H */ diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h deleted file mode 100644 index a2eeefcd1cc..00000000000 --- a/arch/x86/include/asm/numaq/mpparse.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_NUMAQ_MPPARSE_H -#define __ASM_NUMAQ_MPPARSE_H - -extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); - -#endif /* __ASM_NUMAQ_MPPARSE_H */ diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h deleted file mode 100644 index afe81439c7d..00000000000 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef __ASM_NUMAQ_WAKECPU_H -#define __ASM_NUMAQ_WAKECPU_H - -/* This file copes with machines that wakeup secondary CPUs by NMIs */ - -#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) -#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) - -/* - * Because we use NMIs rather than the INIT-STARTUP sequence to - * bootstrap the CPUs, the APIC may be in a weird state. 
Kick it: - */ -static inline void numaq_smp_callin_clear_local_apic(void) -{ - clear_local_APIC(); -} - -static inline void -numaq_store_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk("Storing NMI vector\n"); - *high = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); - *low = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); -} - -#endif /* __ASM_NUMAQ_WAKECPU_H */ diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index ddb50fba286..c221cfb2c2d 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -11,14 +11,175 @@ #include #include #include -#include +#include #include -#include -#include -#include -#include #include +#include +#include +#include +#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) + +static inline unsigned int numaq_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0x0F; +} + +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); + +static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static inline void numaq_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static inline void numaq_send_IPI_all(int vector) +{ + numaq_send_IPI_mask(cpu_online_mask, vector); +} + +extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); + +#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) +#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) + +/* + * Because we use NMIs rather than the INIT-STARTUP sequence to + * bootstrap the CPUs, the APIC may be in a weird state. Kick it: + */ +static inline void numaq_smp_callin_clear_local_apic(void) +{ + clear_local_APIC(); +} + +static inline void +numaq_store_NMI_vector(unsigned short *high, unsigned short *low) +{ + printk("Storing NMI vector\n"); + *high = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); + *low = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); +} + +static inline const cpumask_t *numaq_target_cpus(void) +{ + return &CPU_MASK_ALL; +} + +static inline unsigned long +numaq_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long numaq_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +#define apicid_cluster(apicid) (apicid & 0xF0) + +static inline int numaq_apic_id_registered(void) +{ + return 1; +} + +static inline void numaq_init_apic_ldr(void) +{ + /* Already done in NUMA-Q firmware */ +} + +static inline void numaq_setup_apic_routing(void) +{ + printk("Enabling APIC mode: %s. Using %d I/O APICs\n", + "NUMA-Q", nr_ioapics); +} + +/* + * Skip adding the timer int on secondary nodes, which causes + * a small but painful rift in the time-space continuum. 
+ */ +static inline int numaq_multi_timer_check(int apic, int irq) +{ + return apic != 0 && irq == 0; +} + +static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* We don't have a good way to do this yet - hack */ + return physids_promote(0xFUL); +} + +/* Mapping from cpu number to logical apicid */ +extern u8 cpu_2_logical_apicid[]; + +static inline int numaq_cpu_to_logical_apicid(int cpu) +{ + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; +} + +/* + * Supporting over 60 cpus on NUMA-Q requires a locality-dependent + * cpu to APIC ID relation to properly interact with the intelligent + * mode of the cluster controller. + */ +static inline int numaq_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < 60) + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + else + return BAD_APICID; +} + +static inline int numaq_apicid_to_node(int logical_apicid) +{ + return logical_apicid >> 4; +} + +static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) +{ + int node = numaq_apicid_to_node(logical_apicid); + int cpu = __ffs(logical_apicid & 0xf); + + return physid_mask_of_physid(cpu + 4*node); +} + +extern void *xquad_portio; + +static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +/* + * We use physical apicids here, not logical, so just return the default + * physical broadcast to stop people from breaking us + */ +static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + return 0x0F; +} + +static inline unsigned int +numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + return 0x0F; +} + +/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ +static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { numaq_mps_oem_check(mpc, oem, productid); diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 2089354968a..1b2d773612e 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include -- cgit v1.2.3 From b11b867f78910192fc54bd0d09148cf768c7aaad Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 18:49:31 +0100 Subject: x86, summit: consolidate code Consolidate all the Summit code into a single file: arch/x86/kernel/summit_32.c. 
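Everything that was spread across asm/summit/*.h and mach-generic/summit.c now sits next to the existing Summit RIO/NUMA setup code, and the probe, mptable/ACPI hooks and APIC helpers all become local to summit_32.c. They are wired up through the apic_summit genapic template, excerpted here (trimmed, full version in the hunk below):

  struct genapic apic_summit = {
          .name                 = "summit",
          .probe                = probe_summit,
          .acpi_madt_oem_check  = summit_acpi_madt_oem_check,
          .irq_delivery_mode    = dest_LowestPrio,
          .init_apic_ldr        = summit_init_apic_ldr,
          .cpu_mask_to_apicid   = summit_cpu_mask_to_apicid,
          .send_IPI_mask        = summit_send_IPI_mask,
          /* ... remaining callbacks unchanged from mach-generic/summit.c ... */
  };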
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/summit/apic.h | 195 ---------------- arch/x86/include/asm/summit/apicdef.h | 9 - arch/x86/include/asm/summit/ipi.h | 26 --- arch/x86/include/asm/summit/mpparse.h | 109 --------- arch/x86/kernel/summit_32.c | 417 +++++++++++++++++++++++++++++++++- arch/x86/mach-generic/Makefile | 1 - arch/x86/mach-generic/summit.c | 94 -------- 7 files changed, 416 insertions(+), 435 deletions(-) delete mode 100644 arch/x86/include/asm/summit/apic.h delete mode 100644 arch/x86/include/asm/summit/apicdef.h delete mode 100644 arch/x86/include/asm/summit/ipi.h delete mode 100644 arch/x86/include/asm/summit/mpparse.h delete mode 100644 arch/x86/mach-generic/summit.c (limited to 'arch') diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h deleted file mode 100644 index 15b8dbd19e1..00000000000 --- a/arch/x86/include/asm/summit/apic.h +++ /dev/null @@ -1,195 +0,0 @@ -#ifndef __ASM_SUMMIT_APIC_H -#define __ASM_SUMMIT_APIC_H - -#include -#include - -/* In clustered mode, the high nibble of APIC ID is a cluster number. - * The low nibble is a 4-bit bitmap. */ -#define XAPIC_DEST_CPUS_SHIFT 4 -#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) -#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) - -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline const cpumask_t *summit_target_cpus(void) -{ - /* CPU_MASK_ALL (0xff) has undefined behaviour with - * dest_LowestPrio mode logical clustered apic interrupt routing - * Just start on cpu 0. IRQ balancing will spread load - */ - return &cpumask_of_cpu(0); -} - -static inline unsigned long -summit_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -/* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long summit_check_apicid_present(int bit) -{ - return 1; -} - -#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) - -extern u8 cpu_2_logical_apicid[]; - -static inline void summit_init_apic_ldr(void) -{ - unsigned long val, id; - int count = 0; - u8 my_id = (u8)hard_smp_processor_id(); - u8 my_cluster = (u8)apicid_cluster(my_id); -#ifdef CONFIG_SMP - u8 lid; - int i; - - /* Create logical APIC IDs by counting CPUs already in cluster. */ - for (count = 0, i = nr_cpu_ids; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; - if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) - ++count; - } -#endif - /* We only have a 4 wide bitmap in cluster mode. If a deranged - * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ - BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(id); - apic_write(APIC_LDR, val); -} - -static inline int summit_apic_id_registered(void) -{ - return 1; -} - -static inline void summit_setup_apic_routing(void) -{ - printk("Enabling APIC mode: Summit. 
Using %d I/O APICs\n", - nr_ioapics); -} - -static inline int summit_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - -/* Mapping from cpu number to logical apicid */ -static inline int summit_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static inline int summit_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static inline physid_mask_t -summit_ioapic_phys_id_map(physid_mask_t phys_id_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0x0F); -} - -static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) -{ - return physid_mask_of_physid(0); -} - -static inline void summit_setup_portio_remap(void) -{ -} - -static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set >= nr_cpu_ids) - return 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = summit_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = summit_cpu_to_logical_apicid(cpu); - - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return 0xFF; - } - apicid = apicid | new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static inline unsigned int -summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) -{ - int apicid = summit_cpu_to_logical_apicid(0); - cpumask_var_t cpumask; - - if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return apicid; - - cpumask_and(cpumask, inmask, andmask); - cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = summit_cpu_mask_to_apicid(cpumask); - - free_cpumask_var(cpumask); - - return apicid; -} - -/* - * cpuid returns the value latched in the HW at reset, not the APIC ID - * register's value. For any box whose BIOS changes APIC IDs, like - * clustered APIC systems, we must use hard_smp_processor_id. - * - * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 
- */ -static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -#endif /* __ASM_SUMMIT_APIC_H */ diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h deleted file mode 100644 index c24b0df2dec..00000000000 --- a/arch/x86/include/asm/summit/apicdef.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_SUMMIT_APICDEF_H -#define __ASM_SUMMIT_APICDEF_H - -static inline unsigned summit_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -#endif diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h deleted file mode 100644 index f87a43fe0ae..00000000000 --- a/arch/x86/include/asm/summit/ipi.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef __ASM_SUMMIT_IPI_H -#define __ASM_SUMMIT_IPI_H - -void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector); - -static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static inline void summit_send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - summit_send_IPI_mask(&mask, vector); -} - -static inline void summit_send_IPI_all(int vector) -{ - summit_send_IPI_mask(&cpu_online_map, vector); -} - -#endif /* __ASM_SUMMIT_IPI_H */ diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h deleted file mode 100644 index 4bbcce39acb..00000000000 --- a/arch/x86/include/asm/summit/mpparse.h +++ /dev/null @@ -1,109 +0,0 @@ -#ifndef __ASM_SUMMIT_MPPARSE_H -#define __ASM_SUMMIT_MPPARSE_H - -#include - -extern int use_cyclone; - -#ifdef CONFIG_X86_SUMMIT_NUMA -extern void setup_summit(void); -#else -#define setup_summit() {} -#endif - -static inline int -summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (!strncmp(oem, "IBM ENSW", 8) && - (!strncmp(productid, "VIGIL SMP", 9) - || !strncmp(productid, "EXA", 3) - || !strncmp(productid, "RUTHLESS SMP", 12))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -/* Hook from generic ACPI tables.c */ -static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (!strncmp(oem_id, "IBM", 3) && - (!strncmp(oem_table_id, "SERVIGIL", 8) - || !strncmp(oem_table_id, "EXA", 3))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -struct rio_table_hdr { - unsigned char version; /* Version number of this data structure */ - /* Version 3 adds chassis_num & WP_index */ - unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ - unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ -} __attribute__((packed)); - -struct scal_detail { - unsigned char node_id; /* Scalability Node ID */ - unsigned long CBAR; /* Address of 1MB register space */ - unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF = None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port2node; /* Node ID port connected to: 0xFF = None */ - unsigned char port2port; /* Port num port connected 
to: 0,1,2, or 0xFF=None */ - unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ -} __attribute__((packed)); - -struct rio_detail { - unsigned char node_id; /* RIO Node ID */ - unsigned long BBAR; /* Address of 1MB register space */ - unsigned char type; /* Type of device */ - unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ - /* For CYC: Node ID of Twister that owns this CYC */ - unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF=None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ - /* For CYC: 0 */ - unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ - /* = 0 : the XAPIC is not used, ie:*/ - /* ints fwded to another XAPIC */ - /* Bits1:7 Reserved */ - /* For CYC: Bits0:7 Reserved */ - unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ - /* lower slot numbers/PCI bus numbers */ - /* For CYC: No meaning */ - unsigned char chassis_num; /* 1 based Chassis number */ - /* For LookOut WPEGs this field indicates the */ - /* Expansion Chassis #, enumerated from Boot */ - /* Node WPEG external port, then Boot Node CYC */ - /* external port, then Next Vigil chassis WPEG */ - /* external port, etc. */ - /* Shared Lookouts have only 1 chassis number (the */ - /* first one assigned) */ -} __attribute__((packed)); - - -typedef enum { - CompatTwister = 0, /* Compatibility Twister */ - AltTwister = 1, /* Alternate Twister of internal 8-way */ - CompatCyclone = 2, /* Compatibility Cyclone */ - AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ - CompatWPEG = 4, /* Compatibility WPEG */ - AltWPEG = 5, /* Second Planar WPEG */ - LookOutAWPEG = 6, /* LookOut WPEG */ - LookOutBWPEG = 7, /* LookOut WPEG */ -} node_type; - -static inline int is_WPEG(struct rio_detail *rio){ - return (rio->type == CompatWPEG || rio->type == AltWPEG || - rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); -} - -#endif /* __ASM_SUMMIT_MPPARSE_H */ diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 7b987852e87..3b60dd5e57f 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -30,7 +30,364 @@ #include #include #include -#include + +/* + * APIC driver for the IBM "Summit" chipset. 
+ */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline unsigned summit_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector); + +static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static inline void summit_send_IPI_allbutself(int vector) +{ + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); + + if (!cpus_empty(mask)) + summit_send_IPI_mask(&mask, vector); +} + +static inline void summit_send_IPI_all(int vector) +{ + summit_send_IPI_mask(&cpu_online_map, vector); +} + +#include + +extern int use_cyclone; + +#ifdef CONFIG_X86_SUMMIT_NUMA +extern void setup_summit(void); +#else +#define setup_summit() {} +#endif + +static inline int +summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + if (!strncmp(oem, "IBM ENSW", 8) && + (!strncmp(productid, "VIGIL SMP", 9) + || !strncmp(productid, "EXA", 3) + || !strncmp(productid, "RUTHLESS SMP", 12))){ + mark_tsc_unstable("Summit based system"); + use_cyclone = 1; /*enable cyclone-timer*/ + setup_summit(); + return 1; + } + return 0; +} + +/* Hook from generic ACPI tables.c */ +static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (!strncmp(oem_id, "IBM", 3) && + (!strncmp(oem_table_id, "SERVIGIL", 8) + || !strncmp(oem_table_id, "EXA", 3))){ + mark_tsc_unstable("Summit based system"); + use_cyclone = 1; /*enable cyclone-timer*/ + setup_summit(); + return 1; + } + return 0; +} + +struct rio_table_hdr { + unsigned char version; /* Version number of this data structure */ + /* Version 3 adds chassis_num & WP_index */ + unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ + unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ +} __attribute__((packed)); + +struct scal_detail { + unsigned char node_id; /* Scalability Node ID */ + unsigned long CBAR; /* Address of 1MB register space */ + unsigned char port0node; /* Node ID port connected to: 0xFF=None */ + unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port1node; /* Node ID port connected to: 0xFF = None */ + unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port2node; /* Node ID port connected to: 0xFF = None */ + unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ +} __attribute__((packed)); + +struct rio_detail { + unsigned char node_id; /* RIO Node ID */ + unsigned long BBAR; /* Address of 1MB register space */ + unsigned char type; /* Type of device */ + unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ + /* For CYC: Node ID of Twister that owns this CYC */ + unsigned char port0node; /* Node ID port connected to: 0xFF=None */ + unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port1node; /* Node ID port connected to: 0xFF=None */ + unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ + /* For CYC: 0 */ + unsigned char status; /* For WPEG: Bit 0 = 1 : the 
XAPIC is used */ + /* = 0 : the XAPIC is not used, ie:*/ + /* ints fwded to another XAPIC */ + /* Bits1:7 Reserved */ + /* For CYC: Bits0:7 Reserved */ + unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ + /* lower slot numbers/PCI bus numbers */ + /* For CYC: No meaning */ + unsigned char chassis_num; /* 1 based Chassis number */ + /* For LookOut WPEGs this field indicates the */ + /* Expansion Chassis #, enumerated from Boot */ + /* Node WPEG external port, then Boot Node CYC */ + /* external port, then Next Vigil chassis WPEG */ + /* external port, etc. */ + /* Shared Lookouts have only 1 chassis number (the */ + /* first one assigned) */ +} __attribute__((packed)); + + +typedef enum { + CompatTwister = 0, /* Compatibility Twister */ + AltTwister = 1, /* Alternate Twister of internal 8-way */ + CompatCyclone = 2, /* Compatibility Cyclone */ + AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ + CompatWPEG = 4, /* Compatibility WPEG */ + AltWPEG = 5, /* Second Planar WPEG */ + LookOutAWPEG = 6, /* LookOut WPEG */ + LookOutBWPEG = 7, /* LookOut WPEG */ +} node_type; + +static inline int is_WPEG(struct rio_detail *rio){ + return (rio->type == CompatWPEG || rio->type == AltWPEG || + rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); +} + + +/* In clustered mode, the high nibble of APIC ID is a cluster number. + * The low nibble is a 4-bit bitmap. */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) + +#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) + +static inline const cpumask_t *summit_target_cpus(void) +{ + /* CPU_MASK_ALL (0xff) has undefined behaviour with + * dest_LowestPrio mode logical clustered apic interrupt routing + * Just start on cpu 0. IRQ balancing will spread load + */ + return &cpumask_of_cpu(0); +} + +static inline unsigned long +summit_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} + +/* we don't use the phys_cpu_present_map to indicate apicid presence */ +static inline unsigned long summit_check_apicid_present(int bit) +{ + return 1; +} + +#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) + +extern u8 cpu_2_logical_apicid[]; + +static inline void summit_init_apic_ldr(void) +{ + unsigned long val, id; + int count = 0; + u8 my_id = (u8)hard_smp_processor_id(); + u8 my_cluster = (u8)apicid_cluster(my_id); +#ifdef CONFIG_SMP + u8 lid; + int i; + + /* Create logical APIC IDs by counting CPUs already in cluster. */ + for (count = 0, i = nr_cpu_ids; --i >= 0; ) { + lid = cpu_2_logical_apicid[i]; + if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) + ++count; + } +#endif + /* We only have a 4 wide bitmap in cluster mode. If a deranged + * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ + BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); + id = my_cluster | (1UL << count); + apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(id); + apic_write(APIC_LDR, val); +} + +static inline int summit_apic_id_registered(void) +{ + return 1; +} + +static inline void summit_setup_apic_routing(void) +{ + printk("Enabling APIC mode: Summit. 
Using %d I/O APICs\n", + nr_ioapics); +} + +static inline int summit_apicid_to_node(int logical_apicid) +{ +#ifdef CONFIG_SMP + return apicid_2_node[hard_smp_processor_id()]; +#else + return 0; +#endif +} + +/* Mapping from cpu number to logical apicid */ +static inline int summit_cpu_to_logical_apicid(int cpu) +{ +#ifdef CONFIG_SMP + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif +} + +static inline int summit_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline physid_mask_t +summit_ioapic_phys_id_map(physid_mask_t phys_id_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0x0F); +} + +static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) +{ + return physid_mask_of_physid(0); +} + +static inline void summit_setup_portio_remap(void) +{ +} + +static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpus_weight(*cpumask); + /* Return id to all */ + if (num_bits_set >= nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = first_cpu(*cpumask); + apicid = summit_cpu_to_logical_apicid(cpu); + + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask)) { + int new_apicid = summit_cpu_to_logical_apicid(cpu); + + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return 0xFF; + } + apicid = apicid | new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static inline unsigned int +summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) +{ + int apicid = summit_cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = summit_cpu_mask_to_apicid(cpumask); + + free_cpumask_var(cpumask); + + return apicid; +} + +/* + * cpuid returns the value latched in the HW at reset, not the APIC ID + * register's value. For any box whose BIOS changes APIC IDs, like + * clustered APIC systems, we must use hard_smp_processor_id. + * + * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. + */ +static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return hard_smp_processor_id() >> index_msb; +} + +static int probe_summit(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. 
+ */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} static struct rio_table_hdr *rio_table_hdr __initdata; static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; @@ -186,3 +543,61 @@ void __init setup_summit(void) next_wpeg = 0; } while (next_wpeg != 0); } + + +struct genapic apic_summit = { + + .name = "summit", + .probe = probe_summit, + .acpi_madt_oem_check = summit_acpi_madt_oem_check, + .apic_id_registered = summit_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, + + .target_cpus = summit_target_cpus, + .disable_esr = 1, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = summit_check_apicid_used, + .check_apicid_present = summit_check_apicid_present, + + .vector_allocation_domain = summit_vector_allocation_domain, + .init_apic_ldr = summit_init_apic_ldr, + + .ioapic_phys_id_map = summit_ioapic_phys_id_map, + .setup_apic_routing = summit_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = summit_apicid_to_node, + .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, + .cpu_present_to_apicid = summit_cpu_present_to_apicid, + .apicid_to_cpu_present = summit_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = summit_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = summit_phys_pkg_id, + .mps_oem_check = summit_mps_oem_check, + + .get_apic_id = summit_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, + + .send_IPI_mask = summit_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = summit_send_IPI_allbutself, + .send_IPI_all = summit_send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .inquire_remote_apic = default_inquire_remote_apic, +}; diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 6730f4e7c74..78ab5735cb8 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile @@ -6,6 +6,5 @@ EXTRA_CFLAGS := -Iarch/x86/kernel obj-y := probe.o default.o obj-$(CONFIG_X86_NUMAQ) += numaq.o -obj-$(CONFIG_X86_SUMMIT) += summit.o obj-$(CONFIG_X86_BIGSMP) += bigsmp.o obj-$(CONFIG_X86_ES7000) += es7000.o diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c deleted file mode 100644 index 673a64f8b46..00000000000 --- a/arch/x86/mach-generic/summit.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * APIC driver for the IBM "Summit" chipset. - */ -#define APIC_DEFINITION 1 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int probe_summit(void) -{ - /* probed later in mptable/ACPI hooks */ - return 0; -} - -static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. 
- */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - -struct genapic apic_summit = { - - .name = "summit", - .probe = probe_summit, - .acpi_madt_oem_check = summit_acpi_madt_oem_check, - .apic_id_registered = summit_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* logical delivery broadcast to all CPUs: */ - .irq_dest_mode = 1, - - .target_cpus = summit_target_cpus, - .disable_esr = 1, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = summit_check_apicid_used, - .check_apicid_present = summit_check_apicid_present, - - .vector_allocation_domain = summit_vector_allocation_domain, - .init_apic_ldr = summit_init_apic_ldr, - - .ioapic_phys_id_map = summit_ioapic_phys_id_map, - .setup_apic_routing = summit_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = summit_apicid_to_node, - .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, - .cpu_present_to_apicid = summit_cpu_present_to_apicid, - .apicid_to_cpu_present = summit_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = summit_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = summit_phys_pkg_id, - .mps_oem_check = summit_mps_oem_check, - - .get_apic_id = summit_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, - - .send_IPI_mask = summit_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = summit_send_IPI_allbutself, - .send_IPI_all = summit_send_IPI_all, - .send_IPI_self = NULL, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, - .inquire_remote_apic = default_inquire_remote_apic, -}; -- cgit v1.2.3 From 42990701f938b9318e46102d9919ceb28e5b0e6d Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 20 Jan 2009 21:14:37 +0000 Subject: sh: Relax inline assembly constraints When dereferencing the memory address contained in a register and modifying the value at that memory address, the register should not be listed in the inline asm outputs. The value at the memory address is an output (which is taken care of with the "memory" clobber), not the register. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/bitops-llsc.h | 72 +++++++++++++++++++------------------- arch/sh/include/asm/cmpxchg-llsc.h | 38 ++++++++++---------- 2 files changed, 55 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h index 1d2fc0b010a..d8328be0619 100644 --- a/arch/sh/include/asm/bitops-llsc.h +++ b/arch/sh/include/asm/bitops-llsc.h @@ -1,7 +1,7 @@ #ifndef __ASM_SH_BITOPS_LLSC_H #define __ASM_SH_BITOPS_LLSC_H -static inline void set_bit(int nr, volatile void * addr) +static inline void set_bit(int nr, volatile void *addr) { int mask; volatile unsigned int *a = addr; @@ -13,16 +13,16 @@ static inline void set_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" "movli.l @%1, %0 ! 
set_bit \n\t" - "or %3, %0 \n\t" + "or %2, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" - : "=&z" (tmp), "=r" (a) - : "1" (a), "r" (mask) + : "=&z" (tmp) + : "r" (a), "r" (mask) : "t", "memory" ); } -static inline void clear_bit(int nr, volatile void * addr) +static inline void clear_bit(int nr, volatile void *addr) { int mask; volatile unsigned int *a = addr; @@ -34,16 +34,16 @@ static inline void clear_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" "movli.l @%1, %0 ! clear_bit \n\t" - "and %3, %0 \n\t" + "and %2, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" - : "=&z" (tmp), "=r" (a) - : "1" (a), "r" (~mask) + : "=&z" (tmp) + : "r" (a), "r" (~mask) : "t", "memory" ); } -static inline void change_bit(int nr, volatile void * addr) +static inline void change_bit(int nr, volatile void *addr) { int mask; volatile unsigned int *a = addr; @@ -55,16 +55,16 @@ static inline void change_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" "movli.l @%1, %0 ! change_bit \n\t" - "xor %3, %0 \n\t" + "xor %2, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" - : "=&z" (tmp), "=r" (a) - : "1" (a), "r" (mask) + : "=&z" (tmp) + : "r" (a), "r" (mask) : "t", "memory" ); } -static inline int test_and_set_bit(int nr, volatile void * addr) +static inline int test_and_set_bit(int nr, volatile void *addr) { int mask, retval; volatile unsigned int *a = addr; @@ -75,21 +75,21 @@ static inline int test_and_set_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! test_and_set_bit \n\t" - "mov %0, %2 \n\t" - "or %4, %0 \n\t" - "movco.l %0, @%1 \n\t" + "movli.l @%2, %0 ! test_and_set_bit \n\t" + "mov %0, %1 \n\t" + "or %3, %0 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" - "and %4, %2 \n\t" - : "=&z" (tmp), "=r" (a), "=&r" (retval) - : "1" (a), "r" (mask) + "and %3, %1 \n\t" + : "=&z" (tmp), "=&r" (retval) + : "r" (a), "r" (mask) : "t", "memory" ); return retval != 0; } -static inline int test_and_clear_bit(int nr, volatile void * addr) +static inline int test_and_clear_bit(int nr, volatile void *addr) { int mask, retval; volatile unsigned int *a = addr; @@ -100,22 +100,22 @@ static inline int test_and_clear_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! test_and_clear_bit \n\t" - "mov %0, %2 \n\t" - "and %5, %0 \n\t" - "movco.l %0, @%1 \n\t" + "movli.l @%2, %0 ! test_and_clear_bit \n\t" + "mov %0, %1 \n\t" + "and %4, %0 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" - "and %4, %2 \n\t" + "and %3, %1 \n\t" "synco \n\t" - : "=&z" (tmp), "=r" (a), "=&r" (retval) - : "1" (a), "r" (mask), "r" (~mask) + : "=&z" (tmp), "=&r" (retval) + : "r" (a), "r" (mask), "r" (~mask) : "t", "memory" ); return retval != 0; } -static inline int test_and_change_bit(int nr, volatile void * addr) +static inline int test_and_change_bit(int nr, volatile void *addr) { int mask, retval; volatile unsigned int *a = addr; @@ -126,15 +126,15 @@ static inline int test_and_change_bit(int nr, volatile void * addr) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! test_and_change_bit \n\t" - "mov %0, %2 \n\t" - "xor %4, %0 \n\t" - "movco.l %0, @%1 \n\t" + "movli.l @%2, %0 ! 
test_and_change_bit \n\t" + "mov %0, %1 \n\t" + "xor %3, %0 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" - "and %4, %2 \n\t" + "and %3, %1 \n\t" "synco \n\t" - : "=&z" (tmp), "=r" (a), "=&r" (retval) - : "1" (a), "r" (mask) + : "=&z" (tmp), "=&r" (retval) + : "r" (a), "r" (mask) : "t", "memory" ); diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h index aee3bf28658..0fac3da536c 100644 --- a/arch/sh/include/asm/cmpxchg-llsc.h +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -8,14 +8,14 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! xchg_u32 \n\t" - "mov %0, %2 \n\t" - "mov %4, %0 \n\t" - "movco.l %0, @%1 \n\t" + "movli.l @%2, %0 ! xchg_u32 \n\t" + "mov %0, %1 \n\t" + "mov %3, %0 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" "synco \n\t" - : "=&z"(tmp), "=r" (m), "=&r" (retval) - : "1" (m), "r" (val) + : "=&z"(tmp), "=&r" (retval) + : "r" (m), "r" (val) : "t", "memory" ); @@ -29,14 +29,14 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! xchg_u8 \n\t" - "mov %0, %2 \n\t" - "mov %4, %0 \n\t" - "movco.l %0, @%1 \n\t" + "movli.l @%2, %0 ! xchg_u8 \n\t" + "mov %0, %1 \n\t" + "mov %3, %0 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" "synco \n\t" - : "=&z"(tmp), "=r" (m), "=&r" (retval) - : "1" (m), "r" (val & 0xff) + : "=&z"(tmp), "=&r" (retval) + : "r" (m), "r" (val & 0xff) : "t", "memory" ); @@ -51,17 +51,17 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __cmpxchg_u32 \n\t" - "mov %0, %2 \n\t" - "cmp/eq %2, %4 \n\t" + "movli.l @%2, %0 ! __cmpxchg_u32 \n\t" + "mov %0, %1 \n\t" + "cmp/eq %1, %3 \n\t" "bf 2f \n\t" - "mov %5, %0 \n\t" + "mov %3, %0 \n\t" "2: \n\t" - "movco.l %0, @%1 \n\t" + "movco.l %0, @%2 \n\t" "bf 1b \n\t" "synco \n\t" - : "=&z" (tmp), "=r" (m), "=&r" (retval) - : "1" (m), "r" (old), "r" (new) + : "=&z" (tmp), "=&r" (retval) + : "r" (m), "r" (old), "r" (new) : "t", "memory" ); -- cgit v1.2.3 From 84fdf6cda30df72994baa2496da86787fb44cbb4 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 20 Jan 2009 21:14:38 +0000 Subject: sh: Use the atomic_t "counter" member Now that atomic_t is a generic opaque type for all architectures, it is unwise to use intimate knowledge of its internals when manipulating it. Instead of relying on the "counter" member being at offset 0 from the beginning of an atomic_t, explicitly reference the member. This guards us from any changes to the layout of the beginning of the atomic_t type. 
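For illustration, a minimal sketch of the difference, assuming the usual single-member
atomic_t declaration (this snippet is illustrative only and not part of the patch; the
IRQ disabling done by the real helpers is omitted to keep it short):

typedef struct {
	int counter;
} atomic_t;

static inline void atomic_add_via_cast(int i, atomic_t *v)
{
	/* Only correct while "counter" is the first member and
	 * sizeof(long) == sizeof(int); breaks silently otherwise. */
	*(long *)v += i;
}

static inline void atomic_add_via_member(int i, atomic_t *v)
{
	/* Always touches the right field with the right width,
	 * no matter how atomic_t ends up being laid out. */
	v->counter += i;
}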
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/atomic-irq.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h index 74f7943cff6..a0b348068ca 100644 --- a/arch/sh/include/asm/atomic-irq.h +++ b/arch/sh/include/asm/atomic-irq.h @@ -11,7 +11,7 @@ static inline void atomic_add(int i, atomic_t *v) unsigned long flags; local_irq_save(flags); - *(long *)v += i; + v->counter += i; local_irq_restore(flags); } @@ -20,7 +20,7 @@ static inline void atomic_sub(int i, atomic_t *v) unsigned long flags; local_irq_save(flags); - *(long *)v -= i; + v->counter -= i; local_irq_restore(flags); } @@ -29,9 +29,9 @@ static inline int atomic_add_return(int i, atomic_t *v) unsigned long temp, flags; local_irq_save(flags); - temp = *(long *)v; + temp = v->counter; temp += i; - *(long *)v = temp; + v->counter = temp; local_irq_restore(flags); return temp; @@ -42,9 +42,9 @@ static inline int atomic_sub_return(int i, atomic_t *v) unsigned long temp, flags; local_irq_save(flags); - temp = *(long *)v; + temp = v->counter; temp -= i; - *(long *)v = temp; + v->counter = temp; local_irq_restore(flags); return temp; @@ -55,7 +55,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) unsigned long flags; local_irq_save(flags); - *(long *)v &= ~mask; + v->counter &= ~mask; local_irq_restore(flags); } @@ -64,7 +64,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) unsigned long flags; local_irq_save(flags); - *(long *)v |= mask; + v->counter |= mask; local_irq_restore(flags); } -- cgit v1.2.3 From a448720ca3248e8a7a426336885549d6e923fd8e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 15:42:23 -0800 Subject: x86: unify asm/io.h: IO_SPACE_LIMIT Impact: Cleanup (trivial unification) Move common define IO_SPACE_LIMIT from to . Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/io.h | 1 + arch/x86/include/asm/io_32.h | 2 -- arch/x86/include/asm/io_64.h | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 05cfed4485f..975207c08b3 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -106,5 +106,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); +#define IO_SPACE_LIMIT 0xffff #endif /* _ASM_X86_IO_H */ diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h index d8e242e1b39..e08d8ed05a1 100644 --- a/arch/x86/include/asm/io_32.h +++ b/arch/x86/include/asm/io_32.h @@ -37,8 +37,6 @@ * - Arnaldo Carvalho de Melo */ -#define IO_SPACE_LIMIT 0xffff - #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. 
*/ diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h index 563c16270ba..1131d8ea2c6 100644 --- a/arch/x86/include/asm/io_64.h +++ b/arch/x86/include/asm/io_64.h @@ -136,8 +136,6 @@ __OUTS(b) __OUTS(w) __OUTS(l) -#define IO_SPACE_LIMIT 0xffff - #if defined(__KERNEL__) && defined(__x86_64__) #include -- cgit v1.2.3 From dc66ff6220f0a6c938df41add526d645852d9a75 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Thu, 22 Jan 2009 15:16:14 +0000 Subject: SH: fix start_thread and user_stack_pointer macros Fix macros start_thread and user_stack_pointer. When these macros aren't called with a variable named regs as second argument, this will result in a build failure. Signed-off-by: Roel Kluin Signed-off-by: Paul Mundt --- arch/sh/include/asm/kprobes.h | 2 +- arch/sh/include/asm/processor_32.h | 12 ++++++------ arch/sh/include/asm/processor_64.h | 14 +++++++------- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 6078d8e551d..613644a758e 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -16,7 +16,7 @@ typedef u16 kprobe_opcode_t; ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) -#define regs_return_value(regs) ((regs)->regs[0]) +#define regs_return_value(_regs) ((_regs)->regs[0]) #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index d79063c5eb9..dcaed24c71f 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -108,12 +108,12 @@ extern int ubc_usercnt; /* * Do necessary setup to start up a newly executed thread. */ -#define start_thread(regs, new_pc, new_sp) \ +#define start_thread(_regs, new_pc, new_sp) \ set_fs(USER_DS); \ - regs->pr = 0; \ - regs->sr = SR_FD; /* User mode. */ \ - regs->pc = new_pc; \ - regs->regs[15] = new_sp + _regs->pr = 0; \ + _regs->sr = SR_FD; /* User mode. */ \ + _regs->pc = new_pc; \ + _regs->regs[15] = new_sp /* Forward declaration, a strange C thing */ struct task_struct; @@ -189,7 +189,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) -#define user_stack_pointer(regs) ((regs)->regs[15]) +#define user_stack_pointer(_regs) ((_regs)->regs[15]) #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ defined(CONFIG_CPU_SH4) diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 803177fcf08..5727d31b0cc 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -145,13 +145,13 @@ struct thread_struct { */ #define SR_USER (SR_MMU | SR_FD) -#define start_thread(regs, new_pc, new_sp) \ +#define start_thread(_regs, new_pc, new_sp) \ set_fs(USER_DS); \ - regs->sr = SR_USER; /* User mode. */ \ - regs->pc = new_pc - 4; /* Compensate syscall exit */ \ - regs->pc |= 1; /* Set SHmedia ! */ \ - regs->regs[18] = 0; \ - regs->regs[15] = new_sp + _regs->sr = SR_USER; /* User mode. */ \ + _regs->pc = new_pc - 4; /* Compensate syscall exit */ \ + _regs->pc |= 1; /* Set SHmedia ! 
*/ \ + _regs->regs[18] = 0; \ + _regs->regs[15] = new_sp /* Forward declaration, a strange C thing */ struct task_struct; @@ -226,7 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.pc) #define KSTK_ESP(tsk) ((tsk)->thread.sp) -#define user_stack_pointer(regs) ((regs)->regs[15]) +#define user_stack_pointer(_regs) ((_regs)->regs[15]) #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ -- cgit v1.2.3 From 955c0778723501cc16fec40501cd54b7e72d3e74 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 22 Jan 2009 09:55:31 +0000 Subject: sh: rework clocksource and sched_clock Rework and simplify the sched_clock and clocksource code. Instead of registering the clocksource in a shared file we move it into the tmu driver. Also, add code to handle sched_clock in the case of no clocksource. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/timer.h | 4 +-- arch/sh/kernel/time_32.c | 59 ++++++--------------------------------- arch/sh/kernel/timers/timer-tmu.c | 10 +++++-- 3 files changed, 19 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/timer.h b/arch/sh/include/asm/timer.h index a7ca3a195bb..4c3b66e30af 100644 --- a/arch/sh/include/asm/timer.h +++ b/arch/sh/include/asm/timer.h @@ -9,7 +9,6 @@ struct sys_timer_ops { int (*init)(void); int (*start)(void); int (*stop)(void); - cycle_t (*read)(void); #ifndef CONFIG_GENERIC_TIME unsigned long (*get_offset)(void); #endif @@ -39,6 +38,7 @@ struct sys_timer *get_sys_timer(void); /* arch/sh/kernel/time.c */ void handle_timer_tick(void); -extern unsigned long sh_hpt_frequency; + +extern struct clocksource clocksource_sh; #endif /* __ASM_SH_TIMER_H */ diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index 8457f83242c..ca22eef669a 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -41,14 +41,6 @@ static int null_rtc_set_time(const time_t secs) return 0; } -/* - * Null high precision timer functions for systems lacking one. - */ -static cycle_t null_hpt_read(void) -{ - return 0; -} - void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; @@ -200,42 +192,21 @@ device_initcall(timer_init_sysfs); void (*board_time_init)(void); -/* - * Shamelessly based on the MIPS and Sparc64 work. 
- */ -static unsigned long timer_ticks_per_nsec_quotient __read_mostly; -unsigned long sh_hpt_frequency = 0; - -#define NSEC_PER_CYC_SHIFT 10 - -static struct clocksource clocksource_sh = { +struct clocksource clocksource_sh = { .name = "SuperH", - .rating = 200, - .mask = CLOCKSOURCE_MASK(32), - .read = null_hpt_read, - .shift = 16, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void __init init_sh_clocksource(void) -{ - if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read) - return; - - clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency, - clocksource_sh.shift); - - timer_ticks_per_nsec_quotient = - clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT); - - clocksource_register(&clocksource_sh); -} - #ifdef CONFIG_GENERIC_TIME unsigned long long sched_clock(void) { - unsigned long long ticks = clocksource_sh.read(); - return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT; + unsigned long long cycles; + + /* jiffies based sched_clock if no clocksource is installed */ + if (!clocksource_sh.rating) + return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); + + cycles = clocksource_sh.read(); + return cyc2ns(&clocksource_sh, cycles); } #endif @@ -260,16 +231,4 @@ void __init time_init(void) */ sys_timer = get_sys_timer(); printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); - - - if (sys_timer->ops->read) - clocksource_sh.read = sys_timer->ops->read; - - init_sh_clocksource(); - - if (sh_hpt_frequency) - printk("Using %lu.%03lu MHz high precision timer.\n", - ((sh_hpt_frequency + 500) / 1000) / 1000, - ((sh_hpt_frequency + 500) / 1000) % 1000); - } diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 0db3f951033..33a24fcf0a1 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -254,7 +254,14 @@ static int tmu_timer_init(void) _tmu_start(TMU1); - sh_hpt_frequency = clk_get_rate(&tmu1_clk); + clocksource_sh.rating = 200; + clocksource_sh.mask = CLOCKSOURCE_MASK(32); + clocksource_sh.read = tmu_timer_read; + clocksource_sh.shift = 10; + clocksource_sh.mult = clocksource_hz2mult(clk_get_rate(&tmu1_clk), + clocksource_sh.shift); + clocksource_sh.flags = CLOCK_SOURCE_IS_CONTINUOUS; + clocksource_register(&clocksource_sh); tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC, tmu0_clockevent.shift); @@ -274,7 +281,6 @@ static struct sys_timer_ops tmu_timer_ops = { .init = tmu_timer_init, .start = tmu_timer_start, .stop = tmu_timer_stop, - .read = tmu_timer_read, }; struct sys_timer tmu_timer = { -- cgit v1.2.3 From 70f0800133b2a6d694c10908b8673a5327b3bfd6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 22 Jan 2009 09:55:40 +0000 Subject: sh: tmu disable support Add TMU disable support so we can use other clockevents. Also, setup the clockevent rating. 
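A rough sketch of the pattern, with hypothetical names (the actual driver uses the
TMU-specific handler shown in the diff below): once the device is no longer the active
clockevent, the handler acknowledges the hardware but drops the tick instead of
forwarding it.

#include <linux/interrupt.h>
#include <linux/clockchips.h>

static irqreturn_t sketch_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* ... acknowledge/clear the hardware interrupt here ... */

	switch (evt->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		/* Still the active clockevent: forward the tick. */
		evt->event_handler(evt);
		break;
	default:
		/* Shut down or unused: swallow the interrupt. */
		break;
	}

	return IRQ_HANDLED;
}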
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/timers/timer-tmu.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 33a24fcf0a1..2b62f9cff22 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -146,7 +146,14 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) _tmu_clear_status(TMU0); _tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT); - evt->event_handler(evt); + switch (tmu0_clockevent.mode) { + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_PERIODIC: + evt->event_handler(evt); + break; + default: + break; + } return IRQ_HANDLED; } @@ -271,6 +278,7 @@ static int tmu_timer_init(void) clockevent_delta2ns(1, &tmu0_clockevent); tmu0_clockevent.cpumask = cpumask_of(0); + tmu0_clockevent.rating = 100; clockevents_register_device(&tmu0_clockevent); -- cgit v1.2.3 From 07821d3310996746a2cf1e9c705ffe64223d1112 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 22 Jan 2009 09:55:49 +0000 Subject: sh: fix no sys_timer case Handle the case with a sys_timer set to NULL. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/time_32.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index ca22eef669a..766554ba2a5 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -181,7 +181,12 @@ static struct sysdev_class timer_sysclass = { static int __init timer_init_sysfs(void) { - int ret = sysdev_class_register(&timer_sysclass); + int ret; + + if (!sys_timer) + return 0; + + ret = sysdev_class_register(&timer_sysclass); if (ret != 0) return ret; @@ -230,5 +235,8 @@ void __init time_init(void) * initialized for us. */ sys_timer = get_sys_timer(); + if (unlikely(!sys_timer)) + panic("System timer missing.\n"); + printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); } -- cgit v1.2.3 From 3fb1b6ad0679ad671bd496712b2a088550ee86b2 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 22 Jan 2009 09:55:59 +0000 Subject: sh: CMT clockevent platform driver SuperH CMT clockevent driver. Both 16-bit and 32-bit CMT versions are supported, but only 32-bit is tested. This driver contains support for both clockevents and clocksources, but no unregistration is supported at this point. Works fine as clock source and/or event in periodic or oneshot mode. Tested on sh7722 and sh7723, but should work with any cpu/architecture. This version is lacking clocksource and early platform driver support for now - this to minimize the amount of dependencies. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ebabe518e72..5407e1295e5 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -397,6 +397,14 @@ source "arch/sh/boards/Kconfig" menu "Timer and clock configuration" +config SH_TIMER_CMT + def_bool n + prompt "CMT support" + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + help + This enables build of the CMT system timer driver. 
+ config SH_TMU def_bool y prompt "TMU timer support" -- cgit v1.2.3 From 424f59d04d7450555ef2bf1eb4a5e2cd2ecf08cd Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 22 Jan 2009 09:56:09 +0000 Subject: sh: CMT platform data for sh7723/sh7722/sh7366/sh7343 CMT platform data for SuperH Mobile sh7723/sh7722/sh7343/sh7366. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 34 ++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 34 ++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 34 ++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 34 ++++++++++++++++++++++++++++++++++ 4 files changed, 136 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 4ff4dc64520..c1549382c87 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -12,6 +12,7 @@ #include #include #include +#include #include static struct resource iic0_resources[] = { @@ -140,6 +141,38 @@ static struct platform_device jpu_device = { .num_resources = ARRAY_SIZE(jpu_resources), }; +static struct sh_cmt_config cmt_platform_data = { + .name = "CMT", + .channel_offset = 0x60, + .timer_bit = 5, + .clk = "cmt0", + .clockevent_rating = 125, + .clocksource_rating = 200, +}; + +static struct resource cmt_resources[] = { + [0] = { + .name = "CMT", + .start = 0x044a0060, + .end = 0x044a006b, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 104, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device cmt_device = { + .name = "sh_cmt", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -175,6 +208,7 @@ static struct platform_device sci_device = { }; static struct platform_device *sh7343_devices[] __initdata = { + &cmt_device, &iic0_device, &iic1_device, &sci_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index 839ae97a7fd..93ecf8ed5c6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -14,6 +14,7 @@ #include #include #include +#include #include static struct resource iic_resources[] = { @@ -147,6 +148,38 @@ static struct platform_device veu1_device = { .num_resources = ARRAY_SIZE(veu1_resources), }; +static struct sh_cmt_config cmt_platform_data = { + .name = "CMT", + .channel_offset = 0x60, + .timer_bit = 5, + .clk = "cmt0", + .clockevent_rating = 125, + .clocksource_rating = 200, +}; + +static struct resource cmt_resources[] = { + [0] = { + .name = "CMT", + .start = 0x044a0060, + .end = 0x044a006b, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 104, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device cmt_device = { + .name = "sh_cmt", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -167,6 +200,7 @@ static struct platform_device sci_device = { }; static struct platform_device *sh7366_devices[] __initdata = { + &cmt_device, &iic_device, &sci_device, &usb_host_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 5146afc156e..0e5d204bc79 100644 --- 
a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -176,6 +177,38 @@ static struct platform_device jpu_device = { .num_resources = ARRAY_SIZE(jpu_resources), }; +static struct sh_cmt_config cmt_platform_data = { + .name = "CMT", + .channel_offset = 0x60, + .timer_bit = 5, + .clk = "cmt0", + .clockevent_rating = 125, + .clocksource_rating = 200, +}; + +static struct resource cmt_resources[] = { + [0] = { + .name = "CMT", + .start = 0x044a0060, + .end = 0x044a006b, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 104, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device cmt_device = { + .name = "sh_cmt", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -209,6 +242,7 @@ static struct platform_device sci_device = { }; static struct platform_device *sh7722_devices[] __initdata = { + &cmt_device, &rtc_device, &usbf_device, &iic_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 849770d780a..5338dacbcfb 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -100,6 +101,38 @@ static struct platform_device veu1_device = { .num_resources = ARRAY_SIZE(veu1_resources), }; +static struct sh_cmt_config cmt_platform_data = { + .name = "CMT", + .channel_offset = 0x60, + .timer_bit = 5, + .clk = "cmt0", + .clockevent_rating = 125, + .clocksource_rating = 200, +}; + +static struct resource cmt_resources[] = { + [0] = { + .name = "CMT", + .start = 0x044a0060, + .end = 0x044a006b, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 104, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device cmt_device = { + .name = "sh_cmt", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -221,6 +254,7 @@ static struct platform_device iic_device = { }; static struct platform_device *sh7723_devices[] __initdata = { + &cmt_device, &sci_device, &rtc_device, &iic_device, -- cgit v1.2.3 From f5ad881b425616741bf8696f70b2749abe54a936 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 29 Jan 2009 18:08:58 +0900 Subject: sh: Use SYS_SUPPORTS_CMT for managing CMT timer dependencies. 
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 43 ++++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5407e1295e5..5784bcec1a1 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -107,6 +107,9 @@ config SYS_SUPPORTS_NUMA config SYS_SUPPORTS_PCI bool +config SYS_SUPPORTS_CMT + bool + config STACKTRACE_SUPPORT def_bool y @@ -188,6 +191,7 @@ choice config CPU_SUBTYPE_SH7619 bool "Support SH7619 processor" select CPU_SH2 + select SYS_SUPPORTS_CMT # SH-2A Processor Support @@ -200,15 +204,18 @@ config CPU_SUBTYPE_SH7203 bool "Support SH7203 processor" select CPU_SH2A select CPU_HAS_FPU + select SYS_SUPPORTS_CMT config CPU_SUBTYPE_SH7206 bool "Support SH7206 processor" select CPU_SH2A + select SYS_SUPPORTS_CMT config CPU_SUBTYPE_SH7263 bool "Support SH7263 processor" select CPU_SH2A select CPU_HAS_FPU + select SYS_SUPPORTS_CMT config CPU_SUBTYPE_MXG bool "Support MX-G processor" @@ -324,6 +331,7 @@ config CPU_SUBTYPE_SH7723 select CPU_SH4A select CPU_SHX2 select ARCH_SPARSEMEM_ENABLE + select SYS_SUPPORTS_CMT help Select SH7723 if you have an SH-MobileR2 CPU. @@ -362,6 +370,7 @@ config CPU_SUBTYPE_SHX3 config CPU_SUBTYPE_SH7343 bool "Support SH7343 processor" select CPU_SH4AL_DSP + select SYS_SUPPORTS_CMT config CPU_SUBTYPE_SH7722 bool "Support SH7722 processor" @@ -369,6 +378,7 @@ config CPU_SUBTYPE_SH7722 select CPU_SHX2 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA + select SYS_SUPPORTS_CMT config CPU_SUBTYPE_SH7366 bool "Support SH7366 processor" @@ -376,6 +386,7 @@ config CPU_SUBTYPE_SH7366 select CPU_SHX2 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA + select SYS_SUPPORTS_CMT # SH-5 Processor Support @@ -397,34 +408,36 @@ source "arch/sh/boards/Kconfig" menu "Timer and clock configuration" -config SH_TIMER_CMT - def_bool n - prompt "CMT support" - select GENERIC_TIME - select GENERIC_CLOCKEVENTS - help - This enables build of the CMT system timer driver. - config SH_TMU - def_bool y - prompt "TMU timer support" + bool "TMU timer support" depends on CPU_SH3 || CPU_SH4 + default y select GENERIC_TIME select GENERIC_CLOCKEVENTS help This enables the use of the TMU as the system timer. config SH_CMT - def_bool y - prompt "CMT timer support" - depends on CPU_SH2 && !CPU_SUBTYPE_MXG + bool "CMT timer support" + depends on SYS_SUPPORTS_CMT + default y help This enables the use of the CMT as the system timer. +# +# Support for the new-style CMT driver. This will replace SH_CMT +# once its other dependencies are merged. +# +config SH_TIMER_CMT + bool "CMT clockevents driver" + depends on SYS_SUPPORTS_CMT && !SH_CMT + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + config SH_MTU2 - def_bool n - prompt "MTU2 timer support" + bool "MTU2 timer support" depends on CPU_SH2A + default y help This enables the use of the MTU2 as the system timer. -- cgit v1.2.3 From d63f3a5857906851b9c1a39e3871a97f4acc1005 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 29 Jan 2009 18:10:13 +0900 Subject: sh: Fix up MTU2 support for SH7203. 
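The register-map side of this is a small #ifdef, sketched below with the addresses
taken from the patch; the accessor around it is hypothetical and only shows how such a
define is typically used. The Kconfig hunk below additionally picks the matching
SH_TIMER_IRQ default (142 with SH_CMT, 153 with SH_MTU2), since the two timers use
different vectors on SH7203.

#include <asm/io.h>

#if defined(CONFIG_CPU_SUBTYPE_SH7201) || \
    defined(CONFIG_CPU_SUBTYPE_SH7203)
#define MTU2_TGRA_1	0xfffe4388
#else
#define MTU2_TGRA_1	0xfffe438a
#endif

/* Hypothetical helper: program channel 1's compare-match register. */
static inline void mtu2_sketch_set_tgra1(unsigned short cycles)
{
	ctrl_outw(cycles, MTU2_TGRA_1);
}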
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 3 ++- arch/sh/kernel/timers/timer-mtu2.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5784bcec1a1..c6faad734e5 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -447,7 +447,8 @@ config SH_TIMER_IRQ CPU_SUBTYPE_SH7763 default "86" if CPU_SUBTYPE_SH7619 default "140" if CPU_SUBTYPE_SH7206 - default "142" if CPU_SUBTYPE_SH7203 + default "142" if CPU_SUBTYPE_SH7203 && SH_CMT + default "153" if CPU_SUBTYPE_SH7203 && SH_MTU2 default "238" if CPU_SUBTYPE_MXG default "16" diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c index c3d237e1d56..9a77ae86b40 100644 --- a/arch/sh/kernel/timers/timer-mtu2.c +++ b/arch/sh/kernel/timers/timer-mtu2.c @@ -35,7 +35,8 @@ #define MTU2_TSR_1 0xfffe4385 #define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */ -#if defined(CONFIG_CPU_SUBTYPE_SH7201) +#if defined(CONFIG_CPU_SUBTYPE_SH7201) || \ + defined(CONFIG_CPU_SUBTYPE_SH7203) #define MTU2_TGRA_1 0xfffe4388 #else #define MTU2_TGRA_1 0xfffe438a -- cgit v1.2.3 From c161e40f45d32b48f8facbee17720e708607002f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 29 Jan 2009 18:11:25 +0900 Subject: sh: Don't enable GENERIC_TIME for the CMT clockevent driver yet. GENERIC_TIME still depends on the clocksource bits being there, which is presently not supported. This allows the CMT clockevent driver to be used alongside alternate system timers that do not yet provide a clocksource of their own (MTU2 and so on in the case of SH-2A). Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 - arch/sh/kernel/time_32.c | 2 -- 2 files changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index c6faad734e5..50c992444e5 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -431,7 +431,6 @@ config SH_CMT config SH_TIMER_CMT bool "CMT clockevents driver" depends on SYS_SUPPORTS_CMT && !SH_CMT - select GENERIC_TIME select GENERIC_CLOCKEVENTS config SH_MTU2 diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index 766554ba2a5..c34e1e0f9b0 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -104,7 +104,6 @@ int do_settimeofday(struct timespec *tv) EXPORT_SYMBOL(do_settimeofday); #endif /* !CONFIG_GENERIC_TIME */ -#ifndef CONFIG_GENERIC_CLOCKEVENTS /* last time the RTC clock got updated */ static long last_rtc_update; @@ -148,7 +147,6 @@ void handle_timer_tick(void) update_process_times(user_mode(get_irq_regs())); #endif } -#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #ifdef CONFIG_PM int timer_suspend(struct sys_device *dev, pm_message_t state) -- cgit v1.2.3 From 7c20dcc545d78946e40e8fab99637fe815b1d211 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 29 Jan 2009 11:29:22 +0100 Subject: x86, summit: consolidate code, fix Build fix for !NUMA Summit. 
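The shape of the fix, heavily abridged (the full diff follows): the RIO table parsing
is only meaningful on NUMA Summit boxes and is now compiled under
CONFIG_X86_SUMMIT_NUMA, while the APIC driver itself is built for plain
CONFIG_X86_SUMMIT.

#ifdef CONFIG_X86_SUMMIT_NUMA
/* RIO/WPEG table walking - NUMA Summit only. */
static struct rio_table_hdr *rio_table_hdr __initdata;

void __init setup_summit(void)
{
	/* ... parse scal/rio devices as before ... */
}
#endif /* CONFIG_X86_SUMMIT_NUMA */

/* Built whenever CONFIG_X86_SUMMIT selects summit_32.o: */
struct genapic apic_summit = {
	.name	= "summit",
	/* ... */
};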
Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/summit_32.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 37fa30bada1..a382ca6f6a1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -73,7 +73,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_ES7000) += es7000_32.o -obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o +obj-$(CONFIG_X86_SUMMIT) += summit_32.o obj-y += vsmp_64.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module_$(BITS).o diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 3b60dd5e57f..84ff9ebbcc9 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -389,6 +389,7 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } +#ifdef CONFIG_X86_SUMMIT_NUMA static struct rio_table_hdr *rio_table_hdr __initdata; static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; @@ -543,7 +544,7 @@ void __init setup_summit(void) next_wpeg = 0; } while (next_wpeg != 0); } - +#endif struct genapic apic_summit = { -- cgit v1.2.3 From 1dcdd3d15ecea0c22a09d4d001a39d425fceff2c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 17:55:37 +0100 Subject: x86: remove mach_apic.h Spread mach_apic.h definitions into genapic.h. (with some knock-on effects on smp.h and apic.h.) Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 2 +- arch/x86/include/asm/genapic.h | 139 +++++++++++++++++++++++++ arch/x86/include/asm/mach-default/mach_apic.h | 144 -------------------------- arch/x86/include/asm/mach-generic/mach_apic.h | 8 -- arch/x86/include/asm/smp.h | 19 ---- arch/x86/kernel/acpi/boot.c | 14 +-- arch/x86/kernel/apic.c | 22 +++- arch/x86/kernel/cpu/addon_cpuid_features.c | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/intel.c | 2 +- arch/x86/kernel/io_apic.c | 2 +- arch/x86/kernel/ipi.c | 2 +- arch/x86/kernel/irq_32.c | 2 +- arch/x86/kernel/mpparse.c | 3 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smp.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/kernel/tlb_uv.c | 2 +- arch/x86/kernel/visws_quirks.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mach-generic/probe.c | 5 - 22 files changed, 175 insertions(+), 207 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_apic.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_apic.h (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 3a3202074c6..6a77068e261 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -215,7 +215,7 @@ static inline void disable_local_APIC(void) { } #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else -static inline unsigned default_get_apic_id(unsigned long x) +static inline unsigned default_get_apic_id(unsigned long x) { unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 1772dad01b1..ce3655a4948 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -120,4 +120,143 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert) return; } +extern void 
generic_bigsmp_probe(void); + + +#ifdef CONFIG_X86_LOCAL_APIC + +#include + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline const struct cpumask *default_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + +DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); + + +static inline unsigned int read_apic_id(void) +{ + unsigned int reg; + + reg = *(u32 *)(APIC_BASE + APIC_ID); + + return apic->get_apic_id(reg); +} + +#ifdef CONFIG_X86_64 +extern void default_setup_apic_routing(void); +#else + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +extern void default_init_apic_ldr(void); + +static inline int default_apic_id_registered(void) +{ + return physid_isset(read_apic_id(), phys_cpu_present_map); +} + +static inline unsigned int +default_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + return cpumask_bits(cpumask)[0]; +} + +static inline unsigned int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + unsigned long mask1 = cpumask_bits(cpumask)[0]; + unsigned long mask2 = cpumask_bits(andmask)[0]; + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; + + return (unsigned int)(mask1 & mask2 & mask3); +} + +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +static inline void default_setup_apic_routing(void) +{ +#ifdef CONFIG_X86_IO_APIC + printk("Enabling APIC mode: %s. Using %d I/O APICs\n", + "Flat", nr_ioapics); +#endif +} + +extern int default_apicid_to_node(int logical_apicid); + +#endif + +static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long default_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) +{ + return phys_map; +} + +/* Mapping from cpu number to logical apicid */ +static inline int default_cpu_to_logical_apicid(int cpu) +{ + return 1 << cpu; +} + +static inline int __default_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline int +__default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); +} + +#ifdef CONFIG_X86_32 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +static inline int +default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return __default_check_phys_apicid_present(boot_cpu_physical_apicid); +} +#else +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); +#endif + +static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) +{ + return physid_mask_of_physid(phys_apicid); +} + +#endif /* CONFIG_X86_LOCAL_APIC */ #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h deleted file mode 100644 index bae053cdcde..00000000000 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ /dev/null @@ -1,144 +0,0 @@ -#ifndef 
_ASM_X86_MACH_DEFAULT_MACH_APIC_H -#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H - -#ifdef CONFIG_X86_LOCAL_APIC - -#include - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline const struct cpumask *default_target_cpus(void) -{ -#ifdef CONFIG_SMP - return cpu_online_mask; -#else - return cpumask_of(0); -#endif -} - -#ifdef CONFIG_X86_64 -#include -#define read_apic_id() (apic->get_apic_id(apic_read(APIC_ID))) -extern void default_setup_apic_routing(void); -#else -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void default_init_apic_ldr(void) -{ - unsigned long val; - - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write(APIC_LDR, val); -} - -static inline int default_apic_id_registered(void) -{ - return physid_isset(read_apic_id(), phys_cpu_present_map); -} - -static inline unsigned int -default_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - return cpumask_bits(cpumask)[0]; -} - -static inline unsigned int -default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - unsigned long mask1 = cpumask_bits(cpumask)[0]; - unsigned long mask2 = cpumask_bits(andmask)[0]; - unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; - - return (unsigned int)(mask1 & mask2 & mask3); -} - -static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -static inline void default_setup_apic_routing(void) -{ -#ifdef CONFIG_X86_IO_APIC - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "Flat", nr_ioapics); -#endif -} - -static inline int default_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - -#endif - -static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long default_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) -{ - return phys_map; -} - -/* Mapping from cpu number to logical apicid */ -static inline int default_cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - -static inline int __default_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static inline int -__default_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - -#ifdef CONFIG_X86_32 -static inline int default_cpu_present_to_apicid(int mps_cpu) -{ - return __default_cpu_present_to_apicid(mps_cpu); -} - -static inline int -default_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return __default_check_phys_apicid_present(boot_cpu_physical_apicid); -} -#else -extern int default_cpu_present_to_apicid(int mps_cpu); -extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); -#endif - -static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -#endif /* CONFIG_X86_LOCAL_APIC */ -#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */ diff --git 
a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h deleted file mode 100644 index 96f217f819e..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H -#define _ASM_X86_MACH_GENERIC_MACH_APIC_H - -#include - -extern void generic_bigsmp_probe(void); - -#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index d4ac4de4bce..47d0e21f2b9 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -173,8 +173,6 @@ extern int safe_smp_processor_id(void); #endif -#include - #ifdef CONFIG_X86_LOCAL_APIC #ifndef CONFIG_X86_64 @@ -184,26 +182,9 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } -static inline unsigned int read_apic_id(void) -{ - unsigned int reg; - - reg = *(u32 *)(APIC_BASE + APIC_ID); - - return apic->get_apic_id(reg); -} #endif - -# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64) extern int hard_smp_processor_id(void); -# else -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return read_apic_id(); -} -# endif /* APIC_DEFINITION */ #else /* CONFIG_X86_LOCAL_APIC */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7b02a1cedca..cb8b52785e3 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -42,10 +42,6 @@ #include #include -#ifdef CONFIG_X86_LOCAL_APIC -# include -#endif - static int __initdata acpi_force = 0; u32 acpi_rsdt_forced; #ifdef CONFIG_ACPI @@ -56,15 +52,7 @@ int acpi_disabled = 1; EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 - -#include - -#else /* X86 */ - -#ifdef CONFIG_X86_LOCAL_APIC -#include -#endif /* CONFIG_X86_LOCAL_APIC */ - +# include #endif /* X86 */ #define BAD_MADT_ENTRY(entry, end) ( \ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index e6220809ca1..41a0ba34d6b 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -49,7 +49,6 @@ #include #include -#include #include /* @@ -1910,11 +1909,30 @@ void __cpuinit generic_processor_info(int apicid, int version) set_cpu_present(cpu, true); } -#ifdef CONFIG_X86_64 int hard_smp_processor_id(void) { return read_apic_id(); } + +void default_init_apic_ldr(void) +{ + unsigned long val; + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); + apic_write(APIC_LDR, val); +} + +#ifdef CONFIG_X86_32 +int default_apicid_to_node(int logical_apicid) +{ +#ifdef CONFIG_SMP + return apicid_2_node[hard_smp_processor_id()]; +#else + return 0; +#endif +} #endif /* diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index e8bb892c09f..4a48bb40974 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -7,7 +7,7 @@ #include #include -#include +#include struct cpuid_bit { u16 feature; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c878f6aa91..ff4d7b9e32e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -12,7 +12,7 @@ # include #endif -#include +#include #include "cpu.h" diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 055b9c3a660..c4bdc7f0020 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -26,7 +26,7 @@ 
#ifdef CONFIG_X86_LOCAL_APIC #include #include -#include +#include #include #include #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 5deefae9064..1cef0aa5e5d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -24,7 +24,7 @@ #ifdef CONFIG_X86_LOCAL_APIC #include #include -#include +#include #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index abae81989c2..e0744ea6d0f 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -63,7 +63,7 @@ #include #include -#include +#include #define __apicdebuginit(type) static type __init diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index e16c41b2e4e..50076d92fbc 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -19,7 +19,7 @@ #include #ifdef CONFIG_X86_32 -#include +#include #include /* diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index e0f29be8ab0..d802c844991 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -231,7 +231,7 @@ unsigned int do_IRQ(struct pt_regs *regs) } #ifdef CONFIG_HOTPLUG_CPU -#include +#include /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a1452a53d14..94fe71029c3 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -29,8 +29,7 @@ #include #include -#include - +#include /* * Checksum an MP configuration block. */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6b27f6dc7bf..92e42939fb0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -97,7 +97,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index c48ba6cc32a..892e7c389be 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include /* * Some notes on x86 processor bugs affecting SMP operation: * diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 3fed177f345..489fde9d947 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -65,7 +65,7 @@ #include #include -#include +#include #include #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 89fce1b6d01..755ede02b13 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -20,7 +20,7 @@ #include #include -#include +#include static struct bau_control **uv_bau_table_bases __read_mostly; static int uv_bau_retry_limit __read_mostly; diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 2ed5bdf15c9..3bd7f47a91b 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -34,7 +34,7 @@ #include -#include "mach_apic.h" +#include #include diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 07817b2a787..7d5123e474e 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index ab68c6e5c48..c03c7222132 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -154,8 +154,3 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 
} return 0; } - -int hard_smp_processor_id(void) -{ - return apic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); -} -- cgit v1.2.3 From 83d7aeabe4cf20e59b5d7fd56a75cfd0e0b6b880 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Wed, 28 Jan 2009 17:52:57 -0800 Subject: x86: remove mach_apic.h, fix Use apic_read() instead of open-coded mmio. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index ce3655a4948..ccfcd19d5b7 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -145,7 +145,7 @@ static inline unsigned int read_apic_id(void) { unsigned int reg; - reg = *(u32 *)(APIC_BASE + APIC_ID); + reg = apic_read(APIC_ID); return apic->get_apic_id(reg); } -- cgit v1.2.3 From 2e096df8edefad78155bb406a5a86c182b17786e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:01:05 +0100 Subject: x86, ES7000: Consolidate code Move all ES7000 code into arch/x86/kernel/es7000_32.c. With this it ceases to rely on any build-time subarch features. Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 428 +++++++++++++++++++++++++++++++++++++++++ arch/x86/mach-generic/Makefile | 1 - arch/x86/mach-generic/es7000.c | 427 ---------------------------------------- 3 files changed, 428 insertions(+), 428 deletions(-) delete mode 100644 arch/x86/mach-generic/es7000.c (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 8faea13c8fa..078364ccfa0 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -372,3 +372,431 @@ void __init es7000_enable_apic_mode(void) mip_status); } } + +/* + * APIC driver for the Unisys ES7000 chipset. + */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) +#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) +#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +extern void es7000_enable_apic_mode(void); +extern int apic_version [MAX_APICS]; +extern u8 cpu_2_logical_apicid[]; +extern unsigned int boot_cpu_physical_apicid; + +extern int parse_unisys_oem (char *oemptr); +extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); +extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); +extern void setup_unisys(void); + +#define apicid_cluster(apicid) (apicid & 0xF0) +#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) + +static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. 
+ */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + + +static void es7000_wait_for_init_deassert(atomic_t *deassert) +{ +#ifndef CONFIG_ES7000_CLUSTERED_APIC + while (!atomic_read(deassert)) + cpu_relax(); +#endif + return; +} + +static unsigned int es7000_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +#ifdef CONFIG_ACPI +static int es7000_check_dsdt(void) +{ + struct acpi_table_header header; + + if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && + !strncmp(header.oem_id, "UNISYS", 6)) + return 1; + return 0; +} +#endif + +static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static void es7000_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static void es7000_send_IPI_all(int vector) +{ + es7000_send_IPI_mask(cpu_online_mask, vector); +} + +static int es7000_apic_id_registered(void) +{ + return 1; +} + +static const cpumask_t *target_cpus_cluster(void) +{ + return &CPU_MASK_ALL; +} + +static const cpumask_t *es7000_target_cpus(void) +{ + return &cpumask_of_cpu(smp_processor_id()); +} + +static unsigned long +es7000_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} +static unsigned long es7000_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static unsigned long calculate_ldr(int cpu) +{ + unsigned long id = xapic_phys_to_log_apicid(cpu); + + return (SET_APIC_LOGICAL_ID(id)); +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LdR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +static void es7000_init_apic_ldr_cluster(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_init_apic_ldr(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_setup_apic_routing(void) +{ + int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); + printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", + (apic_version[apic] == 0x14) ? 
+ "Physical Cluster" : "Logical Cluster", + nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); +} + +static int es7000_apicid_to_node(int logical_apicid) +{ + return 0; +} + + +static int es7000_cpu_present_to_apicid(int mps_cpu) +{ + if (!mps_cpu) + return boot_cpu_physical_apicid; + else if (mps_cpu < nr_cpu_ids) + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) +{ + static int id = 0; + physid_mask_t mask; + + mask = physid_mask_of_physid(id); + ++id; + + return mask; +} + +/* Mapping from cpu number to logical apicid */ +static int es7000_cpu_to_logical_apicid(int cpu) +{ +#ifdef CONFIG_SMP + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif +} + +static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0xff); +} + +static int es7000_check_phys_apicid_present(int cpu_physical_apicid) +{ + boot_cpu_physical_apicid = read_apic_id(); + return (1); +} + +static unsigned int +es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpumask_weight(cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = cpumask_first(cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + + while (cpus_found < num_bits_set) { + if (cpumask_test_cpu(cpu, cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return 0xFF; + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpus_weight(*cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return es7000_cpu_to_logical_apicid(0); + /* + * The cpus in the mask must all be on the apic cluster. 
If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = first_cpu(*cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return es7000_cpu_to_logical_apicid(0); + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int +es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) +{ + int apicid = es7000_cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = es7000_cpu_mask_to_apicid(cpumask); + + free_cpumask_var(cpumask); + + return apicid; +} + +static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +void __init es7000_update_genapic_to_cluster(void) +{ + apic->target_cpus = target_cpus_cluster; + apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; + apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; + + apic->init_apic_ldr = es7000_init_apic_ldr_cluster; + + apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; +} + +static int probe_es7000(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +static __init int +es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + if (mpc->oemptr) { + struct mpc_oemtable *oem_table = + (struct mpc_oemtable *)mpc->oemptr; + + if (!strncmp(oem, "UNISYS", 6)) + return parse_unisys_oem((char *)oem_table); + } + return 0; +} + +#ifdef CONFIG_ACPI +/* Hook from generic ACPI tables.c */ +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + unsigned long oem_addr = 0; + int check_dsdt; + int ret = 0; + + /* check dsdt at first to avoid clear fix_map for oem_addr */ + check_dsdt = es7000_check_dsdt(); + + if (!find_unisys_acpi_oem_table(&oem_addr)) { + if (check_dsdt) + ret = parse_unisys_oem((char *)oem_addr); + else { + setup_unisys(); + ret = 1; + } + /* + * we need to unmap it + */ + unmap_unisys_acpi_oem_table(oem_addr); + } + return ret; +} +#else +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 0; +} +#endif + + +struct genapic apic_es7000 = { + + .name = "es7000", + .probe = probe_es7000, + .acpi_madt_oem_check = es7000_acpi_madt_oem_check, + .apic_id_registered = es7000_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPUs: */ + .irq_dest_mode = 0, + + .target_cpus = es7000_target_cpus, + .disable_esr = 1, + .dest_logical = 0, + .check_apicid_used = es7000_check_apicid_used, + .check_apicid_present = es7000_check_apicid_present, + + .vector_allocation_domain = es7000_vector_allocation_domain, + .init_apic_ldr = es7000_init_apic_ldr, + + .ioapic_phys_id_map = es7000_ioapic_phys_id_map, + .setup_apic_routing = es7000_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = es7000_apicid_to_node, + .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, + .cpu_present_to_apicid = es7000_cpu_present_to_apicid, + .apicid_to_cpu_present = es7000_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = es7000_check_phys_apicid_present, + .enable_apic_mode = es7000_enable_apic_mode, + .phys_pkg_id = 
es7000_phys_pkg_id, + .mps_oem_check = es7000_mps_oem_check, + + .get_apic_id = es7000_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, + + .send_IPI_mask = es7000_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = es7000_send_IPI_allbutself, + .send_IPI_all = es7000_send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + + .trampoline_phys_low = 0x467, + .trampoline_phys_high = 0x469, + + .wait_for_init_deassert = es7000_wait_for_init_deassert, + + /* Nothing to do for most platforms, since cleared by the INIT cycle: */ + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .inquire_remote_apic = default_inquire_remote_apic, +}; diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 78ab5735cb8..05e47acfd66 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile @@ -7,4 +7,3 @@ EXTRA_CFLAGS := -Iarch/x86/kernel obj-y := probe.o default.o obj-$(CONFIG_X86_NUMAQ) += numaq.o obj-$(CONFIG_X86_BIGSMP) += bigsmp.o -obj-$(CONFIG_X86_ES7000) += es7000.o diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c deleted file mode 100644 index bb11166b7c3..00000000000 --- a/arch/x86/mach-generic/es7000.c +++ /dev/null @@ -1,427 +0,0 @@ -/* - * APIC driver for the Unisys ES7000 chipset. - */ -#define APIC_DEFINITION 1 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) -#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -extern void es7000_enable_apic_mode(void); -extern int apic_version [MAX_APICS]; -extern u8 cpu_2_logical_apicid[]; -extern unsigned int boot_cpu_physical_apicid; - -extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); -extern void setup_unisys(void); - -#define apicid_cluster(apicid) (apicid & 0xF0) -#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) - -static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. 
- */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - - -static void es7000_wait_for_init_deassert(atomic_t *deassert) -{ -#ifndef CONFIG_ES7000_CLUSTERED_APIC - while (!atomic_read(deassert)) - cpu_relax(); -#endif - return; -} - -static unsigned int es7000_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -#ifdef CONFIG_ACPI -static int es7000_check_dsdt(void) -{ - struct acpi_table_header header; - - if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && - !strncmp(header.oem_id, "UNISYS", 6)) - return 1; - return 0; -} -#endif - -static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static void es7000_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself(cpu_online_mask, vector); -} - -static void es7000_send_IPI_all(int vector) -{ - es7000_send_IPI_mask(cpu_online_mask, vector); -} - -static int es7000_apic_id_registered(void) -{ - return 1; -} - -static const cpumask_t *target_cpus_cluster(void) -{ - return &CPU_MASK_ALL; -} - -static const cpumask_t *es7000_target_cpus(void) -{ - return &cpumask_of_cpu(smp_processor_id()); -} - -static unsigned long -es7000_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} -static unsigned long es7000_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -static unsigned long calculate_ldr(int cpu) -{ - unsigned long id = xapic_phys_to_log_apicid(cpu); - - return (SET_APIC_LOGICAL_ID(id)); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LdR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static void es7000_init_apic_ldr_cluster(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static void es7000_init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static void es7000_setup_apic_routing(void) -{ - int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", - (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", - nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); -} - -static int es7000_apicid_to_node(int logical_apicid) -{ - return 0; -} - - -static int es7000_cpu_present_to_apicid(int mps_cpu) -{ - if (!mps_cpu) - return boot_cpu_physical_apicid; - else if (mps_cpu < nr_cpu_ids) - return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) -{ - static int id = 0; - physid_mask_t mask; - - mask = physid_mask_of_physid(id); - ++id; - - return mask; -} - -/* Mapping from cpu number to logical apicid */ -static int es7000_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xff); -} - -static int es7000_check_phys_apicid_present(int cpu_physical_apicid) -{ - boot_cpu_physical_apicid = read_apic_id(); - return (1); -} - -static unsigned int -es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = cpumask_first(cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return 0xFF; - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return es7000_cpu_to_logical_apicid(0); - /* - * The cpus in the mask must all be on the apic cluster. 
If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return es7000_cpu_to_logical_apicid(0); - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static unsigned int -es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) -{ - int apicid = es7000_cpu_to_logical_apicid(0); - cpumask_var_t cpumask; - - if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return apicid; - - cpumask_and(cpumask, inmask, andmask); - cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = es7000_cpu_mask_to_apicid(cpumask); - - free_cpumask_var(cpumask); - - return apicid; -} - -static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -void __init es7000_update_genapic_to_cluster(void) -{ - apic->target_cpus = target_cpus_cluster; - apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; - apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; - - apic->init_apic_ldr = es7000_init_apic_ldr_cluster; - - apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; -} - -static int probe_es7000(void) -{ - /* probed later in mptable/ACPI hooks */ - return 0; -} - -static __init int -es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (mpc->oemptr) { - struct mpc_oemtable *oem_table = - (struct mpc_oemtable *)mpc->oemptr; - - if (!strncmp(oem, "UNISYS", 6)) - return parse_unisys_oem((char *)oem_table); - } - return 0; -} - -#ifdef CONFIG_ACPI -/* Hook from generic ACPI tables.c */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - unsigned long oem_addr = 0; - int check_dsdt; - int ret = 0; - - /* check dsdt at first to avoid clear fix_map for oem_addr */ - check_dsdt = es7000_check_dsdt(); - - if (!find_unisys_acpi_oem_table(&oem_addr)) { - if (check_dsdt) - ret = parse_unisys_oem((char *)oem_addr); - else { - setup_unisys(); - ret = 1; - } - /* - * we need to unmap it - */ - unmap_unisys_acpi_oem_table(oem_addr); - } - return ret; -} -#else -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} -#endif - - -struct genapic apic_es7000 = { - - .name = "es7000", - .probe = probe_es7000, - .acpi_madt_oem_check = es7000_acpi_madt_oem_check, - .apic_id_registered = es7000_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - /* phys delivery to target CPUs: */ - .irq_dest_mode = 0, - - .target_cpus = es7000_target_cpus, - .disable_esr = 1, - .dest_logical = 0, - .check_apicid_used = es7000_check_apicid_used, - .check_apicid_present = es7000_check_apicid_present, - - .vector_allocation_domain = es7000_vector_allocation_domain, - .init_apic_ldr = es7000_init_apic_ldr, - - .ioapic_phys_id_map = es7000_ioapic_phys_id_map, - .setup_apic_routing = es7000_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, - .cpu_present_to_apicid = es7000_cpu_present_to_apicid, - .apicid_to_cpu_present = es7000_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = es7000_check_phys_apicid_present, - .enable_apic_mode = es7000_enable_apic_mode, - .phys_pkg_id = 
es7000_phys_pkg_id, - .mps_oem_check = es7000_mps_oem_check, - - .get_apic_id = es7000_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, - - .send_IPI_mask = es7000_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = es7000_send_IPI_allbutself, - .send_IPI_all = es7000_send_IPI_all, - .send_IPI_self = NULL, - - .wakeup_cpu = NULL, - - .trampoline_phys_low = 0x467, - .trampoline_phys_high = 0x469, - - .wait_for_init_deassert = es7000_wait_for_init_deassert, - - /* Nothing to do for most platforms, since cleared by the INIT cycle: */ - .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, - .inquire_remote_apic = default_inquire_remote_apic, -}; -- cgit v1.2.3 From 61b90b7ca10cc65d8b850ab542859dc593e5a381 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:01:05 +0100 Subject: x86, NUMAQ: Consolidate code Move all NUMAQ code into arch/x86/kernel/numaq.c. With this it ceases to rely on any build-time subarch features. Signed-off-by: Ingo Molnar --- arch/x86/kernel/numaq_32.c | 278 +++++++++++++++++++++++++++++++++++++++++ arch/x86/mach-generic/Makefile | 1 - arch/x86/mach-generic/numaq.c | 277 ---------------------------------------- 3 files changed, 278 insertions(+), 278 deletions(-) delete mode 100644 arch/x86/mach-generic/numaq.c (limited to 'arch') diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 3928280278f..83bb05524f4 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -291,3 +291,281 @@ int __init get_memcfg_numaq(void) smp_dump_qct(); return 1; } + +/* + * APIC driver for the IBM NUMAQ chipset. + */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) + +static inline unsigned int numaq_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0x0F; +} + +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); + +static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static inline void numaq_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static inline void numaq_send_IPI_all(int vector) +{ + numaq_send_IPI_mask(cpu_online_mask, vector); +} + +extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); + +#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) +#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) + +/* + * Because we use NMIs rather than the INIT-STARTUP sequence to + * bootstrap the CPUs, the APIC may be in a weird state. 
Kick it: + */ +static inline void numaq_smp_callin_clear_local_apic(void) +{ + clear_local_APIC(); +} + +static inline void +numaq_store_NMI_vector(unsigned short *high, unsigned short *low) +{ + printk("Storing NMI vector\n"); + *high = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); + *low = + *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); +} + +static inline const cpumask_t *numaq_target_cpus(void) +{ + return &CPU_MASK_ALL; +} + +static inline unsigned long +numaq_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long numaq_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +#define apicid_cluster(apicid) (apicid & 0xF0) + +static inline int numaq_apic_id_registered(void) +{ + return 1; +} + +static inline void numaq_init_apic_ldr(void) +{ + /* Already done in NUMA-Q firmware */ +} + +static inline void numaq_setup_apic_routing(void) +{ + printk("Enabling APIC mode: %s. Using %d I/O APICs\n", + "NUMA-Q", nr_ioapics); +} + +/* + * Skip adding the timer int on secondary nodes, which causes + * a small but painful rift in the time-space continuum. + */ +static inline int numaq_multi_timer_check(int apic, int irq) +{ + return apic != 0 && irq == 0; +} + +static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* We don't have a good way to do this yet - hack */ + return physids_promote(0xFUL); +} + +/* Mapping from cpu number to logical apicid */ +extern u8 cpu_2_logical_apicid[]; + +static inline int numaq_cpu_to_logical_apicid(int cpu) +{ + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; +} + +/* + * Supporting over 60 cpus on NUMA-Q requires a locality-dependent + * cpu to APIC ID relation to properly interact with the intelligent + * mode of the cluster controller. + */ +static inline int numaq_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < 60) + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + else + return BAD_APICID; +} + +static inline int numaq_apicid_to_node(int logical_apicid) +{ + return logical_apicid >> 4; +} + +static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) +{ + int node = numaq_apicid_to_node(logical_apicid); + int cpu = __ffs(logical_apicid & 0xf); + + return physid_mask_of_physid(cpu + 4*node); +} + +extern void *xquad_portio; + +static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +/* + * We use physical apicids here, not logical, so just return the default + * physical broadcast to stop people from breaking us + */ +static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + return 0x0F; +} + +static inline unsigned int +numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + return 0x0F; +} + +/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ +static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} +static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + numaq_mps_oem_check(mpc, oem, productid); + return found_numaq; +} + +static int probe_numaq(void) +{ + /* already know from get_memcfg_numaq() */ + return found_numaq; +} + +static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. 
Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + +static void numaq_setup_portio_remap(void) +{ + int num_quads = num_online_nodes(); + + if (num_quads <= 1) + return; + + printk("Remapping cross-quad port I/O for %d quads\n", num_quads); + xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); + printk("xquad_portio vaddr 0x%08lx, len %08lx\n", + (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); +} + +struct genapic apic_numaq = { + + .name = "NUMAQ", + .probe = probe_numaq, + .acpi_madt_oem_check = NULL, + .apic_id_registered = numaq_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* physical delivery on LOCAL quad: */ + .irq_dest_mode = 0, + + .target_cpus = numaq_target_cpus, + .disable_esr = 1, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = numaq_check_apicid_used, + .check_apicid_present = numaq_check_apicid_present, + + .vector_allocation_domain = numaq_vector_allocation_domain, + .init_apic_ldr = numaq_init_apic_ldr, + + .ioapic_phys_id_map = numaq_ioapic_phys_id_map, + .setup_apic_routing = numaq_setup_apic_routing, + .multi_timer_check = numaq_multi_timer_check, + .apicid_to_node = numaq_apicid_to_node, + .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, + .cpu_present_to_apicid = numaq_cpu_present_to_apicid, + .apicid_to_cpu_present = numaq_apicid_to_cpu_present, + .setup_portio_remap = numaq_setup_portio_remap, + .check_phys_apicid_present = numaq_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = numaq_phys_pkg_id, + .mps_oem_check = __numaq_mps_oem_check, + + .get_apic_id = numaq_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0x0F << 24, + + .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, + + .send_IPI_mask = numaq_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = numaq_send_IPI_allbutself, + .send_IPI_all = numaq_send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, + + /* We don't do anything here because we use NMI's to boot instead */ + .wait_for_init_deassert = NULL, + + .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, + .store_NMI_vector = numaq_store_NMI_vector, + .inquire_remote_apic = NULL, +}; diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 05e47acfd66..4ede08d309e 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile @@ -5,5 +5,4 @@ EXTRA_CFLAGS := -Iarch/x86/kernel obj-y := probe.o default.o -obj-$(CONFIG_X86_NUMAQ) += numaq.o obj-$(CONFIG_X86_BIGSMP) += bigsmp.o diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c deleted file mode 100644 index c221cfb2c2d..00000000000 --- a/arch/x86/mach-generic/numaq.c +++ /dev/null @@ -1,277 +0,0 @@ -/* - * APIC driver for the IBM NUMAQ chipset. 
- */ -#define APIC_DEFINITION 1 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline unsigned int numaq_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0x0F; -} - -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - -static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static inline void numaq_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself(cpu_online_mask, vector); -} - -static inline void numaq_send_IPI_all(int vector) -{ - numaq_send_IPI_mask(cpu_online_mask, vector); -} - -extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); - -#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) -#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) - -/* - * Because we use NMIs rather than the INIT-STARTUP sequence to - * bootstrap the CPUs, the APIC may be in a weird state. Kick it: - */ -static inline void numaq_smp_callin_clear_local_apic(void) -{ - clear_local_APIC(); -} - -static inline void -numaq_store_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk("Storing NMI vector\n"); - *high = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); - *low = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); -} - -static inline const cpumask_t *numaq_target_cpus(void) -{ - return &CPU_MASK_ALL; -} - -static inline unsigned long -numaq_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long numaq_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline int numaq_apic_id_registered(void) -{ - return 1; -} - -static inline void numaq_init_apic_ldr(void) -{ - /* Already done in NUMA-Q firmware */ -} - -static inline void numaq_setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "NUMA-Q", nr_ioapics); -} - -/* - * Skip adding the timer int on secondary nodes, which causes - * a small but painful rift in the time-space continuum. - */ -static inline int numaq_multi_timer_check(int apic, int irq) -{ - return apic != 0 && irq == 0; -} - -static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* We don't have a good way to do this yet - hack */ - return physids_promote(0xFUL); -} - -/* Mapping from cpu number to logical apicid */ -extern u8 cpu_2_logical_apicid[]; - -static inline int numaq_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -} - -/* - * Supporting over 60 cpus on NUMA-Q requires a locality-dependent - * cpu to APIC ID relation to properly interact with the intelligent - * mode of the cluster controller. 
- */ -static inline int numaq_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < 60) - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); - else - return BAD_APICID; -} - -static inline int numaq_apicid_to_node(int logical_apicid) -{ - return logical_apicid >> 4; -} - -static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) -{ - int node = numaq_apicid_to_node(logical_apicid); - int cpu = __ffs(logical_apicid & 0xf); - - return physid_mask_of_physid(cpu + 4*node); -} - -extern void *xquad_portio; - -static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -/* - * We use physical apicids here, not logical, so just return the default - * physical broadcast to stop people from breaking us - */ -static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - return 0x0F; -} - -static inline unsigned int -numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - return 0x0F; -} - -/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} -static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - numaq_mps_oem_check(mpc, oem, productid); - return found_numaq; -} - -static int probe_numaq(void) -{ - /* already know from get_memcfg_numaq() */ - return found_numaq; -} - -static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. 
- */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - -static void numaq_setup_portio_remap(void) -{ - int num_quads = num_online_nodes(); - - if (num_quads <= 1) - return; - - printk("Remapping cross-quad port I/O for %d quads\n", num_quads); - xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); - printk("xquad_portio vaddr 0x%08lx, len %08lx\n", - (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); -} - -struct genapic apic_numaq = { - - .name = "NUMAQ", - .probe = probe_numaq, - .acpi_madt_oem_check = NULL, - .apic_id_registered = numaq_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* physical delivery on LOCAL quad: */ - .irq_dest_mode = 0, - - .target_cpus = numaq_target_cpus, - .disable_esr = 1, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = numaq_check_apicid_used, - .check_apicid_present = numaq_check_apicid_present, - - .vector_allocation_domain = numaq_vector_allocation_domain, - .init_apic_ldr = numaq_init_apic_ldr, - - .ioapic_phys_id_map = numaq_ioapic_phys_id_map, - .setup_apic_routing = numaq_setup_apic_routing, - .multi_timer_check = numaq_multi_timer_check, - .apicid_to_node = numaq_apicid_to_node, - .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, - .cpu_present_to_apicid = numaq_cpu_present_to_apicid, - .apicid_to_cpu_present = numaq_apicid_to_cpu_present, - .setup_portio_remap = numaq_setup_portio_remap, - .check_phys_apicid_present = numaq_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = numaq_phys_pkg_id, - .mps_oem_check = __numaq_mps_oem_check, - - .get_apic_id = numaq_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0x0F << 24, - - .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, - - .send_IPI_mask = numaq_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = numaq_send_IPI_allbutself, - .send_IPI_all = numaq_send_IPI_all, - .send_IPI_self = NULL, - - .wakeup_cpu = NULL, - .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, - - /* We don't do anything here because we use NMI's to boot instead */ - .wait_for_init_deassert = NULL, - - .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, - .store_NMI_vector = numaq_store_NMI_vector, - .inquire_remote_apic = NULL, -}; -- cgit v1.2.3 From b3daa3a1a56cf09fb91773f3658692fd02d08bb1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:04:37 +0100 Subject: x86, bigsmp: consolidate code Move all code to arch/x86/kernel/bigsmp_32.c. With this it ceases to rely on any build-time subarch features. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/bigsmp_32.c | 113 +++++++++++++++++++++++++++++++++++++++++ arch/x86/mach-generic/Makefile | 1 - arch/x86/mach-generic/bigsmp.c | 113 ----------------------------------------- 4 files changed, 114 insertions(+), 114 deletions(-) create mode 100644 arch/x86/kernel/bigsmp_32.c delete mode 100644 arch/x86/mach-generic/bigsmp.c (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index a382ca6f6a1..4239847906a 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o +obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_ES7000) += es7000_32.o obj-$(CONFIG_X86_SUMMIT) += summit_32.o diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c new file mode 100644 index 00000000000..626f45ca4e7 --- /dev/null +++ b/arch/x86/kernel/bigsmp_32.c @@ -0,0 +1,113 @@ +/* + * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. + * Drives the local APIC in "clustered mode". + */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int dmi_bigsmp; /* can be set by dmi scanners */ + +static int hp_ht_bigsmp(const struct dmi_system_id *d) +{ + printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); + dmi_bigsmp = 1; + return 0; +} + + +static const struct dmi_system_id bigsmp_dmi_table[] = { + { hp_ht_bigsmp, "HP ProLiant DL760 G2", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} + }, + + { hp_ht_bigsmp, "HP ProLiant DL740", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} + }, + { } +}; + +static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + cpus_clear(*retmask); + cpu_set(cpu, *retmask); +} + +static int probe_bigsmp(void) +{ + if (def_to_bigsmp) + dmi_bigsmp = 1; + else + dmi_check_system(bigsmp_dmi_table); + return dmi_bigsmp; +} + +struct genapic apic_bigsmp = { + + .name = "bigsmp", + .probe = probe_bigsmp, + .acpi_madt_oem_check = NULL, + .apic_id_registered = bigsmp_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPU: */ + .irq_dest_mode = 0, + + .target_cpus = bigsmp_target_cpus, + .disable_esr = 1, + .dest_logical = 0, + .check_apicid_used = bigsmp_check_apicid_used, + .check_apicid_present = bigsmp_check_apicid_present, + + .vector_allocation_domain = bigsmp_vector_allocation_domain, + .init_apic_ldr = bigsmp_init_apic_ldr, + + .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, + .setup_apic_routing = bigsmp_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = bigsmp_apicid_to_node, + .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, + .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, + .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = bigsmp_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = bigsmp_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = bigsmp_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, 
+ + .send_IPI_mask = default_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = bigsmp_send_IPI_allbutself, + .send_IPI_all = bigsmp_send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .inquire_remote_apic = default_inquire_remote_apic, +}; diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 4ede08d309e..05e4a7ca774 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile @@ -5,4 +5,3 @@ EXTRA_CFLAGS := -Iarch/x86/kernel obj-y := probe.o default.o -obj-$(CONFIG_X86_BIGSMP) += bigsmp.o diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c deleted file mode 100644 index 626f45ca4e7..00000000000 --- a/arch/x86/mach-generic/bigsmp.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. - * Drives the local APIC in "clustered mode". - */ -#define APIC_DEFINITION 1 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int dmi_bigsmp; /* can be set by dmi scanners */ - -static int hp_ht_bigsmp(const struct dmi_system_id *d) -{ - printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); - dmi_bigsmp = 1; - return 0; -} - - -static const struct dmi_system_id bigsmp_dmi_table[] = { - { hp_ht_bigsmp, "HP ProLiant DL760 G2", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} - }, - - { hp_ht_bigsmp, "HP ProLiant DL740", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} - }, - { } -}; - -static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - cpus_clear(*retmask); - cpu_set(cpu, *retmask); -} - -static int probe_bigsmp(void) -{ - if (def_to_bigsmp) - dmi_bigsmp = 1; - else - dmi_check_system(bigsmp_dmi_table); - return dmi_bigsmp; -} - -struct genapic apic_bigsmp = { - - .name = "bigsmp", - .probe = probe_bigsmp, - .acpi_madt_oem_check = NULL, - .apic_id_registered = bigsmp_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - /* phys delivery to target CPU: */ - .irq_dest_mode = 0, - - .target_cpus = bigsmp_target_cpus, - .disable_esr = 1, - .dest_logical = 0, - .check_apicid_used = bigsmp_check_apicid_used, - .check_apicid_present = bigsmp_check_apicid_present, - - .vector_allocation_domain = bigsmp_vector_allocation_domain, - .init_apic_ldr = bigsmp_init_apic_ldr, - - .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, - .setup_apic_routing = bigsmp_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = bigsmp_apicid_to_node, - .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, - .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, - .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = bigsmp_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = bigsmp_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = bigsmp_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, - - .send_IPI_mask = default_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = 
bigsmp_send_IPI_allbutself, - .send_IPI_all = bigsmp_send_IPI_all, - .send_IPI_self = NULL, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, - .inquire_remote_apic = default_inquire_remote_apic, -}; -- cgit v1.2.3 From 9f4187f0a3b93fc215b4472063b6c0b44364e60c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:19:12 +0100 Subject: x86, bigsmp: consolidate header code Move all the asm/bigsmp/*.h definitions into bigsmp_32.c. Signed-off-by: Ingo Molnar --- arch/x86/kernel/bigsmp_32.c | 163 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 159 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 626f45ca4e7..b1f91931003 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -12,10 +12,165 @@ #include #include #include -#include #include -#include -#include + + +static inline unsigned bigsmp_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) + +static inline int bigsmp_apic_id_registered(void) +{ + return 1; +} + +static inline const cpumask_t *bigsmp_target_cpus(void) +{ +#ifdef CONFIG_SMP + return &cpu_online_map; +#else + return &cpumask_of_cpu(0); +#endif +} + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline unsigned long +bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} + +static inline unsigned long bigsmp_check_apicid_present(int bit) +{ + return 1; +} + +static inline unsigned long calculate_ldr(int cpu) +{ + unsigned long val, id; + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + id = xapic_phys_to_log_apicid(cpu); + val |= SET_APIC_LOGICAL_ID(id); + return val; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +static inline void bigsmp_init_apic_ldr(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static inline void bigsmp_setup_apic_routing(void) +{ + printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", + "Physflat", nr_ioapics); +} + +static inline int bigsmp_apicid_to_node(int logical_apicid) +{ + return apicid_2_node[hard_smp_processor_id()]; +} + +static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids) + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); + + return BAD_APICID; +} + +static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) +{ + return physid_mask_of_physid(phys_apicid); +} + +extern u8 cpu_2_logical_apicid[]; +/* Mapping from cpu number to logical apicid */ +static inline int bigsmp_cpu_to_logical_apicid(int cpu) +{ + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return cpu_physical_id(cpu); +} + +static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0xFFL); +} + +static inline void bigsmp_setup_portio_remap(void) +{ +} + +static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +/* As we are using single CPU as destination, pick only one CPU here */ +static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); +} + +static inline unsigned int +bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + if (cpu < nr_cpu_ids) + return bigsmp_cpu_to_logical_apicid(cpu); + + return BAD_APICID; +} + +static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); + +static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence(mask, vector); +} + +static inline void bigsmp_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static inline void bigsmp_send_IPI_all(int vector) +{ + bigsmp_send_IPI_mask(cpu_online_mask, vector); +} static int dmi_bigsmp; /* can be set by dmi scanners */ @@ -95,7 +250,7 @@ struct genapic apic_bigsmp = { .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, - .send_IPI_mask = default_send_IPI_mask, + .send_IPI_mask = bigsmp_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = bigsmp_send_IPI_allbutself, .send_IPI_all = bigsmp_send_IPI_all, -- cgit v1.2.3 From d53e2f2855f1c7c2725d550c1ae6b26f4d671c50 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:14:52 +0100 Subject: x86, smp: remove mach_ipi.h Move mach_ipi.h definitions into genapic.h. 
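For illustration, a minimal usage sketch of the consolidated IPI helpers (not taken from this patch; the include path and helper name are assumed from the diff below, which moves default_send_IPI_allbutself()/default_send_IPI_all() for 32-bit into asm/ipi.h):

    /* Sketch only: send a vector to every online CPU except the caller. */
    #include <asm/ipi.h>    /* assumed new home of the helpers, per the diff */

    static void kick_other_cpus(int vector)
    {
            /*
             * Per the moved code, this is a no-op on UP and avoids the APIC
             * broadcast shortcut for NMI_VECTOR or when no_broadcast is set.
             */
            default_send_IPI_allbutself(vector);
    }
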
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 1 + arch/x86/include/asm/ipi.h | 61 +++++++++++++++++++++++- arch/x86/include/asm/mach-default/mach_ipi.h | 58 ---------------------- arch/x86/include/asm/mach-generic/gpio.h | 15 ------ arch/x86/include/asm/mach-generic/mach_ipi.h | 6 --- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 4 -- arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/crash.c | 2 +- arch/x86/kernel/io_apic.c | 1 - arch/x86/kernel/ipi.c | 1 - arch/x86/kernel/kgdb.c | 2 +- arch/x86/kernel/reboot.c | 2 +- arch/x86/kernel/smp.c | 1 - arch/x86/kernel/visws_quirks.c | 2 +- arch/x86/mach-default/setup.c | 2 +- arch/x86/mach-generic/default.c | 2 +- arch/x86/mm/tlb.c | 2 +- 17 files changed, 68 insertions(+), 96 deletions(-) delete mode 100644 arch/x86/include/asm/mach-default/mach_ipi.h delete mode 100644 arch/x86/include/asm/mach-generic/gpio.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_ipi.h delete mode 100644 arch/x86/include/asm/mach-generic/mach_wakecpu.h (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index ccfcd19d5b7..273b99452ae 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -259,4 +259,5 @@ static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) } #endif /* CONFIG_X86_LOCAL_APIC */ + #endif /* _ASM_X86_GENAPIC_64_H */ diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index a8d717f2c7e..e2e8e4e0a65 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_IPI_H #define _ASM_X86_IPI_H +#ifdef CONFIG_X86_LOCAL_APIC + /* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 @@ -56,8 +58,7 @@ static inline void __xapic_wait_icr_idle(void) } static inline void -__default_send_IPI_shortcut(unsigned int shortcut, - int vector, unsigned int dest) +__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) { /* * Subtle. In the case of the 'never do double writes' workaround @@ -156,4 +157,60 @@ default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) local_irq_restore(flags); } + +/* Avoid include hell */ +#define NMI_VECTOR 0x02 + +void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); + +extern int no_broadcast; + +#ifdef CONFIG_X86_64 +#include +#else +static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_bitmask(mask, vector); +} +void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); +#endif + +static inline void __default_local_send_IPI_allbutself(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); +} + +static inline void __default_local_send_IPI_all(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); +} + +#ifdef CONFIG_X86_32 +static inline void default_send_IPI_allbutself(int vector) +{ + /* + * if there are no other CPUs in the system then we get an APIC send + * error if we try to broadcast, thus avoid sending IPIs in this case. 
+ */ + if (!(num_online_cpus() > 1)) + return; + + __default_local_send_IPI_allbutself(vector); +} + +static inline void default_send_IPI_all(int vector) +{ + __default_local_send_IPI_all(vector); +} +#endif + +#endif + #endif /* _ASM_X86_IPI_H */ diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h deleted file mode 100644 index 85dec630c69..00000000000 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ /dev/null @@ -1,58 +0,0 @@ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H -#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H - -/* Avoid include hell */ -#define NMI_VECTOR 0x02 - -void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -void __default_send_IPI_shortcut(unsigned int shortcut, int vector); - -extern int no_broadcast; - -#ifdef CONFIG_X86_64 -#include -#else -static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_bitmask(mask, vector); -} -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -#endif - -static inline void __default_local_send_IPI_allbutself(int vector) -{ - if (no_broadcast || vector == NMI_VECTOR) - apic->send_IPI_mask_allbutself(cpu_online_mask, vector); - else - __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector); -} - -static inline void __default_local_send_IPI_all(int vector) -{ - if (no_broadcast || vector == NMI_VECTOR) - apic->send_IPI_mask(cpu_online_mask, vector); - else - __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector); -} - -#ifdef CONFIG_X86_32 -static inline void default_send_IPI_allbutself(int vector) -{ - /* - * if there are no other CPUs in the system then we get an APIC send - * error if we try to broadcast, thus avoid sending IPIs in this case. 
- */ - if (!(num_online_cpus() > 1)) - return; - - __default_local_send_IPI_allbutself(vector); -} - -static inline void default_send_IPI_all(int vector) -{ - __default_local_send_IPI_all(vector); -} -#endif - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */ diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h deleted file mode 100644 index 995c45efdb3..00000000000 --- a/arch/x86/include/asm/mach-generic/gpio.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_GPIO_H -#define _ASM_X86_MACH_GENERIC_GPIO_H - -int gpio_request(unsigned gpio, const char *label); -void gpio_free(unsigned gpio); -int gpio_direction_input(unsigned gpio); -int gpio_direction_output(unsigned gpio, int value); -int gpio_get_value(unsigned gpio); -void gpio_set_value(unsigned gpio, int value); -int gpio_to_irq(unsigned gpio); -int irq_to_gpio(unsigned irq); - -#include /* cansleep wrappers */ - -#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h deleted file mode 100644 index 5691c09645c..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_ipi.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H -#define _ASM_X86_MACH_GENERIC_MACH_IPI_H - -#include - -#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h deleted file mode 100644 index 0b884c03a3f..00000000000 --- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H -#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H - -#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 41a0ba34d6b..81efe86eca8 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -49,7 +49,7 @@ #include #include -#include +#include /* * Sanity check diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 11b93cabdf7..ad7f2a696f4 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -28,7 +28,7 @@ #include #include -#include +#include #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index e0744ea6d0f..241a01d6fd4 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -62,7 +62,6 @@ #include #include -#include #include #define __apicdebuginit(type) static type __init diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 50076d92fbc..0893fa14458 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -20,7 +20,6 @@ #ifdef CONFIG_X86_32 #include -#include /* * the following functions deal with sending IPIs between CPUs. 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index b62a3811e01..5c4f5548384 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -46,7 +46,7 @@ #include #include -#include +#include /* * Put the error code here just in case the user cares: diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 38dace28d62..32e8f0af292 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -24,7 +24,7 @@ # include #endif -#include +#include /* * Power off function, if any diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 892e7c389be..0eb32ae9bf1 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -26,7 +26,6 @@ #include #include #include -#include #include /* * Some notes on x86 processor bugs affecting SMP operation: diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 3bd7f47a91b..4fd646e6dd4 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -32,7 +32,7 @@ #include #include -#include +#include #include diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index df167f26562..b65ff0bf730 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c @@ -10,7 +10,7 @@ #include #include -#include +#include #ifdef CONFIG_HOTPLUG_CPU #define DEFAULT_SEND_IPI (1) diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 7d5123e474e..d9d44c8c3db 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) { diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6348e114692..14c5af4d11e 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -14,7 +14,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, }; -#include +#include /* * Smarter SMP flushing macros. * c/o Linus Torvalds. -- cgit v1.2.3 From 7b38725318f4517af6168ccbff99060d67aba1c8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:11:44 +0100 Subject: x86: remove subarchitecture support code Remove remaining bits of the subarchitecture code. Now that all the special platforms are runtime probed and runtime handled, we can remove these facilities. 
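The runtime probing that replaces the old compile-time subarch selection is a NULL-terminated driver table that is walked once at boot. A minimal sketch of the pattern, with deliberately simplified names (the real implementation is the probe_32.c code added below):

	/* Sketch only: simplified names, not the kernel's actual types */
	struct apic_driver_sketch {
		const char *name;
		int (*probe)(void);		/* nonzero: this driver fits the machine */
	};

	static int probe_default_sketch(void)
	{
		return 1;			/* always matches, so it must come last */
	}

	static struct apic_driver_sketch apic_default_sketch = {
		.name	= "default",
		.probe	= probe_default_sketch,
	};

	static struct apic_driver_sketch *drivers_sketch[] = {
		/* platform-specific drivers (numaq, summit, bigsmp, es7000) go first */
		&apic_default_sketch,		/* must be last */
		NULL,
	};

	static struct apic_driver_sketch *chosen_sketch = &apic_default_sketch;

	static void apic_probe_sketch(void)
	{
		int i;

		for (i = 0; drivers_sketch[i]; i++) {
			if (drivers_sketch[i]->probe()) {
				chosen_sketch = drivers_sketch[i];
				break;
			}
		}
	}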
Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 5 - arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/probe_32.c | 248 +++++++++++++++++++++++++++++++++++++++++ arch/x86/mach-generic/Makefile | 7 -- arch/x86/mach-generic/probe.c | 156 -------------------------- 5 files changed, 249 insertions(+), 169 deletions(-) create mode 100644 arch/x86/kernel/probe_32.c delete mode 100644 arch/x86/mach-generic/Makefile delete mode 100644 arch/x86/mach-generic/probe.c (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index cacee981d16..799a0d931c8 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -113,11 +113,6 @@ mcore-y := arch/x86/mach-default/ mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/ -# generic subarchitecture -mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic -fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/ -mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/ - # default subarch .h files mflags-y += -Iarch/x86/include/asm/mach-default diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4239847906a..d3f8f49aed6 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -30,7 +30,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o obj-y += setup.o i8259.o irqinit_$(BITS).o obj-$(CONFIG_X86_VISWS) += visws_quirks.o -obj-$(CONFIG_X86_32) += probe_roms_32.o +obj-$(CONFIG_X86_32) += probe_32.o probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c new file mode 100644 index 00000000000..a6fba691416 --- /dev/null +++ b/arch/x86/kernel/probe_32.c @@ -0,0 +1,248 @@ +/* + * Default generic APIC driver. This handles up to 8 CPUs. + * + * Copyright 2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + * + * Generic x86 APIC driver probe layer. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* + * Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; +} + +/* should be called last. 
*/ +static int probe_default(void) +{ + return 1; +} + +struct genapic apic_default = { + + .name = "default", + .probe = probe_default, + .acpi_madt_oem_check = NULL, + .apic_id_registered = default_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, + + .target_cpus = default_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = default_check_apicid_used, + .check_apicid_present = default_check_apicid_present, + + .vector_allocation_domain = default_vector_allocation_domain, + .init_apic_ldr = default_init_apic_ldr, + + .ioapic_phys_id_map = default_ioapic_phys_id_map, + .setup_apic_routing = default_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = default_apicid_to_node, + .cpu_to_logical_apicid = default_cpu_to_logical_apicid, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = default_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = default_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = default_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0x0F << 24, + + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + + .send_IPI_mask = default_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = default_send_IPI_allbutself, + .send_IPI_all = default_send_IPI_all, + .send_IPI_self = NULL, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .store_NMI_vector = NULL, + .inquire_remote_apic = default_inquire_remote_apic, +}; + +extern struct genapic apic_numaq; +extern struct genapic apic_summit; +extern struct genapic apic_bigsmp; +extern struct genapic apic_es7000; +extern struct genapic apic_default; + +struct genapic *apic = &apic_default; + +static struct genapic *apic_probe[] __initdata = { +#ifdef CONFIG_X86_NUMAQ + &apic_numaq, +#endif +#ifdef CONFIG_X86_SUMMIT + &apic_summit, +#endif +#ifdef CONFIG_X86_BIGSMP + &apic_bigsmp, +#endif +#ifdef CONFIG_X86_ES7000 + &apic_es7000, +#endif + &apic_default, /* must be last */ + NULL, +}; + +static int cmdline_apic __initdata; +static int __init parse_apic(char *arg) +{ + int i; + + if (!arg) + return -EINVAL; + + for (i = 0; apic_probe[i]; i++) { + if (!strcmp(apic_probe[i]->name, arg)) { + apic = apic_probe[i]; + cmdline_apic = 1; + return 0; + } + } + + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + + /* Parsed again by __setup for debug/verbose */ + return 0; +} +early_param("apic", parse_apic); + +void __init generic_bigsmp_probe(void) +{ +#ifdef CONFIG_X86_BIGSMP + /* + * This routine is used to switch to bigsmp mode when + * - There is no apic= option specified by the user + * - generic_apic_probe() has chosen apic_default as the sub_arch + * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support + */ + + if (!cmdline_apic && apic == &apic_default) { + if (apic_bigsmp.probe()) { + apic = &apic_bigsmp; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + printk(KERN_INFO "Overriding APIC driver with %s\n", + apic->name); + } + } +#endif +} + +void __init generic_apic_probe(void) +{ + if (!cmdline_apic) { + int i; + for (i = 0; 
apic_probe[i]; i++) { + if (apic_probe[i]->probe()) { + apic = apic_probe[i]; + break; + } + } + /* Not visible without early console */ + if (!apic_probe[i]) + panic("Didn't find an APIC driver"); + + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + } + printk(KERN_INFO "Using APIC driver %s\n", apic->name); +} + +/* These functions can switch the APIC even after the initial ->probe() */ + +int __init +generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (!apic_probe[i]->mps_oem_check) + continue; + if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); + } + return 1; + } + return 0; +} + +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (!apic_probe[i]->acpi_madt_oem_check) + continue; + if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); + } + return 1; + } + return 0; +} diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile deleted file mode 100644 index 05e4a7ca774..00000000000 --- a/arch/x86/mach-generic/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the generic architecture -# - -EXTRA_CFLAGS := -Iarch/x86/kernel - -obj-y := probe.o default.o diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c deleted file mode 100644 index c03c7222132..00000000000 --- a/arch/x86/mach-generic/probe.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2003 Andi Kleen, SuSE Labs. - * Subject to the GNU Public License, v.2 - * - * Generic x86 APIC driver probe layer. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -extern struct genapic apic_numaq; -extern struct genapic apic_summit; -extern struct genapic apic_bigsmp; -extern struct genapic apic_es7000; -extern struct genapic apic_default; - -struct genapic *apic = &apic_default; - -static struct genapic *apic_probe[] __initdata = { -#ifdef CONFIG_X86_NUMAQ - &apic_numaq, -#endif -#ifdef CONFIG_X86_SUMMIT - &apic_summit, -#endif -#ifdef CONFIG_X86_BIGSMP - &apic_bigsmp, -#endif -#ifdef CONFIG_X86_ES7000 - &apic_es7000, -#endif - &apic_default, /* must be last */ - NULL, -}; - -static int cmdline_apic __initdata; -static int __init parse_apic(char *arg) -{ - int i; - - if (!arg) - return -EINVAL; - - for (i = 0; apic_probe[i]; i++) { - if (!strcmp(apic_probe[i]->name, arg)) { - apic = apic_probe[i]; - cmdline_apic = 1; - return 0; - } - } - - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - - /* Parsed again by __setup for debug/verbose */ - return 0; -} -early_param("apic", parse_apic); - -void __init generic_bigsmp_probe(void) -{ -#ifdef CONFIG_X86_BIGSMP - /* - * This routine is used to switch to bigsmp mode when - * - There is no apic= option specified by the user - * - generic_apic_probe() has chosen apic_default as the sub_arch - * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support - */ - - if (!cmdline_apic && apic == &apic_default) { - if (apic_bigsmp.probe()) { - apic = &apic_bigsmp; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - printk(KERN_INFO "Overriding APIC driver with %s\n", - apic->name); - } - } -#endif -} - -void __init generic_apic_probe(void) -{ - if (!cmdline_apic) { - int i; - for (i = 0; apic_probe[i]; i++) { - if (apic_probe[i]->probe()) { - apic = apic_probe[i]; - break; - } - } - /* Not visible without early console */ - if (!apic_probe[i]) - panic("Didn't find an APIC driver"); - - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - } - printk(KERN_INFO "Using APIC driver %s\n", apic->name); -} - -/* These functions can switch the APIC even after the initial ->probe() */ - -int __init -generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (!apic_probe[i]->mps_oem_check) - continue; - if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) - continue; - - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; - } - return 0; -} - -int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (!apic_probe[i]->acpi_madt_oem_check) - continue; - if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) - continue; - - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; - } - return 0; -} -- cgit v1.2.3 From 1164dd0099c0d79146a55319670f57ab7ad1d352 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:34:09 +0100 Subject: x86: move mach-default/*.h files to asm/ We are getting rid of subarchitecture support - move the hook files to asm/. (These are now stale and should be replaced with more explicit runtime mechanisms - but the transition is simpler this way.) 
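For users of the moved headers this is purely an include-path change; an assumed before/after for one of them (apm.h, with the bracketed target spelled out here for illustration, matching the new arch/x86/include/asm/ location created below):

	/* before: picked up from the subarch directory via -Iarch/x86/include/asm/mach-default */
	#include "apm.h"

	/* after (assumed spelling of the new location): the header now lives directly in asm/ */
	#include <asm/apm.h>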
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apm.h | 73 +++++++++++++++++++++++ arch/x86/include/asm/do_timer.h | 16 +++++ arch/x86/include/asm/entry_arch.h | 57 ++++++++++++++++++ arch/x86/include/asm/mach-default/apm.h | 73 ----------------------- arch/x86/include/asm/mach-default/do_timer.h | 16 ----- arch/x86/include/asm/mach-default/entry_arch.h | 52 ---------------- arch/x86/include/asm/mach-default/mach_timer.h | 48 --------------- arch/x86/include/asm/mach-default/mach_traps.h | 33 ---------- arch/x86/include/asm/mach-default/pci-functions.h | 19 ------ arch/x86/include/asm/mach-default/setup_arch.h | 3 - arch/x86/include/asm/mach-default/smpboot_hooks.h | 61 ------------------- arch/x86/include/asm/mach_timer.h | 48 +++++++++++++++ arch/x86/include/asm/mach_traps.h | 33 ++++++++++ arch/x86/include/asm/pci-functions.h | 19 ++++++ arch/x86/include/asm/setup_arch.h | 3 + arch/x86/include/asm/smpboot_hooks.h | 61 +++++++++++++++++++ arch/x86/kernel/apm_32.c | 2 +- arch/x86/kernel/entry_32.S | 2 +- arch/x86/kernel/nmi.c | 2 +- arch/x86/kernel/probe_roms_32.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/kernel/time_32.c | 2 +- arch/x86/kernel/traps.c | 2 +- arch/x86/pci/pcbios.c | 2 +- 25 files changed, 319 insertions(+), 314 deletions(-) create mode 100644 arch/x86/include/asm/apm.h create mode 100644 arch/x86/include/asm/do_timer.h create mode 100644 arch/x86/include/asm/entry_arch.h delete mode 100644 arch/x86/include/asm/mach-default/apm.h delete mode 100644 arch/x86/include/asm/mach-default/do_timer.h delete mode 100644 arch/x86/include/asm/mach-default/entry_arch.h delete mode 100644 arch/x86/include/asm/mach-default/mach_timer.h delete mode 100644 arch/x86/include/asm/mach-default/mach_traps.h delete mode 100644 arch/x86/include/asm/mach-default/pci-functions.h delete mode 100644 arch/x86/include/asm/mach-default/setup_arch.h delete mode 100644 arch/x86/include/asm/mach-default/smpboot_hooks.h create mode 100644 arch/x86/include/asm/mach_timer.h create mode 100644 arch/x86/include/asm/mach_traps.h create mode 100644 arch/x86/include/asm/pci-functions.h create mode 100644 arch/x86/include/asm/setup_arch.h create mode 100644 arch/x86/include/asm/smpboot_hooks.h (limited to 'arch') diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h new file mode 100644 index 00000000000..20370c6db74 --- /dev/null +++ b/arch/x86/include/asm/apm.h @@ -0,0 +1,73 @@ +/* + * Machine specific APM BIOS functions for generic. + * Split out from apm.c by Osamu Tomita + */ + +#ifndef _ASM_X86_MACH_DEFAULT_APM_H +#define _ASM_X86_MACH_DEFAULT_APM_H + +#ifdef APM_ZERO_SEGS +# define APM_DO_ZERO_SEGS \ + "pushl %%ds\n\t" \ + "pushl %%es\n\t" \ + "xorl %%edx, %%edx\n\t" \ + "mov %%dx, %%ds\n\t" \ + "mov %%dx, %%es\n\t" \ + "mov %%dx, %%fs\n\t" \ + "mov %%dx, %%gs\n\t" +# define APM_DO_POP_SEGS \ + "popl %%es\n\t" \ + "popl %%ds\n\t" +#else +# define APM_DO_ZERO_SEGS +# define APM_DO_POP_SEGS +#endif + +static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + u32 *eax, u32 *ebx, u32 *ecx, + u32 *edx, u32 *esi) +{ + /* + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. 
+ */ + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" + "lcall *%%cs:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" + APM_DO_POP_SEGS + : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), + "=S" (*esi) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); +} + +static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + u32 ecx_in, u32 *eax) +{ + int cx, dx, si; + u8 error; + + /* + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. + */ + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" + "lcall *%%cs:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" + APM_DO_POP_SEGS + : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), + "=S" (si) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); + return error; +} + +#endif /* _ASM_X86_MACH_DEFAULT_APM_H */ diff --git a/arch/x86/include/asm/do_timer.h b/arch/x86/include/asm/do_timer.h new file mode 100644 index 00000000000..23ecda0b28a --- /dev/null +++ b/arch/x86/include/asm/do_timer.h @@ -0,0 +1,16 @@ +/* defines for inline arch setup functions */ +#include + +#include +#include + +/** + * do_timer_interrupt_hook - hook into timer tick + * + * Call the pit clock event handler. see asm/i8253.h + **/ + +static inline void do_timer_interrupt_hook(void) +{ + global_clock_event->event_handler(global_clock_event); +} diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h new file mode 100644 index 00000000000..b87b077cc23 --- /dev/null +++ b/arch/x86/include/asm/entry_arch.h @@ -0,0 +1,57 @@ +/* + * This file is designed to contain the BUILD_INTERRUPT specifications for + * all of the extra named interrupt vectors used by the architecture. + * Usually this is the Inter Process Interrupts (IPIs) + */ + +/* + * The following vectors are part of the Linux architecture, there + * is no hardware IRQ pin equivalent for them, they are triggered + * through the ICC by us (IPIs) + */ +#ifdef CONFIG_X86_SMP +BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) +BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) +BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) +BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) + +BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6, + smp_invalidate_interrupt) +BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7, + smp_invalidate_interrupt) +#endif + +/* + * every pentium local APIC has two 'local interrupts', with a + * soft-definable vector attached to both interrupts, one of + * which is a timer interrupt, the other one is error counter + * overflow. 
Linux uses the local APIC timer interrupt to get + * a much simpler SMP time architecture: + */ +#ifdef CONFIG_X86_LOCAL_APIC + +BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) +BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) +BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) + +#ifdef CONFIG_PERF_COUNTERS +BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR) +#endif + +#ifdef CONFIG_X86_MCE_P4THERMAL +BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) +#endif + +#endif diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/mach-default/apm.h deleted file mode 100644 index 20370c6db74..00000000000 --- a/arch/x86/include/asm/mach-default/apm.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Machine specific APM BIOS functions for generic. - * Split out from apm.c by Osamu Tomita - */ - -#ifndef _ASM_X86_MACH_DEFAULT_APM_H -#define _ASM_X86_MACH_DEFAULT_APM_H - -#ifdef APM_ZERO_SEGS -# define APM_DO_ZERO_SEGS \ - "pushl %%ds\n\t" \ - "pushl %%es\n\t" \ - "xorl %%edx, %%edx\n\t" \ - "mov %%dx, %%ds\n\t" \ - "mov %%dx, %%es\n\t" \ - "mov %%dx, %%fs\n\t" \ - "mov %%dx, %%gs\n\t" -# define APM_DO_POP_SEGS \ - "popl %%es\n\t" \ - "popl %%ds\n\t" -#else -# define APM_DO_ZERO_SEGS -# define APM_DO_POP_SEGS -#endif - -static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, - u32 *eax, u32 *ebx, u32 *ecx, - u32 *edx, u32 *esi) -{ - /* - * N.B. We do NOT need a cld after the BIOS call - * because we always save and restore the flags. - */ - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" - "setc %%al\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" - APM_DO_POP_SEGS - : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), - "=S" (*esi) - : "a" (func), "b" (ebx_in), "c" (ecx_in) - : "memory", "cc"); -} - -static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, - u32 ecx_in, u32 *eax) -{ - int cx, dx, si; - u8 error; - - /* - * N.B. We do NOT need a cld after the BIOS call - * because we always save and restore the flags. - */ - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" - "setc %%bl\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" - APM_DO_POP_SEGS - : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), - "=S" (si) - : "a" (func), "b" (ebx_in), "c" (ecx_in) - : "memory", "cc"); - return error; -} - -#endif /* _ASM_X86_MACH_DEFAULT_APM_H */ diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/mach-default/do_timer.h deleted file mode 100644 index 23ecda0b28a..00000000000 --- a/arch/x86/include/asm/mach-default/do_timer.h +++ /dev/null @@ -1,16 +0,0 @@ -/* defines for inline arch setup functions */ -#include - -#include -#include - -/** - * do_timer_interrupt_hook - hook into timer tick - * - * Call the pit clock event handler. see asm/i8253.h - **/ - -static inline void do_timer_interrupt_hook(void) -{ - global_clock_event->event_handler(global_clock_event); -} diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/mach-default/entry_arch.h deleted file mode 100644 index 6fa399ad1de..00000000000 --- a/arch/x86/include/asm/mach-default/entry_arch.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * This file is designed to contain the BUILD_INTERRUPT specifications for - * all of the extra named interrupt vectors used by the architecture. 
- * Usually this is the Inter Process Interrupts (IPIs) - */ - -/* - * The following vectors are part of the Linux architecture, there - * is no hardware IRQ pin equivalent for them, they are triggered - * through the ICC by us (IPIs) - */ -#ifdef CONFIG_X86_SMP -BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) -BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) -BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) -BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) - -BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7, - smp_invalidate_interrupt) -#endif - -/* - * every pentium local APIC has two 'local interrupts', with a - * soft-definable vector attached to both interrupts, one of - * which is a timer interrupt, the other one is error counter - * overflow. Linux uses the local APIC timer interrupt to get - * a much simpler SMP time architecture: - */ -#ifdef CONFIG_X86_LOCAL_APIC -BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) -BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) -BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) - -#ifdef CONFIG_X86_MCE_P4THERMAL -BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) -#endif - -#endif diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach-default/mach_timer.h deleted file mode 100644 index 853728519ae..00000000000 --- a/arch/x86/include/asm/mach-default/mach_timer.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Machine specific calibrate_tsc() for generic. - * Split out from timer_tsc.c by Osamu Tomita - */ -/* ------ Calibrate the TSC ------- - * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). - * Too much 64-bit arithmetic here to do this cleanly in C, and for - * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) - * output busy loop as low as possible. We avoid reading the CTC registers - * directly because of the awkward 8-bit access mechanism of the 82C54 - * device. - */ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H -#define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H - -#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ -#define CALIBRATE_LATCH \ - ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) - -static inline void mach_prepare_counter(void) -{ - /* Set the Gate high, disable speaker */ - outb((inb(0x61) & ~0x02) | 0x01, 0x61); - - /* - * Now let's take care of CTC channel 2 - * - * Set the Gate high, program CTC channel 2 for mode 0, - * (interrupt on terminal count mode), binary count, - * load 5 * LATCH count, (LSB and MSB) to begin countdown. - * - * Some devices need a delay here. 
- */ - outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ - outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ - outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ -} - -static inline void mach_countup(unsigned long *count_p) -{ - unsigned long count = 0; - do { - count++; - } while ((inb_p(0x61) & 0x20) == 0); - *count_p = count; -} - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */ diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach-default/mach_traps.h deleted file mode 100644 index f7920601e47..00000000000 --- a/arch/x86/include/asm/mach-default/mach_traps.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Machine specific NMI handling for generic. - * Split out from traps.c by Osamu Tomita - */ -#ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H -#define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H - -#include - -static inline unsigned char get_nmi_reason(void) -{ - return inb(0x61); -} - -static inline void reassert_nmi(void) -{ - int old_reg = -1; - - if (do_i_have_lock_cmos()) - old_reg = current_lock_cmos_reg(); - else - lock_cmos(0); /* register doesn't matter here */ - outb(0x8f, 0x70); - inb(0x71); /* dummy */ - outb(0x0f, 0x70); - inb(0x71); /* dummy */ - if (old_reg >= 0) - outb(old_reg, 0x70); - else - unlock_cmos(); -} - -#endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */ diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/mach-default/pci-functions.h deleted file mode 100644 index ed0bab42735..00000000000 --- a/arch/x86/include/asm/mach-default/pci-functions.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * PCI BIOS function numbering for conventional PCI BIOS - * systems - */ - -#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX -#define PCIBIOS_PCI_BIOS_PRESENT 0xb101 -#define PCIBIOS_FIND_PCI_DEVICE 0xb102 -#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 -#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 -#define PCIBIOS_READ_CONFIG_BYTE 0xb108 -#define PCIBIOS_READ_CONFIG_WORD 0xb109 -#define PCIBIOS_READ_CONFIG_DWORD 0xb10a -#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b -#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c -#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d -#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e -#define PCIBIOS_SET_PCI_HW_INT 0xb10f - diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/mach-default/setup_arch.h deleted file mode 100644 index 38846208b54..00000000000 --- a/arch/x86/include/asm/mach-default/setup_arch.h +++ /dev/null @@ -1,3 +0,0 @@ -/* Hook to call BIOS initialisation function */ - -/* no action for generic */ diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/mach-default/smpboot_hooks.h deleted file mode 100644 index 1def6011490..00000000000 --- a/arch/x86/include/asm/mach-default/smpboot_hooks.h +++ /dev/null @@ -1,61 +0,0 @@ -/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws - * which needs to alter them. */ - -static inline void smpboot_clear_io_apic_irqs(void) -{ -#ifdef CONFIG_X86_IO_APIC - io_apic_irqs = 0; -#endif -} - -static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) -{ - CMOS_WRITE(0xa, 0xf); - local_flush_tlb(); - pr_debug("1.\n"); - *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) = - start_eip >> 4; - pr_debug("2.\n"); - *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) = - start_eip & 0xf; - pr_debug("3.\n"); -} - -static inline void smpboot_restore_warm_reset_vector(void) -{ - /* - * Install writable page 0 entry to set BIOS data area. 
- */ - local_flush_tlb(); - - /* - * Paranoid: Set warm reset code and vector here back - * to default values. - */ - CMOS_WRITE(0, 0xf); - - *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; -} - -static inline void __init smpboot_setup_io_apic(void) -{ -#ifdef CONFIG_X86_IO_APIC - /* - * Here we can be sure that there is an IO-APIC in the system. Let's - * go and set it up: - */ - if (!skip_ioapic_setup && nr_ioapics) - setup_IO_APIC(); - else { - nr_ioapics = 0; - localise_nmi_watchdog(); - } -#endif -} - -static inline void smpboot_clear_io_apic(void) -{ -#ifdef CONFIG_X86_IO_APIC - nr_ioapics = 0; -#endif -} diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h new file mode 100644 index 00000000000..853728519ae --- /dev/null +++ b/arch/x86/include/asm/mach_timer.h @@ -0,0 +1,48 @@ +/* + * Machine specific calibrate_tsc() for generic. + * Split out from timer_tsc.c by Osamu Tomita + */ +/* ------ Calibrate the TSC ------- + * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). + * Too much 64-bit arithmetic here to do this cleanly in C, and for + * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) + * output busy loop as low as possible. We avoid reading the CTC registers + * directly because of the awkward 8-bit access mechanism of the 82C54 + * device. + */ +#ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H +#define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H + +#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ +#define CALIBRATE_LATCH \ + ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) + +static inline void mach_prepare_counter(void) +{ + /* Set the Gate high, disable speaker */ + outb((inb(0x61) & ~0x02) | 0x01, 0x61); + + /* + * Now let's take care of CTC channel 2 + * + * Set the Gate high, program CTC channel 2 for mode 0, + * (interrupt on terminal count mode), binary count, + * load 5 * LATCH count, (LSB and MSB) to begin countdown. + * + * Some devices need a delay here. + */ + outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ + outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ + outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ +} + +static inline void mach_countup(unsigned long *count_p) +{ + unsigned long count = 0; + do { + count++; + } while ((inb_p(0x61) & 0x20) == 0); + *count_p = count; +} + +#endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */ diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h new file mode 100644 index 00000000000..f7920601e47 --- /dev/null +++ b/arch/x86/include/asm/mach_traps.h @@ -0,0 +1,33 @@ +/* + * Machine specific NMI handling for generic. 
+ * Split out from traps.c by Osamu Tomita + */ +#ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H +#define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H + +#include + +static inline unsigned char get_nmi_reason(void) +{ + return inb(0x61); +} + +static inline void reassert_nmi(void) +{ + int old_reg = -1; + + if (do_i_have_lock_cmos()) + old_reg = current_lock_cmos_reg(); + else + lock_cmos(0); /* register doesn't matter here */ + outb(0x8f, 0x70); + inb(0x71); /* dummy */ + outb(0x0f, 0x70); + inb(0x71); /* dummy */ + if (old_reg >= 0) + outb(old_reg, 0x70); + else + unlock_cmos(); +} + +#endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */ diff --git a/arch/x86/include/asm/pci-functions.h b/arch/x86/include/asm/pci-functions.h new file mode 100644 index 00000000000..ed0bab42735 --- /dev/null +++ b/arch/x86/include/asm/pci-functions.h @@ -0,0 +1,19 @@ +/* + * PCI BIOS function numbering for conventional PCI BIOS + * systems + */ + +#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX +#define PCIBIOS_PCI_BIOS_PRESENT 0xb101 +#define PCIBIOS_FIND_PCI_DEVICE 0xb102 +#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 +#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 +#define PCIBIOS_READ_CONFIG_BYTE 0xb108 +#define PCIBIOS_READ_CONFIG_WORD 0xb109 +#define PCIBIOS_READ_CONFIG_DWORD 0xb10a +#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b +#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c +#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d +#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e +#define PCIBIOS_SET_PCI_HW_INT 0xb10f + diff --git a/arch/x86/include/asm/setup_arch.h b/arch/x86/include/asm/setup_arch.h new file mode 100644 index 00000000000..38846208b54 --- /dev/null +++ b/arch/x86/include/asm/setup_arch.h @@ -0,0 +1,3 @@ +/* Hook to call BIOS initialisation function */ + +/* no action for generic */ diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h new file mode 100644 index 00000000000..1def6011490 --- /dev/null +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -0,0 +1,61 @@ +/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws + * which needs to alter them. */ + +static inline void smpboot_clear_io_apic_irqs(void) +{ +#ifdef CONFIG_X86_IO_APIC + io_apic_irqs = 0; +#endif +} + +static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) +{ + CMOS_WRITE(0xa, 0xf); + local_flush_tlb(); + pr_debug("1.\n"); + *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) = + start_eip >> 4; + pr_debug("2.\n"); + *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) = + start_eip & 0xf; + pr_debug("3.\n"); +} + +static inline void smpboot_restore_warm_reset_vector(void) +{ + /* + * Install writable page 0 entry to set BIOS data area. + */ + local_flush_tlb(); + + /* + * Paranoid: Set warm reset code and vector here back + * to default values. + */ + CMOS_WRITE(0, 0xf); + + *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; +} + +static inline void __init smpboot_setup_io_apic(void) +{ +#ifdef CONFIG_X86_IO_APIC + /* + * Here we can be sure that there is an IO-APIC in the system. 
Let's + * go and set it up: + */ + if (!skip_ioapic_setup && nr_ioapics) + setup_IO_APIC(); + else { + nr_ioapics = 0; + localise_nmi_watchdog(); + } +#endif +} + +static inline void smpboot_clear_io_apic(void) +{ +#ifdef CONFIG_X86_IO_APIC + nr_ioapics = 0; +#endif +} diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 98807bb095a..37ba5f85b71 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int); */ #define APM_ZERO_SEGS -#include "apm.h" +#include /* * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index a0b91aac72a..65efd42454b 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -688,7 +688,7 @@ ENDPROC(name) #define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name) /* The include is where all of the SMP etc. interrupts come from */ -#include "entry_arch.h" +#include ENTRY(coprocessor_error) RING0_INT_FRAME diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 23b6d9e6e4f..bdfad80c3cf 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -34,7 +34,7 @@ #include -#include +#include int unknown_nmi_panic; int nmi_watchdog_enabled; diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c index 675a48c404a..071e7fea42e 100644 --- a/arch/x86/kernel/probe_roms_32.c +++ b/arch/x86/kernel/probe_roms_32.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include static struct resource system_rom_resource = { .name = "System ROM", diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 92e42939fb0..e645d4793e4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -81,7 +81,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 489fde9d947..e90b3e50b54 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -66,7 +66,7 @@ #include #include -#include +#include #ifdef CONFIG_X86_32 u8 apicid_2_node[MAX_APICID]; diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 3985cac0ed4..764c74e871f 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c @@ -38,7 +38,7 @@ #include #include -#include "do_timer.h" +#include int timer_ack; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ed5aee5f3fc..214bc327a0c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -54,7 +54,7 @@ #include #include -#include +#include #ifdef CONFIG_X86_64 #include diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index b82cae970df..1c975cc9839 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include /* BIOS32 signature: "_32_" */ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) -- cgit v1.2.3 From 6bda2c8b32febeb38ee128047253751e080bad52 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:32:55 +0100 Subject: x86: remove subarchitecture support Remove the 32-bit subarchitecture support code. All subarchitectures but Voyager have been converted. Voyager will be done later or will be removed. 
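What replaces the compile-time subarch override is the x86_quirks hook pattern carried over into probe_32.c below: each hook first offers the work to an optional, runtime-installed platform callback and only then falls back to the generic PC behaviour. A condensed, name-simplified sketch of that shape (not the kernel's exact structures):

	/* Sketch only: simplified stand-ins for the x86_quirks hook dispatch */
	struct platform_quirks_sketch {
		/* return nonzero to tell the generic code to skip its own setup */
		int (*arch_pre_intr_init)(void);
	};

	static struct platform_quirks_sketch *quirks_sketch;	/* installed by platform probing */

	static void init_isa_irqs_sketch(void)
	{
		/* generic PC-style interrupt setup would go here */
	}

	static void pre_intr_init_hook_sketch(void)
	{
		if (quirks_sketch && quirks_sketch->arch_pre_intr_init) {
			if (quirks_sketch->arch_pre_intr_init())
				return;			/* platform handled it completely */
		}
		init_isa_irqs_sketch();			/* otherwise: default behaviour */
	}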
Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 21 ------ arch/x86/include/asm/apic.h | 2 + arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/io_apic.c | 17 ----- arch/x86/kernel/probe_32.c | 163 ++++++++++++++++++++++++++++++++++++++++ arch/x86/mach-default/setup.c | 162 --------------------------------------- arch/x86/mach-generic/default.c | 93 ----------------------- 7 files changed, 166 insertions(+), 294 deletions(-) delete mode 100644 arch/x86/mach-default/setup.c delete mode 100644 arch/x86/mach-generic/default.c (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 799a0d931c8..99550c40799 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -102,24 +102,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # prevent gcc from generating any FP code by mistake KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) -### -# Sub architecture support -# fcore-y is linked before mcore-y files. - -# Default subarch .c files -mcore-y := arch/x86/mach-default/ - -# Voyager subarch support -mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager -mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/ - -# default subarch .h files -mflags-y += -Iarch/x86/include/asm/mach-default - -# 64 bit does not support subarch support - clear sub arch variables -fcore-$(CONFIG_X86_64) := -mcore-$(CONFIG_X86_64) := - KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) @@ -145,9 +127,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/ core-y += arch/x86/kernel/ core-y += arch/x86/mm/ -# Remaining sub architecture files -core-y += $(mcore-y) - core-y += arch/x86/crypto/ core-y += arch/x86/vdso/ core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/ diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 6a77068e261..b03711d7990 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -215,6 +215,7 @@ static inline void disable_local_APIC(void) { } #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else +#ifdef CONFIG_X86_LOCAL_APIC static inline unsigned default_get_apic_id(unsigned long x) { unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); @@ -224,6 +225,7 @@ static inline unsigned default_get_apic_id(unsigned long x) else return (x >> 24) & 0x0F; } +#endif #endif diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d3f8f49aed6..1bc4cdc797d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -63,7 +63,7 @@ obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse.o -obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o +obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 241a01d6fd4..3378ffb2140 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -797,23 +797,6 @@ static void clear_IO_APIC (void) clear_IO_APIC_pin(apic, pin); } -#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32) -void default_send_IPI_self(int vector) -{ - unsigned int cfg; - - /* - * Wait for idle. - */ - apic_wait_icr_idle(); - cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | apic->dest_logical; - /* - * Send the IPI. The write to APIC_ICR fires this off. 
- */ - apic_write(APIC_ICR, cfg); -} -#endif /* !CONFIG_SMP && CONFIG_X86_32*/ - #ifdef CONFIG_X86_32 /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index a6fba691416..61339a0d55c 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -32,6 +32,26 @@ #include #include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_HOTPLUG_CPU +#define DEFAULT_SEND_IPI (1) +#else +#define DEFAULT_SEND_IPI (0) +#endif + +int no_broadcast = DEFAULT_SEND_IPI; + +#ifdef CONFIG_X86_LOCAL_APIC + static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) { /* @@ -246,3 +266,146 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) } return 0; } + +#endif /* CONFIG_X86_LOCAL_APIC */ + +/** + * pre_intr_init_hook - initialisation prior to setting up interrupt vectors + * + * Description: + * Perform any necessary interrupt initialisation prior to setting up + * the "ordinary" interrupt call gates. For legacy reasons, the ISA + * interrupts should be initialised here if the machine emulates a PC + * in any way. + **/ +void __init pre_intr_init_hook(void) +{ + if (x86_quirks->arch_pre_intr_init) { + if (x86_quirks->arch_pre_intr_init()) + return; + } + init_ISA_irqs(); +} + +/** + * intr_init_hook - post gate setup interrupt initialisation + * + * Description: + * Fill in any interrupts that may have been left out by the general + * init_IRQ() routine. interrupts having to do with the machine rather + * than the devices on the I/O bus (like APIC interrupts in intel MP + * systems) are started here. + **/ +void __init intr_init_hook(void) +{ + if (x86_quirks->arch_intr_init) { + if (x86_quirks->arch_intr_init()) + return; + } +} + +/** + * pre_setup_arch_hook - hook called prior to any setup_arch() execution + * + * Description: + * generally used to activate any machine specific identification + * routines that may be needed before setup_arch() runs. On Voyager + * this is used to get the board revision and type. + **/ +void __init pre_setup_arch_hook(void) +{ +} + +/** + * trap_init_hook - initialise system specific traps + * + * Description: + * Called as the final act of trap_init(). Used in VISWS to initialise + * the various board specific APIC traps. + **/ +void __init trap_init_hook(void) +{ + if (x86_quirks->arch_trap_init) { + if (x86_quirks->arch_trap_init()) + return; + } +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, + .mask = CPU_MASK_NONE, + .name = "timer" +}; + +/** + * pre_time_init_hook - do any specific initialisations before. + * + **/ +void __init pre_time_init_hook(void) +{ + if (x86_quirks->arch_pre_time_init) + x86_quirks->arch_pre_time_init(); +} + +/** + * time_init_hook - do any specific initialisations for the system timer. 
+ * + * Description: + * Must plug the system timer interrupt source at HZ into the IRQ listed + * in irq_vectors.h:TIMER_IRQ + **/ +void __init time_init_hook(void) +{ + if (x86_quirks->arch_time_init) { + /* + * A nonzero return code does not mean failure, it means + * that the architecture quirk does not want any + * generic (timer) setup to be performed after this: + */ + if (x86_quirks->arch_time_init()) + return; + } + + irq0.mask = cpumask_of_cpu(0); + setup_irq(0, &irq0); +} + +#ifdef CONFIG_MCA +/** + * mca_nmi_hook - hook into MCA specific NMI chain + * + * Description: + * The MCA (Microchannel Architecture) has an NMI chain for NMI sources + * along the MCA bus. Use this to hook into that chain if you will need + * it. + **/ +void mca_nmi_hook(void) +{ + /* + * If I recall correctly, there's a whole bunch of other things that + * we can do to check for NMI problems, but that's all I know about + * at the moment. + */ + pr_warning("NMI generated from unknown source!\n"); +} +#endif + +static __init int no_ipi_broadcast(char *str) +{ + get_option(&str, &no_broadcast); + pr_info("Using %s mode\n", + no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); + return 1; +} +__setup("no_ipi_broadcast=", no_ipi_broadcast); + +static int __init print_ipi_mode(void) +{ + pr_info("Using IPI %s mode\n", + no_broadcast ? "No-Shortcut" : "Shortcut"); + return 0; +} + +late_initcall(print_ipi_mode); + diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c deleted file mode 100644 index b65ff0bf730..00000000000 --- a/arch/x86/mach-default/setup.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Machine specific setup for generic - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#ifdef CONFIG_HOTPLUG_CPU -#define DEFAULT_SEND_IPI (1) -#else -#define DEFAULT_SEND_IPI (0) -#endif - -int no_broadcast = DEFAULT_SEND_IPI; - -/** - * pre_intr_init_hook - initialisation prior to setting up interrupt vectors - * - * Description: - * Perform any necessary interrupt initialisation prior to setting up - * the "ordinary" interrupt call gates. For legacy reasons, the ISA - * interrupts should be initialised here if the machine emulates a PC - * in any way. - **/ -void __init pre_intr_init_hook(void) -{ - if (x86_quirks->arch_pre_intr_init) { - if (x86_quirks->arch_pre_intr_init()) - return; - } - init_ISA_irqs(); -} - -/** - * intr_init_hook - post gate setup interrupt initialisation - * - * Description: - * Fill in any interrupts that may have been left out by the general - * init_IRQ() routine. interrupts having to do with the machine rather - * than the devices on the I/O bus (like APIC interrupts in intel MP - * systems) are started here. - **/ -void __init intr_init_hook(void) -{ - if (x86_quirks->arch_intr_init) { - if (x86_quirks->arch_intr_init()) - return; - } -} - -/** - * pre_setup_arch_hook - hook called prior to any setup_arch() execution - * - * Description: - * generally used to activate any machine specific identification - * routines that may be needed before setup_arch() runs. On Voyager - * this is used to get the board revision and type. - **/ -void __init pre_setup_arch_hook(void) -{ -} - -/** - * trap_init_hook - initialise system specific traps - * - * Description: - * Called as the final act of trap_init(). Used in VISWS to initialise - * the various board specific APIC traps. 
- **/ -void __init trap_init_hook(void) -{ - if (x86_quirks->arch_trap_init) { - if (x86_quirks->arch_trap_init()) - return; - } -} - -static struct irqaction irq0 = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, - .name = "timer" -}; - -/** - * pre_time_init_hook - do any specific initialisations before. - * - **/ -void __init pre_time_init_hook(void) -{ - if (x86_quirks->arch_pre_time_init) - x86_quirks->arch_pre_time_init(); -} - -/** - * time_init_hook - do any specific initialisations for the system timer. - * - * Description: - * Must plug the system timer interrupt source at HZ into the IRQ listed - * in irq_vectors.h:TIMER_IRQ - **/ -void __init time_init_hook(void) -{ - if (x86_quirks->arch_time_init) { - /* - * A nonzero return code does not mean failure, it means - * that the architecture quirk does not want any - * generic (timer) setup to be performed after this: - */ - if (x86_quirks->arch_time_init()) - return; - } - - irq0.mask = cpumask_of_cpu(0); - setup_irq(0, &irq0); -} - -#ifdef CONFIG_MCA -/** - * mca_nmi_hook - hook into MCA specific NMI chain - * - * Description: - * The MCA (Microchannel Architecture) has an NMI chain for NMI sources - * along the MCA bus. Use this to hook into that chain if you will need - * it. - **/ -void mca_nmi_hook(void) -{ - /* - * If I recall correctly, there's a whole bunch of other things that - * we can do to check for NMI problems, but that's all I know about - * at the moment. - */ - pr_warning("NMI generated from unknown source!\n"); -} -#endif - -static __init int no_ipi_broadcast(char *str) -{ - get_option(&str, &no_broadcast); - pr_info("Using %s mode\n", - no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); - return 1; -} -__setup("no_ipi_broadcast=", no_ipi_broadcast); - -static int __init print_ipi_mode(void) -{ - pr_info("Using IPI %s mode\n", - no_broadcast ? "No-Shortcut" : "Shortcut"); - return 0; -} - -late_initcall(print_ipi_mode); - diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c deleted file mode 100644 index d9d44c8c3db..00000000000 --- a/arch/x86/mach-generic/default.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Default generic APIC driver. This handles up to 8 CPUs. - */ -#define APIC_DEFINITION 1 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - /* - * Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; -} - -/* should be called last. 
*/ -static int probe_default(void) -{ - return 1; -} - -struct genapic apic_default = { - - .name = "default", - .probe = probe_default, - .acpi_madt_oem_check = NULL, - .apic_id_registered = default_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* logical delivery broadcast to all CPUs: */ - .irq_dest_mode = 1, - - .target_cpus = default_target_cpus, - .disable_esr = 0, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = default_check_apicid_used, - .check_apicid_present = default_check_apicid_present, - - .vector_allocation_domain = default_vector_allocation_domain, - .init_apic_ldr = default_init_apic_ldr, - - .ioapic_phys_id_map = default_ioapic_phys_id_map, - .setup_apic_routing = default_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = default_apicid_to_node, - .cpu_to_logical_apicid = default_cpu_to_logical_apicid, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = default_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = default_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = default_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0x0F << 24, - - .cpu_mask_to_apicid = default_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, - - .send_IPI_mask = default_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = default_send_IPI_allbutself, - .send_IPI_all = default_send_IPI_all, - .send_IPI_self = NULL, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, - .inquire_remote_apic = default_inquire_remote_apic, -}; -- cgit v1.2.3 From e7c64981949ec983ee41c2927c036fdb00d8e68b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:54:17 +0100 Subject: x86/Voyager: clean up BROKEN Kconfig reference CONFIG_BROKEN has been removed from the upstream kernel years ago, but X86_VOYAGER still had a stale reference to it - remove it. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d6218e6c982..45c7bdbc156 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -303,7 +303,7 @@ config X86_ELAN config X86_VOYAGER bool "Voyager (NCR)" - depends on X86_32 && (SMP || BROKEN) && !PCI + depends on X86_32 && SMP && !PCI help Voyager is an MCA-based 32-way capable SMP architecture proprietary to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. -- cgit v1.2.3 From 61b8172e57c4b8db1fcb7f5fd8dfda85ffd8e052 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 28 Jan 2009 19:55:34 +0100 Subject: x86: disable Voyager temporarily x86/Voyager does not build right now and it's unclear whether it will be cleaned up and ported to the subarch-less 32-bit x86 code - so disable it for now. If it's fixed we'll re-enable it - or remove it after some time. There's a very low number of systems running development kernels on x86/Voyager currently. 
(one or two on the whole planet) Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 45c7bdbc156..f983f4055b1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -303,7 +303,7 @@ config X86_ELAN config X86_VOYAGER bool "Voyager (NCR)" - depends on X86_32 && SMP && !PCI + depends on X86_32 && SMP && !PCI && BROKEN help Voyager is an MCA-based 32-way capable SMP architecture proprietary to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. -- cgit v1.2.3 From 72ee6ebbb3fe1ac23d1a669b177b858d2028bf09 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 16:57:49 +0100 Subject: x86/Voyager: remove MCA Kconfig quirk Remove Voyager Kconfig quirk: just like any other hardware platform users of Voyager systems can configure in the hardware drivers they need. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f983f4055b1..ae37a7504b5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1873,8 +1873,7 @@ config EISA source "drivers/eisa/Kconfig" config MCA - bool "MCA support" if !X86_VOYAGER - default y if X86_VOYAGER + bool "MCA support" help MicroChannel Architecture is found in some IBM PS/2 machines and laptops. It is a bus system similar to PCI or ISA. See -- cgit v1.2.3 From 07ef83ae9e99377f38f4d0e472ba6ff09324e5e9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 16:59:11 +0100 Subject: x86/Voyager: remove NATSEMI Kconfig quirk x86/Voyager has this quirk for SCx200 support: config SCx200 tristate "NatSemi SCx200 support" depends on !X86_VOYAGER Remove it - Voyager users can disable drivers they dont need. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ae37a7504b5..9727dd59f54 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1884,7 +1884,6 @@ source "drivers/mca/Kconfig" config SCx200 tristate "NatSemi SCx200 support" - depends on !X86_VOYAGER help This provides basic support for National Semiconductor's (now AMD's) Geode processors. The driver probes for the -- cgit v1.2.3 From e0ec9483dbe8f534e0f3ef413f9eba9a5ff78050 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:01:14 +0100 Subject: x86/Voyager: remove KVM Kconfig quirk Voyager and other subarchitectures have this Kconfig quirk: select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) This is unnecessary, as KVM cleanly detects based on CPUID capabilities. 
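As a rough illustration of that runtime detection (an editor's sketch, not part of this patch, written as a stand-alone user-space program rather than with the kernel's own cpuid helpers): Intel advertises VMX in CPUID leaf 1, ECX bit 5, and AMD advertises SVM in leaf 0x80000001, ECX bit 2.

#include <cpuid.h>
#include <stdio.h>

/* Return 1 if the CPU advertises hardware virtualization (VMX or SVM). */
static int cpu_has_hw_virt(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Intel: CPUID.1:ECX.VMX [bit 5] */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 5)))
		return 1;

	/* AMD: CPUID.80000001h:ECX.SVM [bit 2] */
	if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 2)))
		return 1;

	return 0;
}

int main(void)
{
	printf("hardware virtualization %s\n",
	       cpu_has_hw_virt() ? "available" : "not available");
	return 0;
}

A capability check of this kind is what lets HAVE_KVM be selected unconditionally: a kernel that ends up on a CPU without VMX/SVM simply refuses to load KVM at runtime.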
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9727dd59f54..a783fe7759c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -34,7 +34,7 @@ config X86 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST - select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) + select HAVE_KVM select HAVE_ARCH_KGDB if !X86_VOYAGER select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_DMA_COHERENT if X86_32 -- cgit v1.2.3 From 49793b0341a802cf5ee4179e837a2eb20f12c9fe Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:02:29 +0100 Subject: x86/Voyager: remove KGDB Kconfig quirk x86/Voyager has this KGDB quirk: select HAVE_ARCH_KGDB if !X86_VOYAGER This is completely pointless - there's nothing in KGDB that cannot work on Voyager. Remove it. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a783fe7759c..12b433aaaa6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -35,7 +35,7 @@ config X86 select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KVM - select HAVE_ARCH_KGDB if !X86_VOYAGER + select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_DMA_COHERENT if X86_32 select HAVE_EFFICIENT_UNALIGNED_ACCESS -- cgit v1.2.3 From aced3cee555f0b2fd58501e9b8a8a1295011e134 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:03:24 +0100 Subject: x86/Voyager: remove HIBERNATION Kconfig quirk Voyager has this hibernation quirk: config ARCH_HIBERNATION_POSSIBLE def_bool y depends on !SMP || !X86_VOYAGER Hibernation is a generic facility provided on all x86 platforms. If it is buggy on Voyager then that bug should be fixed - not worked around. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 12b433aaaa6..df952e788a6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -140,7 +140,7 @@ config HAVE_CPUMASK_OF_CPU_MAP config ARCH_HIBERNATION_POSSIBLE def_bool y - depends on !SMP || !X86_VOYAGER + depends on !SMP config ARCH_SUSPEND_POSSIBLE def_bool y -- cgit v1.2.3 From f2fc0e3071230bb9ea9f64a08c3e619ad1357cfb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:04:39 +0100 Subject: x86/Voyager: remove ARCH_SUSPEND_POSSIBLE Kconfig quirk Voyager has this Kconfig quirk for suspend/resume: config ARCH_SUSPEND_POSSIBLE def_bool y depends on !X86_VOYAGER The proper mechanism to not suspend on a piece of hardware to disable CONFIG_SUSPEND. Remove the quirk. 
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index df952e788a6..8e6413e0e7c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -144,7 +144,6 @@ config ARCH_HIBERNATION_POSSIBLE config ARCH_SUSPEND_POSSIBLE def_bool y - depends on !X86_VOYAGER config ZONE_DMA32 bool -- cgit v1.2.3 From 3e5095d15276efd14a45393666b1bb7536bf179f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:07:08 +0100 Subject: x86: replace CONFIG_X86_SMP with CONFIG_SMP The x86/Voyager subarch used to have this distinction between 'x86 SMP support' and 'Voyager SMP support': config X86_SMP bool depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) This is a pointless distinction - Voyager can (and already does) use smp_ops to implement various SMP quirks it has - and it can be extended more to cover all the specialities of Voyager. So remove this complication in the Kconfig space. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 7 +------ arch/x86/Kconfig.debug | 2 +- arch/x86/include/asm/entry_arch.h | 2 +- arch/x86/include/asm/hw_irq.h | 2 +- arch/x86/kernel/Makefile | 4 ++-- arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/cpu/addon_cpuid_features.c | 2 +- arch/x86/kernel/process.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/tsc.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/mm/Makefile | 2 +- 12 files changed, 13 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8e6413e0e7c..3671506fb8d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -173,11 +173,6 @@ config GENERIC_PENDING_IRQ depends on GENERIC_HARDIRQS && SMP default y -config X86_SMP - bool - depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) - default y - config USE_GENERIC_SMP_HELPERS def_bool y depends on SMP @@ -203,7 +198,7 @@ config X86_BIOS_REBOOT config X86_TRAMPOLINE bool - depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) + depends on SMP || (64BIT && ACPI_SLEEP) default y config KTIME_SCALAR diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 28f111461ca..a38dd6064f1 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -83,7 +83,7 @@ config DEBUG_PAGEALLOC config DEBUG_PER_CPU_MAPS bool "Debug access to per_cpu maps" depends on DEBUG_KERNEL - depends on X86_SMP + depends on SMP default n help Say Y to verify that the per_cpu map being accessed has diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index b87b077cc23..854d538ae85 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -9,7 +9,7 @@ * is no hardware IRQ pin equivalent for them, they are triggered * through the ICC by us (IPIs) */ -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index bfa921fad13..41550797396 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -98,7 +98,7 @@ extern asmlinkage void qic_call_function_interrupt(void); extern void smp_apic_timer_interrupt(struct pt_regs *); extern void smp_spurious_interrupt(struct pt_regs *); extern void smp_error_interrupt(struct pt_regs *); -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP extern void smp_reschedule_interrupt(struct 
pt_regs *); extern void smp_call_function_interrupt(struct pt_regs *); extern void smp_call_function_single_interrupt(struct pt_regs *); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 1bc4cdc797d..dc05a57a474 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -57,8 +57,8 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o -obj-$(CONFIG_X86_SMP) += smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 81efe86eca8..968c817762a 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1900,7 +1900,7 @@ void __cpuinit generic_processor_info(int apicid, int version) } #endif -#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64) +#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; #endif diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 4a48bb40974..e48640cfac0 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) */ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) { -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx, sub_index; unsigned int ht_mask_width, core_plus_mask_width; unsigned int core_select_mask, core_level_siblings; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e68bb9e3086..89537f678b2 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -344,7 +344,7 @@ static void c1e_idle(void) void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP if (pm_idle == poll_idle && smp_num_siblings > 1) { printk(KERN_WARNING "WARNING: polling idle and HT enabled," " performance may degrade.\n"); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e645d4793e4..eeb180b1d7a 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -588,7 +588,7 @@ early_param("elfcorehdr", setup_elfcorehdr); static int __init default_update_genapic(void) { -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP if (!apic->wakeup_cpu) apic->wakeup_cpu = wakeup_secondary_cpu_via_init; #endif diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e5816863..83d53ce5d4c 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void) if (!cpu_has_tsc || tsc_unstable) return 1; -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP if (apic_is_clustered_box()) return 1; #endif diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index c4c1f9e0940..a4791ef412d 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void) */ clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); local_irq_disable(); -#ifdef CONFIG_X86_SMP +#ifdef CONFIG_SMP /* * XXX handle_percpu_irq only defined for SMP; we need to switch over * to using it, since this is a local interrupt, which each CPU must diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 
9f05157220f..2b938a38491 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,7 +1,7 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ pat.o pgtable.o gup.o -obj-$(CONFIG_X86_SMP) += tlb.o +obj-$(CONFIG_SMP) += tlb.o obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o -- cgit v1.2.3 From c0b5842a457d44c8788b3fd0c64969be7ef673cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:13:05 +0100 Subject: x86: generalize boot_cpu_id x86/Voyager can boot on non-zero processors. While that can probably be fixed by properly remapping the physical CPU IDs, keep boot_cpu_id for now for easier transition - and expand it to all of x86. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ---- arch/x86/include/asm/cpu.h | 6 +----- arch/x86/kernel/setup.c | 14 ++++++++++++++ arch/x86/kernel/smpboot.c | 12 ------------ 4 files changed, 15 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3671506fb8d..f4dd851384d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -238,10 +238,6 @@ config SMP If you don't know what to do here, say N. -config X86_HAS_BOOT_CPU_ID - def_bool y - depends on X86_VOYAGER - config SPARSE_IRQ bool "Support sparse irq numbering" depends on PCI_MSI || HT_IRQ diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index f03b23e3286..b185091bf19 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,10 +32,6 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); -#ifdef CONFIG_X86_HAS_BOOT_CPU_ID -extern unsigned char boot_cpu_id; -#else -#define boot_cpu_id 0 -#endif +extern unsigned int boot_cpu_id; #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index eeb180b1d7a..609e5af6028 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -112,6 +112,20 @@ #define ARCH_SETUP #endif +unsigned int boot_cpu_id __read_mostly; + +#ifdef CONFIG_X86_64 +int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +int default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return __default_check_phys_apicid_present(boot_cpu_physical_apicid); +} +#endif + #ifndef CONFIG_DEBUG_BOOT_PARAMS struct boot_params __initdata boot_params; #else diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e90b3e50b54..bc7e220ba0b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -905,18 +905,6 @@ do_rest: return boot_error; } -#ifdef CONFIG_X86_64 -int default_cpu_present_to_apicid(int mps_cpu) -{ - return __default_cpu_present_to_apicid(mps_cpu); -} - -int default_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return __default_check_phys_apicid_present(boot_cpu_physical_apicid); -} -#endif - int __cpuinit native_cpu_up(unsigned int cpu) { int apicid = apic->cpu_present_to_apicid(cpu); -- cgit v1.2.3 From 23394d1c9346d9c0aabbc1b6fca52a9c0aa1c297 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:16:16 +0100 Subject: x86/Voyager: remove X86_HT Kconfig quirk Voyager has this Kconfig quirk: depends on (X86_32 && !X86_VOYAGER) || X86_64 That is unnecessary as HT support is CPUID driven and explicitly enumerated. 
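For illustration (an editor's sketch, not part of this patch, again as a stand-alone program): the classic enumeration reads CPUID leaf 1, where EDX bit 28 is the HTT flag and EBX bits 23:16 give the number of addressable logical processors per package.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	if (edx & (1u << 28))	/* HTT flag */
		printf("HT enumerated, up to %u logical CPUs per package\n",
		       (ebx >> 16) & 0xff);
	else
		printf("no hyper-threading enumerated\n");

	return 0;
}

Because the topology is discovered this way at boot, X86_HT only needs to depend on SMP.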
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f4dd851384d..1d50c9de4d2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -188,7 +188,6 @@ config X86_64_SMP config X86_HT bool depends on SMP - depends on (X86_32 && !X86_VOYAGER) || X86_64 default y config X86_BIOS_REBOOT -- cgit v1.2.3 From f095df0a0cb35a52605541f619d038339b90d7cc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:17:55 +0100 Subject: x86/Voyager: remove X86_BIOS_REBOOT Kconfig quirk Voyager has this Kconfig quirk: config X86_BIOS_REBOOT bool depends on !X86_VOYAGER default y Voyager should use the existing machine_ops.emergency_restart reboot quirk mechanism instead of a build-time quirk. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 6 ------ arch/x86/include/asm/proto.h | 4 ---- arch/x86/kernel/Makefile | 2 +- 3 files changed, 1 insertion(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1d50c9de4d2..c0d79ab366d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -190,11 +190,6 @@ config X86_HT depends on SMP default y -config X86_BIOS_REBOOT - bool - depends on !X86_VOYAGER - default y - config X86_TRAMPOLINE bool depends on SMP || (64BIT && ACPI_SLEEP) @@ -1361,7 +1356,6 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" - depends on X86_BIOS_REBOOT help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index d6a22f92ba7..49fb3ecf3bb 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void); extern void check_efer(void); -#ifdef CONFIG_X86_BIOS_REBOOT extern int reboot_force; -#else -static const int reboot_force = 0; -#endif long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index dc05a57a474..24f357e7557 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -50,7 +50,7 @@ obj-y += step.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += cpu/ obj-y += acpi/ -obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o +obj-y += reboot.o obj-$(CONFIG_MCA) += mca_32.o obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_X86_CPUID) += cpuid.o -- cgit v1.2.3 From 550fe4f198558c147c6b8273a709568222a1668a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:28:08 +0100 Subject: x86/Voyager: remove X86_FIND_SMP_CONFIG Kconfig quirk x86/Voyager had this Kconfig quirk: config X86_FIND_SMP_CONFIG def_bool y depends on X86_MPPARSE || X86_VOYAGER Which splits off the find_smp_config() callback into a build-time quirk. Voyager should use the existing x86_quirks.mach_find_smp_config() callback to introduce SMP-config quirks. NUMAQ-32 and VISWS already use this. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ---- arch/x86/include/asm/mpspec.h | 4 +++- arch/x86/kernel/setup.c | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c0d79ab366d..df7cb8d68e2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -254,10 +254,6 @@ config NUMA_MIGRATE_IRQ_DESC If you don't know what to do here, say N. 
-config X86_FIND_SMP_CONFIG - def_bool y - depends on X86_MPPARSE || X86_VOYAGER - config X86_MPPARSE bool "Enable MPS table" if ACPI default y diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 03fb0d39654..d22f732eab8 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -56,11 +56,13 @@ extern int smp_found_config; extern int mpc_default_type; extern unsigned long mp_lapic_addr; -extern void find_smp_config(void); extern void get_smp_config(void); + #ifdef CONFIG_X86_MPPARSE +extern void find_smp_config(void); extern void early_reserve_e820_mpc_new(void); #else +static inline void find_smp_config(void) { } static inline void early_reserve_e820_mpc_new(void) { } #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 609e5af6028..6abce6703c5 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -905,12 +905,11 @@ void __init setup_arch(char **cmdline_p) */ acpi_reserve_bootmem(); #endif -#ifdef CONFIG_X86_FIND_SMP_CONFIG /* * Find and reserve possible boot-time SMP configuration: */ find_smp_config(); -#endif + reserve_crashkernel(); #ifdef CONFIG_X86_64 -- cgit v1.2.3 From 36619a8a80793a803588a17f772313d5c948357d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:33:34 +0100 Subject: x86/VisWS: remove Kconfig quirk VisWS has this quirk currently: config X86_VISWS bool "SGI 320/540 (Visual Workstation)" depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT The !Voyager quirk is unnecessary. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index df7cb8d68e2..f59d2920198 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -351,7 +351,7 @@ endchoice config X86_VISWS bool "SGI 320/540 (Visual Workstation)" - depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT + depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT help The SGI Visual Workstation series is an IA32-based workstation based on SGI systems chips with some legacy PC hardware attached. -- cgit v1.2.3 From f154f47d5180c2012bf97999e6c600d45db8af2d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:34:56 +0100 Subject: x86/Voyager: remove VMI Kconfig quirk x86/Voyager has this build-time quirk: bool "VMI Guest support" select PARAVIRT depends on X86_32 depends on !X86_VOYAGER Since VMI is auto-detected (and Voyager will be auto-detected) there's no reason for this quirk. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f59d2920198..4bd0c8febae 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -406,7 +406,6 @@ config VMI bool "VMI Guest support" select PARAVIRT depends on X86_32 - depends on !X86_VOYAGER help VMI provides a paravirtualized interface to the VMware ESX server (it could be used by other hypervisors in theory too, but is not -- cgit v1.2.3 From e084e531000a488d2d27864266c13ac824575a8b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:36:31 +0100 Subject: x86/Voyager: remove KVM_CLOCK quirk Voyager has this build-time quirk to exclude KVM_CLOCK: bool "KVM paravirtualized clock" select PARAVIRT select PARAVIRT_CLOCK depends on !X86_VOYAGER Voyager support built into a kernel image does not exclude KVM paravirt clock support - so remove this quirk. 
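The reason this is safe is that kvmclock only activates when the kernel detects at boot that it is running under KVM. A hedged, user-space-style sketch of that detection (editor's addition, not part of this patch): CPUID leaf 1, ECX bit 31 is the "hypervisor present" bit, and KVM answers the vendor leaf 0x40000000 with the "KVMKVMKVM" signature.

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	/* CPUID.1:ECX bit 31 - set when running under any hypervisor */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 31))) {
		printf("bare metal - a built-in kvmclock simply stays idle\n");
		return 0;
	}

	/* Hypervisor vendor leaf; KVM reports "KVMKVMKVM" here */
	__cpuid(0x40000000, eax, ebx, ecx, edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = '\0';

	printf("hypervisor signature: \"%s\"\n", sig);
	return 0;
}

On Voyager hardware the hypervisor bit is never set, so compiled-in paravirt clock code stays inert.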
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4bd0c8febae..0bf0653969d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -416,7 +416,6 @@ config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT select PARAVIRT_CLOCK - depends on !X86_VOYAGER help Turning on this option will allow you to run a paravirtualized clock when running over the KVM hypervisor. Instead of relying on a PIT -- cgit v1.2.3 From 54523edd237b9e792a3b76988fde23a91d739f43 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:37:33 +0100 Subject: x86/Voyager: remove KVM_GUEST quirk Voyager has this quirk currently: config KVM_GUEST bool "KVM Guest support" select PARAVIRT depends on !X86_VOYAGER Voyager support built into a kernel image does not exclude KVM paravirt guest support - so remove this quirk. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0bf0653969d..363111f40f3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -426,7 +426,6 @@ config KVM_CLOCK config KVM_GUEST bool "KVM Guest support" select PARAVIRT - depends on !X86_VOYAGER help This option enables various optimizations for running under the KVM hypervisor. -- cgit v1.2.3 From c3e6a2042fef33b747d2ae3961f5312af801973d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:38:46 +0100 Subject: x86/Voyager: remove PARAVIRT Kconfig quirk Remove this Kconfig quirk: config PARAVIRT bool "Enable paravirtualization code" depends on !X86_VOYAGER help Voyager support built into a kernel does not preclude paravirt support. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 363111f40f3..82105349612 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -434,7 +434,6 @@ source "arch/x86/lguest/Kconfig" config PARAVIRT bool "Enable paravirtualization code" - depends on !X86_VOYAGER help This changes the kernel so it can modify itself when it is run under a hypervisor, potentially improving performance significantly -- cgit v1.2.3 From 7cd92366a593246650cc7d6198e2c7d3af8c1d8a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:40:48 +0100 Subject: x86/Voyager: remove APIC/IO-APIC Kbuild quirk The lapic/ioapic code properly auto-detects and is safe to run on CPUs that have no local APIC. (or which have their lapic turned off in the hardware) Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 82105349612..6fbb3639d7b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -642,7 +642,7 @@ source "kernel/Kconfig.preempt" config X86_UP_APIC bool "Local APIC support on uniprocessors" - depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH) + depends on X86_32 && !SMP && !X86_GENERICARCH help A local APIC (Advanced Programmable Interrupt Controller) is an integrated interrupt controller in the CPU. 
If you have a single-CPU @@ -667,11 +667,11 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y - depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) + depends on X86_64 || SMP || X86_GENERICARCH || X86_UP_APIC config X86_IO_APIC def_bool y - depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) + depends on X86_64 || SMP || X86_GENERICARCH || X86_UP_APIC config X86_VISWS_APIC def_bool y -- cgit v1.2.3 From e006235e5b9cfb785ecbc05551788e33f96ea0ce Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:45:17 +0100 Subject: x86/Voyager: remove MCE quirk If no MCE code is desired on Voyager hw then the solution is to turn them off in the .config - and to extend the MCE code to not initialize on Voyager. Remove the build-time quirk. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6fbb3639d7b..7cf9f0a8c8c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -703,7 +703,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check Exception" - depends on !X86_VOYAGER ---help--- Machine Check Exception support allows the processor to notify the kernel if it detects a problem (e.g. overheating, component failure). -- cgit v1.2.3 From 4b19ed915576e8034c3653b4b10b79bde10f69fa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:47:24 +0100 Subject: x86/Voyager: remove HOTPLUG_CPU Kconfig quirk Voyager has this Kconfig quirk: config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP && HOTPLUG && !X86_VOYAGER But this exception will be moot once Voyager starts using the generic x86 code. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7cf9f0a8c8c..206645ac6f6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1470,7 +1470,7 @@ config PHYSICAL_ALIGN config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" - depends on SMP && HOTPLUG && !X86_VOYAGER + depends on SMP && HOTPLUG ---help--- Say Y here to allow turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. -- cgit v1.2.3 From 1c61d8c309a4080980474de8c6689527be180782 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:49:22 +0100 Subject: x86/Voyager: remove power management Kconfig quirk Voyager has this PM/ACPI Kconfig quirk: menu "Power management and ACPI options" depends on !X86_VOYAGER Most of the PM features are auto-detect so they should be safe to run on just about any hardware. (If not, those instances need fixing.) In any case, if a kernel is built for Voyager, the power management options can be disabled. 
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 206645ac6f6..f95c3b40e45 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1551,7 +1551,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID depends on NUMA menu "Power management and ACPI options" - depends on !X86_VOYAGER config ARCH_HIBERNATION_HEADER def_bool y -- cgit v1.2.3 From 1ec2dafd937c0f6fed46cbd8f6878f2c1db4a623 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 17:51:37 +0100 Subject: x86/Voyager: remove ISA quirk Voyager has this ISA quirk (because Voyager has no ISA support): config ISA bool "ISA support" depends on !X86_VOYAGER There's a ton of x86 hardware that does not support ISA, and because most ISA drivers cannot auto-detect in a safe way, the convention in the kernel has always been to not enable ISA drivers if they are not needed. Voyager users can do likewise - no need for a Kconfig quirk. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f95c3b40e45..38ed1a6c6d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1819,7 +1819,6 @@ if X86_32 config ISA bool "ISA support" - depends on !X86_VOYAGER help Find out whether you have ISA slots on your motherboard. ISA is the name of a bus system, i.e. the way the CPU talks to the other stuff -- cgit v1.2.3 From 06ac8346af04f6a972072f6c5780ba734832ad13 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:11:43 +0100 Subject: x86: cleanup, introduce CONFIG_NON_STANDARD_PLATFORMS Introduce a Y/N Kconfig option for non-PC x86 platforms. Make VisWS, RDC321 and SGI/UV depend on this. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 38ed1a6c6d8..b090e5a27da 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -349,9 +349,23 @@ config X86_VSMP endchoice +config X86_NON_STANDARD + bool "Support for non-standard x86 platforms" + help + If you disable this option then the kernel will only support + standard PC platforms. (which covers the vast majority of + systems out there.) + + If you enable this option then you'll be able to select a number + of less common non-PC x86 platforms: VisWS, RDC321, SGI/UV. + + If you have one of these systems, or if you want to build a + generic distribution kernel, say Y here - otherwise say N. + config X86_VISWS bool "SGI 320/540 (Visual Workstation)" depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT + depends on X86_NON_STANDARD help The SGI Visual Workstation series is an IA32-based workstation based on SGI systems chips with some legacy PC hardware attached. @@ -364,6 +378,7 @@ config X86_VISWS config X86_RDC321X bool "RDC R-321x SoC" depends on X86_32 + depends on X86_NON_STANDARD select M486 select X86_REBOOTFIXUPS help @@ -374,6 +389,7 @@ config X86_RDC321X config X86_UV bool "SGI Ultraviolet" depends on X86_64 + depends on X86_NON_STANDARD help This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. 
-- cgit v1.2.3 From 9e111f3e167a14dd6252cff14fc7dd2ba4c650c6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:18:25 +0100 Subject: x86: move ELAN to the NON_STANDARD_PLATFORM section Move X86_ELAN (old, AMD based web-boxes) from the subarchitecture menu to the non-standard-platform section. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b090e5a27da..b7617e3781a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -271,16 +271,6 @@ config X86_PC help Choose this option if your computer is a standard PC or compatible. -config X86_ELAN - bool "AMD Elan" - depends on X86_32 - help - Select this for an AMD Elan processor. - - Do not use this option for K6/Athlon/Opteron processors! - - If unsure, choose "PC-compatible" instead. - config X86_VOYAGER bool "Voyager (NCR)" depends on X86_32 && SMP && !PCI && BROKEN @@ -394,6 +384,17 @@ config X86_UV This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. +config X86_ELAN + bool "AMD Elan" + depends on X86_32 + depends on X86_NON_STANDARD + help + Select this for an AMD Elan processor. + + Do not use this option for K6/Athlon/Opteron processors! + + If unsure, choose "PC-compatible" instead. + config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" -- cgit v1.2.3 From f67ae5c9e52e385492b94c14376e322004701555 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:20:09 +0100 Subject: x86: move VOYAGER to the NON_STANDARD_PLATFORM section Move X86_ELAN (old, NCR hw platform built on Intel CPUs) from the subarchitecture menu to the non-standard-platform section. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b7617e3781a..207dc9592d5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -271,18 +271,6 @@ config X86_PC help Choose this option if your computer is a standard PC or compatible. -config X86_VOYAGER - bool "Voyager (NCR)" - depends on X86_32 && SMP && !PCI && BROKEN - help - Voyager is an MCA-based 32-way capable SMP architecture proprietary - to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. - - *** WARNING *** - - If you do not specifically know you have a Voyager based machine, - say N here, otherwise the kernel you build will not be bootable. - config X86_GENERICARCH bool "Generic architecture" depends on X86_32 @@ -395,6 +383,19 @@ config X86_ELAN If unsure, choose "PC-compatible" instead. +config X86_VOYAGER + bool "Voyager (NCR)" + depends on X86_32 && SMP && !PCI && BROKEN + depends on X86_NON_STANDARD + help + Voyager is an MCA-based 32-way capable SMP architecture proprietary + to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. + + *** WARNING *** + + If you do not specifically know you have a Voyager based machine, + say N here, otherwise the kernel you build will not be bootable. 
+ config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" -- cgit v1.2.3 From 9c39801763ed6e08ea8bc694c5ab936643a2b763 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:24:57 +0100 Subject: x86: move non-standard 32-bit platform Kconfig entries - make X86_GENERICARCH depend X86_NON_STANDARD - move X86_SUMMIT, X86_ES7000 and X86_BIGSMP out of the subarchitecture menu and under this option Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 88 +++++++++++++++++++++++++++----------------------------- 1 file changed, 43 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 207dc9592d5..2fe298297ce 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -271,51 +271,6 @@ config X86_PC help Choose this option if your computer is a standard PC or compatible. -config X86_GENERICARCH - bool "Generic architecture" - depends on X86_32 - help - This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default - subarchitectures. It is intended for a generic binary kernel. - if you select them all, kernel will probe it one by one. and will - fallback to default. - -if X86_GENERICARCH - -config X86_NUMAQ - bool "NUMAQ (IBM/Sequent)" - depends on SMP && X86_32 && PCI && X86_MPPARSE - select NUMA - help - This option is used for getting Linux to run on a NUMAQ (IBM/Sequent) - NUMA multiquad box. This changes the way that processors are - bootstrapped, and uses Clustered Logical APIC addressing mode instead - of Flat Logical. You will need a new lynxer.elf file to flash your - firmware with - send email to . - -config X86_SUMMIT - bool "Summit/EXA (IBM x440)" - depends on X86_32 && SMP - help - This option is needed for IBM systems that use the Summit/EXA chipset. - In particular, it is needed for the x440. - -config X86_ES7000 - bool "Support for Unisys ES7000 IA32 series" - depends on X86_32 && SMP - help - Support for Unisys ES7000 systems. Say 'Y' here if this kernel is - supposed to run on an IA32-based Unisys ES7000 system. - -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_32 && SMP - help - This option is needed for the systems that have more than 8 CPUs - and if the system is not of any sub-arch type above. - -endif - config X86_VSMP bool "Support for ScaleMP vSMP" select PARAVIRT @@ -396,6 +351,49 @@ config X86_VOYAGER If you do not specifically know you have a Voyager based machine, say N here, otherwise the kernel you build will not be bootable. +config X86_GENERICARCH + bool "Support non-standard 32-bit SMP architectures" + depends on X86_32 && SMP + depends on X86_NON_STANDARD + help + This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default + subarchitectures. It is intended for a generic binary kernel. + if you select them all, kernel will probe it one by one. and will + fallback to default. + +config X86_NUMAQ + bool "NUMAQ (IBM/Sequent)" + depends on X86_GENERICARCH + select NUMA + select X86_MPPARSE + help + This option is used for getting Linux to run on a NUMAQ (IBM/Sequent) + NUMA multiquad box. This changes the way that processors are + bootstrapped, and uses Clustered Logical APIC addressing mode instead + of Flat Logical. You will need a new lynxer.elf file to flash your + firmware with - send email to . + +config X86_SUMMIT + bool "Summit/EXA (IBM x440)" + depends on X86_GENERICARCH + help + This option is needed for IBM systems that use the Summit/EXA chipset. + In particular, it is needed for the x440. 
+ +config X86_ES7000 + bool "Support for Unisys ES7000 IA32 series" + depends on X86_GENERICARCH + help + Support for Unisys ES7000 systems. Say 'Y' here if this kernel is + supposed to run on an IA32-based Unisys ES7000 system. + +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on X86_GENERICARCH + help + This option is needed for the systems that have more than 8 CPUs + and if the system is not of any sub-arch type above. + config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" -- cgit v1.2.3 From 6a48565ed6ac76f351def25cd5e9f181331065f6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:29:13 +0100 Subject: x86: move X86_VSMP from subarch menu Move X86_VSMP out of the subarch menu - this way it can be enabled together with standard PC support as well, in the same kernel. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fe298297ce..ddddfb8fe09 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -271,15 +271,6 @@ config X86_PC help Choose this option if your computer is a standard PC or compatible. -config X86_VSMP - bool "Support for ScaleMP vSMP" - select PARAVIRT - depends on X86_64 && PCI - help - Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is - supposed to run on these EM64T-based machines. Only choose this option - if you have one of these machines. - endchoice config X86_NON_STANDARD @@ -327,6 +318,16 @@ config X86_UV This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. +config X86_VSMP + bool "Support for ScaleMP vSMP" + select PARAVIRT + depends on X86_64 && PCI + depends on X86_NON_STANDARD + help + Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is + supposed to run on these EM64T-based machines. Only choose this option + if you have one of these machines. + config X86_ELAN bool "AMD Elan" depends on X86_32 -- cgit v1.2.3 From e2c75d9f54334646b3dcdf1fea0d1afe7bfbf644 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:31:41 +0100 Subject: x86: remove the subarch menu Remove the subarch menu and standardize on X86_PC. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ddddfb8fe09..4773f1c54fb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -262,16 +262,8 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -choice - prompt "Subarchitecture Type" - default X86_PC - config X86_PC - bool "PC-compatible" - help - Choose this option if your computer is a standard PC or compatible. - -endchoice + def_bool y config X86_NON_STANDARD bool "Support for non-standard x86 platforms" -- cgit v1.2.3 From e0c7ae376a13fd79a4dad8becab51040d13dfa90 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:43:09 +0100 Subject: x86: rename X86_GENERICARCH to X86_32_NON_STANDARD X86_GENERICARCH is a misnomer - it contains non-PC 32-bit architectures that are not included in the default build. Rename it to X86_32_NON_STANDARD. 
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 22 +++++++++++----------- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/mpparse.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4773f1c54fb..1427cb1ccd9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -344,7 +344,7 @@ config X86_VOYAGER If you do not specifically know you have a Voyager based machine, say N here, otherwise the kernel you build will not be bootable. -config X86_GENERICARCH +config X86_32_NON_STANDARD bool "Support non-standard 32-bit SMP architectures" depends on X86_32 && SMP depends on X86_NON_STANDARD @@ -356,7 +356,7 @@ config X86_GENERICARCH config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" - depends on X86_GENERICARCH + depends on X86_32_NON_STANDARD select NUMA select X86_MPPARSE help @@ -368,21 +368,21 @@ config X86_NUMAQ config X86_SUMMIT bool "Summit/EXA (IBM x440)" - depends on X86_GENERICARCH + depends on X86_32_NON_STANDARD help This option is needed for IBM systems that use the Summit/EXA chipset. In particular, it is needed for the x440. config X86_ES7000 bool "Support for Unisys ES7000 IA32 series" - depends on X86_GENERICARCH + depends on X86_32_NON_STANDARD help Support for Unisys ES7000 systems. Say 'Y' here if this kernel is supposed to run on an IA32-based Unisys ES7000 system. config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_GENERICARCH + depends on X86_32_NON_STANDARD help This option is needed for the systems that have more than 8 CPUs and if the system is not of any sub-arch type above. @@ -475,11 +475,11 @@ config MEMTEST config X86_SUMMIT_NUMA def_bool y - depends on X86_32 && NUMA && X86_GENERICARCH + depends on X86_32 && NUMA && X86_32_NON_STANDARD config X86_CYCLONE_TIMER def_bool y - depends on X86_GENERICARCH + depends on X86_32_NON_STANDARD source "arch/x86/Kconfig.cpu" @@ -651,7 +651,7 @@ source "kernel/Kconfig.preempt" config X86_UP_APIC bool "Local APIC support on uniprocessors" - depends on X86_32 && !SMP && !X86_GENERICARCH + depends on X86_32 && !SMP && !X86_32_NON_STANDARD help A local APIC (Advanced Programmable Interrupt Controller) is an integrated interrupt controller in the CPU. 
If you have a single-CPU @@ -676,11 +676,11 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y - depends on X86_64 || SMP || X86_GENERICARCH || X86_UP_APIC + depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC config X86_IO_APIC def_bool y - depends on X86_64 || SMP || X86_GENERICARCH || X86_UP_APIC + depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC config X86_VISWS_APIC def_bool y @@ -1122,7 +1122,7 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SPARSEMEM_ENABLE def_bool y - depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH + depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_32_NON_STANDARD select SPARSEMEM_STATIC if X86_32 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index cb8b52785e3..7352c60f29d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1335,7 +1335,7 @@ static void __init acpi_process_madt(void) if (!error) { acpi_lapic = 1; -#ifdef CONFIG_X86_GENERICARCH +#ifdef CONFIG_X86_32_NON_STANDARD generic_bigsmp_probe(); #endif /* diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 94fe71029c3..89aaced51bd 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -372,7 +372,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) (*x86_quirks->mpc_record)++; } -#ifdef CONFIG_X86_GENERICARCH +#ifdef CONFIG_X86_32_NON_STANDARD generic_bigsmp_probe(); #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6abce6703c5..f64e1a487c9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -936,7 +936,7 @@ void __init setup_arch(char **cmdline_p) map_vsyscall(); #endif -#ifdef CONFIG_X86_GENERICARCH +#ifdef CONFIG_X86_32_NON_STANDARD generic_apic_probe(); #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index bc7e220ba0b..fc80bc18943 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1007,7 +1007,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_WARNING "More than 8 CPUs detected - skipping them.\n" - "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); + "Use CONFIG_X86_32_NON_STANDARD and CONFIG_X86_BIGSMP.\n"); nr = 0; for_each_present_cpu(cpu) { -- cgit v1.2.3 From 3769e7b4d8ef113e08221a210f849ba57475aff5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Jan 2009 18:46:23 +0100 Subject: x86/Voyager: move to the X86_32_NON_STANDARD code section Make Voyager depend on X86_32_NON_STANDARD - it is a non-standard 32-bit SMP architecture. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1427cb1ccd9..5bf0e0c5828 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -331,19 +331,6 @@ config X86_ELAN If unsure, choose "PC-compatible" instead. -config X86_VOYAGER - bool "Voyager (NCR)" - depends on X86_32 && SMP && !PCI && BROKEN - depends on X86_NON_STANDARD - help - Voyager is an MCA-based 32-way capable SMP architecture proprietary - to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. - - *** WARNING *** - - If you do not specifically know you have a Voyager based machine, - say N here, otherwise the kernel you build will not be bootable. 
- config X86_32_NON_STANDARD bool "Support non-standard 32-bit SMP architectures" depends on X86_32 && SMP @@ -354,6 +341,13 @@ config X86_32_NON_STANDARD if you select them all, kernel will probe it one by one. and will fallback to default. +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on X86_32_NON_STANDARD + help + This option is needed for the systems that have more than 8 CPUs + and if the system is not of any sub-arch type above. + config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" depends on X86_32_NON_STANDARD @@ -380,12 +374,18 @@ config X86_ES7000 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is supposed to run on an IA32-based Unisys ES7000 system. -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" +config X86_VOYAGER + bool "Voyager (NCR)" + depends on SMP && !PCI && BROKEN depends on X86_32_NON_STANDARD help - This option is needed for the systems that have more than 8 CPUs - and if the system is not of any sub-arch type above. + Voyager is an MCA-based 32-way capable SMP architecture proprietary + to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. + + *** WARNING *** + + If you do not specifically know you have a Voyager based machine, + say N here, otherwise the kernel you build will not be bootable. config SCHED_OMIT_FRAME_POINTER def_bool y -- cgit v1.2.3 From 0a1e8869f453ceda121a1ff8d6c4380832377be7 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 28 Jan 2009 23:21:25 +0300 Subject: x86: trampoline_64.S - use predefined constants with simplification Impact: cleanup We may use macros from processor-flags.h instead of hardcoding bits. Actually it's not direct mapping of old instructions with new ones -- BTS does change CF flag while MOV does not. But i didn't find any dependency on CF in this code. Signed-off-by: Cyrill Gorcunov Acked-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/kernel/trampoline_64.S | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 894293c598d..95a012a4664 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -29,6 +29,7 @@ #include #include #include +#include .section .rodata, "a", @progbits @@ -37,7 +38,7 @@ ENTRY(trampoline_data) r_base = . cli # We should be safe anyway - wbinvd + wbinvd mov %cs, %ax # Code and data in the same place mov %ax, %ds mov %ax, %es @@ -73,9 +74,8 @@ r_base = . 
lidtl tidt - r_base # load idt with 0, 0 lgdtl tgdt - r_base # load gdt with whatever is appropriate - xor %ax, %ax - inc %ax # protected mode (PE) bit - lmsw %ax # into protected mode + mov $X86_CR0_PE, %ax # protected mode (PE) bit + lmsw %ax # into protected mode # flush prefetch and jump to startup_32 ljmpl *(startup_32_vector - r_base) @@ -86,9 +86,8 @@ startup_32: movl $__KERNEL_DS, %eax # Initialize the %ds segment register movl %eax, %ds - xorl %eax, %eax - btsl $5, %eax # Enable PAE mode - movl %eax, %cr4 + movl $X86_CR4_PAE, %eax + movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables leal (trampoline_level4_pgt - r_base)(%esi), %eax @@ -99,9 +98,9 @@ startup_32: xorl %edx, %edx wrmsr - xorl %eax, %eax - btsl $31, %eax # Enable paging and in turn activate Long Mode - btsl $0, %eax # Enable protected mode + # Enable paging and in turn activate Long Mode + # Enable protected mode + movl $(X86_CR0_PG | X86_CR0_PE), %eax movl %eax, %cr0 /* -- cgit v1.2.3 From 010060741ad35eacb504414bc6fb9bb575b15f62 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 29 Jan 2009 16:02:12 +0100 Subject: x86: add might_sleep() to do_page_fault() Impact: widen debug checks VirtualBox calls do_page_fault() from an atomic context but runs into a might_sleep() way pas this point, cure that. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8f4b859a04b..eb4d7fe0593 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -888,6 +888,12 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) return; } down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in which + * case we'll have missed the might_sleep() from down_read(). + */ + might_sleep(); } vma = find_vma(mm, address); -- cgit v1.2.3 From fbeb2ca0224182033f196cf8f63989c3e6b90aba Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Thu, 29 Jan 2009 12:11:08 -0800 Subject: x86: unify genapic code, unify subarchitectures, remove old subarchitecture code, xapic fix xapic fix for 32bit platform with less than 8 cpu's. Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/kernel/probe_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 61339a0d55c..b5db26f8e06 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -113,7 +113,7 @@ struct genapic apic_default = { .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = default_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, + .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = NULL, -- cgit v1.2.3 From 019a1369667c3978f9644982ebe6d261dd2bbc40 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Thu, 29 Jan 2009 11:49:18 -0800 Subject: x86: uaccess: fix compilation error on CONFIG_M386 In case of !CONFIG_X86_WP_WORKS_OK, __put_user_size_ex() is not defined. Add macros for !CONFIG_X86_WP_WORKS_OK case. 
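Both variants have to preserve the same calling convention. A usage sketch (editor's addition; the frame layout and function name are made up for illustration, only the put_user_try/put_user_catch/put_user_ex macros come from <asm/uaccess.h>):

/* Batch several user-space stores, collecting a single error code. */
struct example_frame {
	unsigned long ip;
	unsigned long flags;
};

static int store_example_frame(struct example_frame __user *frame,
			       unsigned long ip, unsigned long flags)
{
	int err = 0;

	put_user_try {
		put_user_ex(ip, &frame->ip);
		put_user_ex(flags, &frame->flags);
	} put_user_catch(err);

	return err;
}

With CONFIG_X86_WP_WORKS_OK the stores go through the exception-table fast path; on M386 the same call site now falls back to plain __put_user() calls, which is what the added macros provide.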
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess.h | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0ec6de4bcb0..b9a24155f7a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -525,8 +525,6 @@ struct __large_struct { unsigned long buf[100]; }; */ #define get_user_try uaccess_try #define get_user_catch(err) uaccess_catch(err) -#define put_user_try uaccess_try -#define put_user_catch(err) uaccess_catch(err) #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ @@ -534,9 +532,29 @@ struct __large_struct { unsigned long buf[100]; }; (x) = (__force __typeof__(*(ptr)))__gue_val; \ } while (0) +#ifdef CONFIG_X86_WP_WORKS_OK + +#define put_user_try uaccess_try +#define put_user_catch(err) uaccess_catch(err) + #define put_user_ex(x, ptr) \ __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#else /* !CONFIG_X86_WP_WORKS_OK */ + +#define put_user_try do { \ + int __uaccess_err = 0; + +#define put_user_catch(err) \ + (err) |= __uaccess_err; \ +} while (0) + +#define put_user_ex(x, ptr) do { \ + __uaccess_err |= __put_user(x, ptr); \ +} while (0) + +#endif /* CONFIG_X86_WP_WORKS_OK */ + /* * movsl can be slow when source and dest are not both 8-byte aligned */ -- cgit v1.2.3 From 4272ebfbefd0db40073f3ee5990bceaf2894f08b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 29 Jan 2009 15:14:46 -0800 Subject: x86: allow more than 8 cpus to be used on 32-bit X86_PC is the only remaining 'sub' architecture, so we dont need it anymore. This also cleans up a few spurious references to X86_PC in the driver space - those certainly should be X86. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 6 +----- arch/x86/configs/i386_defconfig | 1 - arch/x86/configs/x86_64_defconfig | 1 - arch/x86/kernel/smpboot.c | 2 +- 4 files changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5bf0e0c5828..afaf2cb7c1a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -262,9 +262,6 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -config X86_PC - def_bool y - config X86_NON_STANDARD bool "Support for non-standard x86 platforms" help @@ -1019,7 +1016,6 @@ config NUMA bool "Numa Memory Allocation and Scheduler Support" depends on SMP depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) - default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) help Enable NUMA (Non Uniform Memory Access) support. 
@@ -1122,7 +1118,7 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SPARSEMEM_ENABLE def_bool y - depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_32_NON_STANDARD + depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD select SPARSEMEM_STATIC if X86_32 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index edba00d98ac..739bce993b5 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -188,7 +188,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y -CONFIG_X86_PC=y # CONFIG_X86_ELAN is not set # CONFIG_X86_VOYAGER is not set # CONFIG_X86_GENERICARCH is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 322dd2748fc..02b514e8f4c 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -187,7 +187,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y -CONFIG_X86_PC=y # CONFIG_X86_ELAN is not set # CONFIG_X86_VOYAGER is not set # CONFIG_X86_GENERICARCH is not set diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index fc80bc18943..2912fa3a8ef 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1000,7 +1000,7 @@ static int __init smp_sanity_check(unsigned max_cpus) { preempt_disable(); -#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) +#ifndef CONFIG_X86_BIGSMP if (def_to_bigsmp && nr_cpu_ids > 8) { unsigned int cpu; unsigned nr; -- cgit v1.2.3 From 754ef0cd65faac4840ada4362bda322d9a811fbf Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Wed, 28 Jan 2009 12:51:09 +0900 Subject: x86: fix debug message of CPU clock speed Impact: Fixes incorrect printk LOCAL APIC is corrected by PM-Timer, when SMI occurred while LOCAL APIC is calibrated. In this case, LOCAL APIC debug message(Boot with apic=debug) is displayed correctly, however, CPU clock speed debug message is displayed wrongly . When SMI occured on my machine, which has 1.6GHz CPU, CPU clock speed is displayed 3622.0205 MHz as follow. CPU0: Intel(R) Xeon(R) CPU 5110 @ 1.60GHz stepping 06 Using local APIC timer interrupts. calibrating APIC timer ... ... lapic delta = 3773130 ... PM timer delta = 812434 APIC calibration not consistent with PM Timer: 226ms instead of 100ms APIC delta adjusted to PM-Timer: 1662420 (3773130) ..... delta 1662420 ..... mult: 71411249 ..... calibration result: 265987 ..... CPU clock speed is 3622.0205 MHz. =====> here ..... host bus clock speed is 265.0987 MHz. This patch fixes to displaying CPU clock speed correctly as follow. CPU0: Intel(R) Xeon(R) CPU 5110 @ 1.60GHz stepping 06 Using local APIC timer interrupts. calibrating APIC timer ... ... lapic delta = 3773131 ... PM timer delta = 812434 APIC calibration not consistent with PM Timer: 226ms instead of 100ms APIC delta adjusted to PM-Timer: 1662420 (3773131) TSC delta adjusted to PM-Timer: 159592409 (362220564) ..... delta 1662420 ..... mult: 71411249 ..... calibration result: 265987 ..... CPU clock speed is 1595.0924 MHz. ..... host bus clock speed is 265.0987 MHz. Signed-off-by: Yasuaki Ishimatsu Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/apic.c | 47 ++++++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 4b6df2469fe..7bcd746d704 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -535,7 +535,8 @@ static void __init lapic_cal_handler(struct clock_event_device *dev) } } -static int __init calibrate_by_pmtimer(long deltapm, long *delta) +static int __init +calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) { const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; const long pm_thresh = pm_100ms / 100; @@ -557,18 +558,29 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta) if (deltapm > (pm_100ms - pm_thresh) && deltapm < (pm_100ms + pm_thresh)) { apic_printk(APIC_VERBOSE, "... PM timer result ok\n"); - } else { - res = (((u64)deltapm) * mult) >> 22; - do_div(res, 1000000); - pr_warning("APIC calibration not consistent " - "with PM Timer: %ldms instead of 100ms\n", - (long)res); - /* Correct the lapic counter value */ - res = (((u64)(*delta)) * pm_100ms); + return 0; + } + + res = (((u64)deltapm) * mult) >> 22; + do_div(res, 1000000); + pr_warning("APIC calibration not consistent " + "with PM Timer: %ldms instead of 100ms\n",(long)res); + + /* Correct the lapic counter value */ + res = (((u64)(*delta)) * pm_100ms); + do_div(res, deltapm); + pr_info("APIC delta adjusted to PM-Timer: " + "%lu (%ld)\n", (unsigned long)res, *delta); + *delta = (long)res; + + /* Correct the tsc counter value */ + if (cpu_has_tsc) { + res = (((u64)(*deltatsc)) * pm_100ms); do_div(res, deltapm); - pr_info("APIC delta adjusted to PM-Timer: " - "%lu (%ld)\n", (unsigned long)res, *delta); - *delta = (long)res; + apic_printk(APIC_VERBOSE, "TSC delta adjusted to " + "PM-Timer: %lu (%ld) \n", + (unsigned long)res, *deltatsc); + *deltatsc = (long)res; } return 0; @@ -579,7 +591,7 @@ static int __init calibrate_APIC_clock(void) struct clock_event_device *levt = &__get_cpu_var(lapic_events); void (*real_handler)(struct clock_event_device *dev); unsigned long deltaj; - long delta; + long delta, deltatsc; int pm_referenced = 0; local_irq_disable(); @@ -609,9 +621,11 @@ static int __init calibrate_APIC_clock(void) delta = lapic_cal_t1 - lapic_cal_t2; apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); + deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); + /* we trust the PM based calibration if possible */ pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, - &delta); + &delta, &deltatsc); /* Calculate the scaled math multiplication factor */ lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, @@ -629,11 +643,10 @@ static int __init calibrate_APIC_clock(void) calibration_result); if (cpu_has_tsc) { - delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); apic_printk(APIC_VERBOSE, "..... CPU clock speed is " "%ld.%04ld MHz.\n", - (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ), - (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ)); + (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), + (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); } apic_printk(APIC_VERBOSE, "..... host bus clock speed is " -- cgit v1.2.3 From 39ba5d43fc9133696240fc8b6b13e7a41fea87cd Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Wed, 28 Jan 2009 12:52:24 +0900 Subject: x86: unify PM-Timer messages Impact: Cleans up printk formatting When LOCAL APIC was calibrated, the debug message is displayed as follows. 
CPU0: Intel(R) Xeon(R) CPU 5110 @ 1.60GHz stepping 06 Using local APIC timer interrupts. calibrating APIC timer ... ... lapic delta = 3773131 ... PM timer delta = 812434 APIC calibration not consistent with PM Timer: 226ms instead of 100ms APIC delta adjusted to PM-Timer: 1662420 (3773131) TSC delta adjusted to PM-Timer: 159592409 (362220564) ..... delta 1662420 ..... mult: 71411249 ..... calibration result: 265987 ..... CPU clock speed is 1595.0924 MHz. ..... host bus clock speed is 265.0987 MHz. There are three type of PM-Timer (PM-Timer, PM Timer, and PM timer), in this message. This patch unifies those messages to PM-Timer. Signed-off-by: Yasuaki Ishimatsu Signed-off-by: H. Peter Anvin --- arch/x86/kernel/apic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 7bcd746d704..0d935d218d0 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -547,7 +547,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) return -1; #endif - apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm); + apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm); /* Check, if the PM timer is available */ if (!deltapm) @@ -557,14 +557,14 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) if (deltapm > (pm_100ms - pm_thresh) && deltapm < (pm_100ms + pm_thresh)) { - apic_printk(APIC_VERBOSE, "... PM timer result ok\n"); + apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n"); return 0; } res = (((u64)deltapm) * mult) >> 22; do_div(res, 1000000); pr_warning("APIC calibration not consistent " - "with PM Timer: %ldms instead of 100ms\n",(long)res); + "with PM-Timer: %ldms instead of 100ms\n",(long)res); /* Correct the lapic counter value */ res = (((u64)(*delta)) * pm_100ms); -- cgit v1.2.3 From 36ef4944ee8118491631e317e406f9bd15e20e97 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 29 Jan 2009 19:29:24 -0800 Subject: x86, apic unification: remove left over files Impact: cleanup remove unused files Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 138 ---------------------------------- arch/x86/include/asm/bigsmp/apicdef.h | 9 --- arch/x86/include/asm/bigsmp/ipi.h | 22 ------ arch/x86/mach-default/Makefile | 5 -- 4 files changed, 174 deletions(-) delete mode 100644 arch/x86/include/asm/bigsmp/apic.h delete mode 100644 arch/x86/include/asm/bigsmp/apicdef.h delete mode 100644 arch/x86/include/asm/bigsmp/ipi.h delete mode 100644 arch/x86/mach-default/Makefile (limited to 'arch') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h deleted file mode 100644 index ee29d66cd30..00000000000 --- a/arch/x86/include/asm/bigsmp/apic.h +++ /dev/null @@ -1,138 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) - -static inline int bigsmp_apic_id_registered(void) -{ - return 1; -} - -static inline const cpumask_t *bigsmp_target_cpus(void) -{ -#ifdef CONFIG_SMP - return &cpu_online_map; -#else - return &cpumask_of_cpu(0); -#endif -} - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline unsigned long -bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -static inline unsigned long bigsmp_check_apicid_present(int bit) -{ - return 1; -} - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = 
xapic_phys_to_log_apicid(cpu); - val |= SET_APIC_LOGICAL_ID(id); - return val; -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void bigsmp_init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static inline void bigsmp_setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "Physflat", nr_ioapics); -} - -static inline int bigsmp_apicid_to_node(int logical_apicid) -{ - return apicid_2_node[hard_smp_processor_id()]; -} - -static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids) - return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); - - return BAD_APICID; -} - -static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int bigsmp_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_physical_id(cpu); -} - -static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xFFL); -} - -static inline void bigsmp_setup_portio_remap(void) -{ -} - -static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -/* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); -} - -static inline unsigned int -bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - if (cpu < nr_cpu_ids) - return bigsmp_cpu_to_logical_apicid(cpu); - - return BAD_APICID; -} - -static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h deleted file mode 100644 index e58dee84757..00000000000 --- a/arch/x86/include/asm/bigsmp/apicdef.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -static inline unsigned bigsmp_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -#endif diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h deleted file mode 100644 index a91db69cda6..00000000000 --- a/arch/x86/include/asm/bigsmp/ipi.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - -static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence(mask, vector); -} - -static inline void bigsmp_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself(cpu_online_mask, vector); -} - -static inline void bigsmp_send_IPI_all(int vector) -{ - default_send_IPI_mask(cpu_online_mask, vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile deleted file mode 100644 index 012fe34459e..00000000000 --- a/arch/x86/mach-default/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -obj-y := setup.o -- cgit v1.2.3 From 1ff2f20de354a621ef4b56b9cfe6f9139a7e493b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 29 Jan 2009 19:30:04 -0800 Subject: x86: fix compiling with 64bit with def_to_bigsmp only need to do cut off with 32bit Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2912fa3a8ef..4c3cff57494 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1000,7 +1000,7 @@ static int __init smp_sanity_check(unsigned max_cpus) { preempt_disable(); -#ifndef CONFIG_X86_BIGSMP +#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32) if (def_to_bigsmp && nr_cpu_ids > 8) { unsigned int cpu; unsigned nr; -- cgit v1.2.3 From 43f39890db2959b10891cf7bbf3f53fffc8ce3bd Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 29 Jan 2009 19:31:49 -0800 Subject: x86: seperate default_send_IPI_mask_sequence/allbutself from logical Impact: 32-bit should use logical version there are two version: for default_send_IPI_mask_sequence/allbutself one in ipi.h and one in ipi.c for 32bit it seems .h version overwrote ipi.c for a while. restore it so 32 bit could use its old logical version. 
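For background on the physical/logical split this changelog describes, here is a minimal standalone sketch; the build_icr() helper is hypothetical and only mirrors the published xAPIC ICR layout (vector in bits 0-7, destination mode in bit 11, destination field in ICR2 bits 24-31) - it is not the kernel's __prepare_ICR()/__default_send_IPI_dest_field() implementation. The _phys variants put a physical APIC ID into the destination field, while the _logical variants put a logical ID there (in flat mode, one bit per CPU) and set the destination-mode bit:

/*
 * Standalone sketch, not kernel code: the real difference between the
 * physical and logical IPI helpers is what lands in the ICR destination
 * field and whether the destination-mode bit (ICR bit 11) is set.
 */
#include <stdio.h>

#define APIC_DEST_PHYSICAL	0x00000u	/* ICR bit 11 clear: physical dest */
#define APIC_DEST_LOGICAL	0x00800u	/* ICR bit 11 set: logical dest    */

/* hypothetical helper: compose the two xAPIC ICR halves for a fixed-delivery IPI */
static void build_icr(unsigned int dest, unsigned int vector, unsigned int dest_mode,
		      unsigned int *icr2, unsigned int *icr)
{
	*icr2 = (dest & 0xffu) << 24;		/* destination field, ICR2 bits 24-31 */
	*icr  = dest_mode | (vector & 0xffu);	/* fixed delivery, requested vector   */
}

int main(void)
{
	unsigned int icr2, icr;

	/* physical mode: destination is the target CPU's physical APIC ID */
	build_icr(5, 0xfd, APIC_DEST_PHYSICAL, &icr2, &icr);
	printf("phys    -> ICR2=%08x ICR=%08x\n", icr2, icr);

	/* flat logical mode: destination is a bitmap, one bit per CPU */
	build_icr(1u << 5, 0xfd, APIC_DEST_LOGICAL, &icr2, &icr);
	printf("logical -> ICR2=%08x ICR=%08x\n", icr2, icr);

	return 0;
}

Because the flat logical ID is a per-CPU bitmask, default_send_IPI_mask_bitmask_logical() in the hunk below can reach several CPUs with a single ICR write, which is the behaviour the 32-bit default genapic wants restored.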
also remove dupicated functions in .c Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/ipi.h | 71 ++++++++++++++++--- arch/x86/kernel/bigsmp_32.c | 8 +-- arch/x86/kernel/es7000_32.c | 4 +- arch/x86/kernel/genapic_flat_64.c | 6 +- arch/x86/kernel/ipi.c | 139 +------------------------------------- arch/x86/kernel/numaq_32.c | 8 +-- arch/x86/kernel/probe_32.c | 4 +- arch/x86/kernel/summit_32.c | 6 +- 8 files changed, 77 insertions(+), 169 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index e2e8e4e0a65..aa79945445b 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -120,7 +120,7 @@ static inline void } static inline void -default_send_IPI_mask_sequence(const struct cpumask *mask, int vector) +default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) { unsigned long query_cpu; unsigned long flags; @@ -139,7 +139,7 @@ default_send_IPI_mask_sequence(const struct cpumask *mask, int vector) } static inline void -default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); unsigned int query_cpu; @@ -157,23 +157,72 @@ default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) local_irq_restore(flags); } +#include + +static inline void +default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector) +{ + unsigned long flags; + unsigned int query_cpu; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicasts to each CPU instead. This + * should be modified to do 1 message per cluster ID - mbligh + */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + local_irq_restore(flags); +} + +static inline void +default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + } + local_irq_restore(flags); +} /* Avoid include hell */ #define NMI_VECTOR 0x02 -void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - extern int no_broadcast; -#ifdef CONFIG_X86_64 -#include -#else -static inline void default_send_IPI_mask(const struct cpumask *mask, int vector) +#ifndef CONFIG_X86_64 +/* + * This is only used on smaller machines. 
+ */ +static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + unsigned long flags; + + local_irq_save(flags); + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector) { - default_send_IPI_mask_bitmask(mask, vector); + default_send_IPI_mask_bitmask_logical(mask, vector); } -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); #endif static inline void __default_local_send_IPI_allbutself(int vector) diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index b1f91931003..ab645c93a6e 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -154,17 +155,14 @@ static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) { - default_send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence_phys(mask, vector); } static inline void bigsmp_send_IPI_allbutself(int vector) { - default_send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static inline void bigsmp_send_IPI_all(int vector) diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 078364ccfa0..b5b50e8b94a 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -451,12 +451,12 @@ static int es7000_check_dsdt(void) static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) { - default_send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence_phys(mask, vector); } static void es7000_send_IPI_allbutself(int vector) { - default_send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void es7000_send_IPI_all(int vector) diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 19bffb3f732..249d2d3c034 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -267,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) { - default_send_IPI_mask_sequence(cpumask, vector); + default_send_IPI_mask_sequence_phys(cpumask, vector); } static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { - default_send_IPI_mask_allbutself(cpumask, vector); + default_send_IPI_mask_allbutself_phys(cpumask, vector); } static void physflat_send_IPI_allbutself(int vector) { - default_send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void physflat_send_IPI_all(int vector) diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 0893fa14458..339f4f3feee 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -17,148 +17,13 @@ #include #include #include +#include #ifdef CONFIG_X86_32 -#include - -/* - * the following functions deal with sending IPIs between CPUs. 
- * - * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. - */ - -static inline int __prepare_ICR(unsigned int shortcut, int vector) -{ - unsigned int icr = shortcut | apic->dest_logical; - - switch (vector) { - default: - icr |= APIC_DM_FIXED | vector; - break; - case NMI_VECTOR: - icr |= APIC_DM_NMI; - break; - } - return icr; -} - -static inline int __prepare_ICR2(unsigned int mask) -{ - return SET_APIC_DEST_FIELD(mask); -} - -void __default_send_IPI_shortcut(unsigned int shortcut, int vector) -{ - /* - * Subtle. In the case of the 'never do double writes' workaround - * we have to lock out interrupts to be safe. As we don't care - * of the value read we use an atomic rmw access to avoid costly - * cli/sti. Otherwise we use an even cheaper single atomic write - * to the APIC. - */ - unsigned int cfg; - - /* - * Wait for idle. - */ - apic_wait_icr_idle(); - - /* - * No need to touch the target chip field - */ - cfg = __prepare_ICR(shortcut, vector); - - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - apic_write(APIC_ICR, cfg); -} void default_send_IPI_self(int vector) { - __default_send_IPI_shortcut(APIC_DEST_SELF, vector); -} - -/* - * This is used to send an IPI with no shorthand notation (the destination is - * specified in bits 56 to 63 of the ICR). - */ -static inline void __default_send_IPI_dest_field(unsigned long mask, int vector) -{ - unsigned long cfg; - - /* - * Wait for idle. - */ - if (unlikely(vector == NMI_VECTOR)) - safe_apic_wait_icr_idle(); - else - apic_wait_icr_idle(); - - /* - * prepare target chip field - */ - cfg = __prepare_ICR2(mask); - apic_write(APIC_ICR2, cfg); - - /* - * program the ICR - */ - cfg = __prepare_ICR(0, vector); - - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - apic_write(APIC_ICR, cfg); -} - -/* - * This is only used on smaller machines. - */ -void default_send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - unsigned long flags; - - local_irq_save(flags); - WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); - __default_send_IPI_dest_field(mask, vector); - local_irq_restore(flags); -} - -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector) -{ - unsigned long flags; - unsigned int query_cpu; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicasts to each CPU instead. 
This - * should be modified to do 1 message per cluster ID - mbligh - */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) - __default_send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector); - local_irq_restore(flags); -} - -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) -{ - unsigned long flags; - unsigned int query_cpu; - unsigned int this_cpu = smp_processor_id(); - - /* See Hack comment above */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector); - } - local_irq_restore(flags); + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); } /* must come after the send_IPI functions above for inlining */ diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 83bb05524f4..c143b329216 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -302,6 +302,7 @@ int __init get_memcfg_numaq(void) #include #include #include +#include #include #include #include @@ -319,17 +320,14 @@ static inline unsigned int numaq_get_apic_id(unsigned long x) return (x >> 24) & 0x0F; } -void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector); -void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); - static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) { - default_send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence_logical(mask, vector); } static inline void numaq_send_IPI_allbutself(int vector) { - default_send_IPI_mask_allbutself(cpu_online_mask, vector); + default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector); } static inline void numaq_send_IPI_all(int vector) diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index b5db26f8e06..3f12a401182 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -112,8 +112,8 @@ struct genapic apic_default = { .cpu_mask_to_apicid = default_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, - .send_IPI_mask = default_send_IPI_mask, - .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself, + .send_IPI_mask = default_send_IPI_mask_logical, + .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = NULL, diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 84ff9ebbcc9..ecb41b9d7aa 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -54,12 +55,9 @@ static inline unsigned summit_get_apic_id(unsigned long x) return (x >> 24) & 0xFF; } -void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector); - static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) { - default_send_IPI_mask_sequence(mask, vector); + default_send_IPI_mask_sequence_logical(mask, vector); } static inline void summit_send_IPI_allbutself(int vector) -- cgit v1.2.3 From 26f7ef14a76b0e590a3797fd7b2f3cee868d9664 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 29 Jan 2009 14:19:22 -0800 Subject: x86: don't treat bigsmp as non-standard just like 64 bit switch from flat logical APIC messages to flat physical mode automatically. 
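The "more than 8 CPUs" threshold comes from the flat logical format itself: the 8-bit flat-mode logical APIC ID carries one bit per CPU, so flat logical delivery tops out at 8 CPUs and anything larger has to address CPUs by physical APIC ID - physflat on 64-bit, bigsmp on 32-bit. A minimal sketch of that decision follows; pick_apic_dest_mode() is a hypothetical helper for illustration only, not the kernel's probe logic:

/*
 * Standalone sketch (not kernel code): why both 64-bit physflat and
 * 32-bit bigsmp give up on flat logical addressing above 8 CPUs.
 * Flat logical mode stores one bit per CPU in the 8-bit logical APIC
 * ID, so a destination bitmap can only name CPUs 0..7.
 */
#include <stdio.h>

#define FLAT_LOGICAL_MAX_CPUS	8	/* 8 bits in the flat-mode logical ID */

/* hypothetical helper: choose a destination scheme for nr_cpus CPUs */
static const char *pick_apic_dest_mode(unsigned int nr_cpus)
{
	return nr_cpus > FLAT_LOGICAL_MAX_CPUS
		? "physical APIC IDs (bigsmp / physflat)"
		: "flat logical bitmap";
}

int main(void)
{
	unsigned int nr;

	for (nr = 7; nr <= 10; nr++)
		printf("%2u CPUs -> %s\n", nr, pick_apic_dest_mode(nr));

	return 0;
}

Running it shows the switch happening between 8 and 9 CPUs, the same boundary enforced by the "More than 8 CPUs detected" check in the smpboot.c hunk below.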
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 15 +++++++-------- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/mpparse.c | 4 ++-- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- 5 files changed, 12 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index afaf2cb7c1a..c6e567bb649 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -262,6 +262,12 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on X86_32 && SMP + help + This option is needed for the systems that have more than 8 CPUs + config X86_NON_STANDARD bool "Support for non-standard x86 platforms" help @@ -338,13 +344,6 @@ config X86_32_NON_STANDARD if you select them all, kernel will probe it one by one. and will fallback to default. -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_32_NON_STANDARD - help - This option is needed for the systems that have more than 8 CPUs - and if the system is not of any sub-arch type above. - config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" depends on X86_32_NON_STANDARD @@ -366,7 +365,7 @@ config X86_SUMMIT config X86_ES7000 bool "Support for Unisys ES7000 IA32 series" - depends on X86_32_NON_STANDARD + depends on X86_32_NON_STANDARD && X86_BIGSMP help Support for Unisys ES7000 systems. Say 'Y' here if this kernel is supposed to run on an IA32-based Unisys ES7000 system. diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7352c60f29d..3efa996b036 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1335,7 +1335,7 @@ static void __init acpi_process_madt(void) if (!error) { acpi_lapic = 1; -#ifdef CONFIG_X86_32_NON_STANDARD +#ifdef CONFIG_X86_BIGSMP generic_bigsmp_probe(); #endif /* diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 89aaced51bd..b46ca7d31fe 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -372,8 +372,8 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) (*x86_quirks->mpc_record)++; } -#ifdef CONFIG_X86_32_NON_STANDARD - generic_bigsmp_probe(); +#ifdef CONFIG_X86_BIGSMP + generic_bigsmp_probe(); #endif if (apic->setup_apic_routing) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f64e1a487c9..df64afff580 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -936,7 +936,7 @@ void __init setup_arch(char **cmdline_p) map_vsyscall(); #endif -#ifdef CONFIG_X86_32_NON_STANDARD +#if defined(CONFIG_X86_32_NON_STANDARD) || defined(CONFIG_X86_BIGSMP) generic_apic_probe(); #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 4c3cff57494..1268a862abb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1007,7 +1007,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_WARNING "More than 8 CPUs detected - skipping them.\n" - "Use CONFIG_X86_32_NON_STANDARD and CONFIG_X86_BIGSMP.\n"); + "Use CONFIG_X86_BIGSMP.\n"); nr = 0; for_each_present_cpu(cpu) { -- cgit v1.2.3 From 4d87c5bec5389625d80b71108795aecf82cd670d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 29 Jan 2009 14:29:10 -0800 Subject: fix "sparseirq: use kstat_irqs_cpu on non-x86 architectures too" Repair 0b0f0b1c2c87de299df6f92a8ffc0a73bd1bb960 arch/alpha/kernel/irq.c: In 
function 'show_interrupts': arch/alpha/kernel/irq.c:93: error: 'i' undeclared (first use in this function) arch/alpha/kernel/irq.c:93: error: (Each undeclared identifier is reported only once arch/alpha/kernel/irq.c:93: error: for each function it appears in.) Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/alpha/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 430550bd1eb..d3812eb8401 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -90,7 +90,7 @@ show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_irqs(irq)); #else for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); + seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); #endif seq_printf(p, " %14s", irq_desc[irq].chip->typename); seq_printf(p, " %c%s", -- cgit v1.2.3 From 6b64ee02da20d6c0d97115e0b1ab47f9fa2f0d8f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 30 Jan 2009 23:42:18 +0100 Subject: x86, apic, 32-bit: add self-IPI methods Impact: fix rare crash on 32-bit The 32-bit APIC drivers had their send_IPI_self vectors set to NULL, but ioapic_retrigger_irq() depends on it being always set. Fix it. Signed-off-by: Ingo Molnar --- arch/x86/kernel/bigsmp_32.c | 2 +- arch/x86/kernel/es7000_32.c | 2 +- arch/x86/kernel/numaq_32.c | 2 +- arch/x86/kernel/probe_32.c | 2 +- arch/x86/kernel/summit_32.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index ab645c93a6e..47a62f46afd 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -252,7 +252,7 @@ struct genapic apic_bigsmp = { .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = bigsmp_send_IPI_allbutself, .send_IPI_all = bigsmp_send_IPI_all, - .send_IPI_self = NULL, + .send_IPI_self = default_send_IPI_self, .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index b5b50e8b94a..d6184c12a18 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -786,7 +786,7 @@ struct genapic apic_es7000 = { .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = es7000_send_IPI_allbutself, .send_IPI_all = es7000_send_IPI_all, - .send_IPI_self = NULL, + .send_IPI_self = default_send_IPI_self, .wakeup_cpu = NULL, diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index c143b329216..947748e1721 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -554,7 +554,7 @@ struct genapic apic_numaq = { .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = numaq_send_IPI_allbutself, .send_IPI_all = numaq_send_IPI_all, - .send_IPI_self = NULL, + .send_IPI_self = default_send_IPI_self, .wakeup_cpu = NULL, .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 3f12a401182..22337b75de6 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -116,7 +116,7 @@ struct genapic apic_default = { .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, - .send_IPI_self = NULL, + .send_IPI_self = default_send_IPI_self, .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 
ecb41b9d7aa..1e733eff9b3 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -588,7 +588,7 @@ struct genapic apic_summit = { .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = summit_send_IPI_allbutself, .send_IPI_all = summit_send_IPI_all, - .send_IPI_self = NULL, + .send_IPI_self = default_send_IPI_self, .wakeup_cpu = NULL, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, -- cgit v1.2.3 From 319f3ba52c71630865b10ac3b99dd020440d681d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:01 -0800 Subject: xen: move remaining mmu-related stuff into mmu.c Impact: Cleanup Move remaining mmu-related stuff into mmu.c. A general cleanup, and lay the groundwork for later patches. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/enlighten.c | 731 +---------------------------------------------- arch/x86/xen/mmu.c | 700 +++++++++++++++++++++++++++++++++++++++++++++ arch/x86/xen/mmu.h | 3 + arch/x86/xen/xen-ops.h | 8 + 4 files changed, 712 insertions(+), 730 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 6b3f7eef57e..0cd2a165f17 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -61,35 +61,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); enum xen_domain_type xen_domain_type = XEN_NATIVE; EXPORT_SYMBOL_GPL(xen_domain_type); -/* - * Identity map, in addition to plain kernel map. This needs to be - * large enough to allocate page table pages to allocate the rest. - * Each page can map 2MB. - */ -static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss; - -#ifdef CONFIG_X86_64 -/* l3 pud for userspace vsyscall mapping */ -static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; -#endif /* CONFIG_X86_64 */ - -/* - * Note about cr3 (pagetable base) values: - * - * xen_cr3 contains the current logical cr3 value; it contains the - * last set cr3. This may not be the current effective cr3, because - * its update may be being lazily deferred. However, a vcpu looking - * at its own cr3 can use this value knowing that it everything will - * be self-consistent. - * - * xen_current_cr3 contains the actual vcpu cr3; it is set once the - * hypercall to set the vcpu cr3 is complete (so it may be a little - * out of date, but it will never be set early). If one vcpu is - * looking at another vcpu's cr3 value, it should use this variable. 
- */ -DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ -DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ - struct start_info *xen_start_info; EXPORT_SYMBOL_GPL(xen_start_info); @@ -237,7 +208,7 @@ static unsigned long xen_get_debugreg(int reg) return HYPERVISOR_get_debugreg(reg); } -static void xen_leave_lazy(void) +void xen_leave_lazy(void) { paravirt_leave_lazy(paravirt_get_lazy_mode()); xen_mc_flush(); @@ -598,76 +569,6 @@ static struct apic_ops xen_basic_apic_ops = { #endif -static void xen_flush_tlb(void) -{ - struct mmuext_op *op; - struct multicall_space mcs; - - preempt_disable(); - - mcs = xen_mc_entry(sizeof(*op)); - - op = mcs.args; - op->cmd = MMUEXT_TLB_FLUSH_LOCAL; - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); - - xen_mc_issue(PARAVIRT_LAZY_MMU); - - preempt_enable(); -} - -static void xen_flush_tlb_single(unsigned long addr) -{ - struct mmuext_op *op; - struct multicall_space mcs; - - preempt_disable(); - - mcs = xen_mc_entry(sizeof(*op)); - op = mcs.args; - op->cmd = MMUEXT_INVLPG_LOCAL; - op->arg1.linear_addr = addr & PAGE_MASK; - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); - - xen_mc_issue(PARAVIRT_LAZY_MMU); - - preempt_enable(); -} - -static void xen_flush_tlb_others(const struct cpumask *cpus, - struct mm_struct *mm, unsigned long va) -{ - struct { - struct mmuext_op op; - DECLARE_BITMAP(mask, NR_CPUS); - } *args; - struct multicall_space mcs; - - BUG_ON(cpumask_empty(cpus)); - BUG_ON(!mm); - - mcs = xen_mc_entry(sizeof(*args)); - args = mcs.args; - args->op.arg2.vcpumask = to_cpumask(args->mask); - - /* Remove us, and any offline CPUS. */ - cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); - if (unlikely(cpumask_empty(to_cpumask(args->mask)))) - goto issue; - - if (va == TLB_FLUSH_ALL) { - args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; - } else { - args->op.cmd = MMUEXT_INVLPG_MULTI; - args->op.arg1.linear_addr = va; - } - - MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); - -issue: - xen_mc_issue(PARAVIRT_LAZY_MMU); -} static void xen_clts(void) { @@ -693,21 +594,6 @@ static void xen_write_cr0(unsigned long cr0) xen_mc_issue(PARAVIRT_LAZY_CPU); } -static void xen_write_cr2(unsigned long cr2) -{ - percpu_read(xen_vcpu)->arch.cr2 = cr2; -} - -static unsigned long xen_read_cr2(void) -{ - return percpu_read(xen_vcpu)->arch.cr2; -} - -static unsigned long xen_read_cr2_direct(void) -{ - return percpu_read(xen_vcpu_info.arch.cr2); -} - static void xen_write_cr4(unsigned long cr4) { cr4 &= ~X86_CR4_PGE; @@ -716,71 +602,6 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } -static unsigned long xen_read_cr3(void) -{ - return percpu_read(xen_cr3); -} - -static void set_current_cr3(void *v) -{ - percpu_write(xen_current_cr3, (unsigned long)v); -} - -static void __xen_write_cr3(bool kernel, unsigned long cr3) -{ - struct mmuext_op *op; - struct multicall_space mcs; - unsigned long mfn; - - if (cr3) - mfn = pfn_to_mfn(PFN_DOWN(cr3)); - else - mfn = 0; - - WARN_ON(mfn == 0 && kernel); - - mcs = __xen_mc_entry(sizeof(*op)); - - op = mcs.args; - op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; - op->arg1.mfn = mfn; - - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); - - if (kernel) { - percpu_write(xen_cr3, cr3); - - /* Update xen_current_cr3 once the batch has actually - been submitted. 
*/ - xen_mc_callback(set_current_cr3, (void *)cr3); - } -} - -static void xen_write_cr3(unsigned long cr3) -{ - BUG_ON(preemptible()); - - xen_mc_batch(); /* disables interrupts */ - - /* Update while interrupts are disabled, so its atomic with - respect to ipis */ - percpu_write(xen_cr3, cr3); - - __xen_write_cr3(true, cr3); - -#ifdef CONFIG_X86_64 - { - pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); - if (user_pgd) - __xen_write_cr3(false, __pa(user_pgd)); - else - __xen_write_cr3(false, 0); - } -#endif - - xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ -} - static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -822,185 +643,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) return ret; } -/* Early in boot, while setting up the initial pagetable, assume - everything is pinned. */ -static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) -{ -#ifdef CONFIG_FLATMEM - BUG_ON(mem_map); /* should only be used early */ -#endif - make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); -} - -/* Early release_pte assumes that all pts are pinned, since there's - only init_mm and anything attached to that is pinned. */ -static void xen_release_pte_init(unsigned long pfn) -{ - make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); -} - -static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) -{ - struct mmuext_op op; - op.cmd = cmd; - op.arg1.mfn = pfn_to_mfn(pfn); - if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) - BUG(); -} - -/* This needs to make sure the new pte page is pinned iff its being - attached to a pinned pagetable. */ -static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) -{ - struct page *page = pfn_to_page(pfn); - - if (PagePinned(virt_to_page(mm->pgd))) { - SetPagePinned(page); - - vm_unmap_aliases(); - if (!PageHighMem(page)) { - make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); - if (level == PT_PTE && USE_SPLIT_PTLOCKS) - pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); - } else { - /* make sure there are no stray mappings of - this page */ - kmap_flush_unused(); - } - } -} - -static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) -{ - xen_alloc_ptpage(mm, pfn, PT_PTE); -} - -static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) -{ - xen_alloc_ptpage(mm, pfn, PT_PMD); -} - -static int xen_pgd_alloc(struct mm_struct *mm) -{ - pgd_t *pgd = mm->pgd; - int ret = 0; - - BUG_ON(PagePinned(virt_to_page(pgd))); - -#ifdef CONFIG_X86_64 - { - struct page *page = virt_to_page(pgd); - pgd_t *user_pgd; - - BUG_ON(page->private != 0); - - ret = -ENOMEM; - - user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - page->private = (unsigned long)user_pgd; - - if (user_pgd != NULL) { - user_pgd[pgd_index(VSYSCALL_START)] = - __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); - ret = 0; - } - - BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); - } -#endif - - return ret; -} - -static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ -#ifdef CONFIG_X86_64 - pgd_t *user_pgd = xen_get_user_pgd(pgd); - - if (user_pgd) - free_page((unsigned long)user_pgd); -#endif -} - -/* This should never happen until we're OK to use struct page */ -static void xen_release_ptpage(unsigned long pfn, unsigned level) -{ - struct page *page = pfn_to_page(pfn); - - if (PagePinned(page)) { - if (!PageHighMem(page)) { - if (level == PT_PTE && USE_SPLIT_PTLOCKS) - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); - 
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); - } - ClearPagePinned(page); - } -} - -static void xen_release_pte(unsigned long pfn) -{ - xen_release_ptpage(pfn, PT_PTE); -} - -static void xen_release_pmd(unsigned long pfn) -{ - xen_release_ptpage(pfn, PT_PMD); -} - -#if PAGETABLE_LEVELS == 4 -static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) -{ - xen_alloc_ptpage(mm, pfn, PT_PUD); -} - -static void xen_release_pud(unsigned long pfn) -{ - xen_release_ptpage(pfn, PT_PUD); -} -#endif - -#ifdef CONFIG_HIGHPTE -static void *xen_kmap_atomic_pte(struct page *page, enum km_type type) -{ - pgprot_t prot = PAGE_KERNEL; - - if (PagePinned(page)) - prot = PAGE_KERNEL_RO; - - if (0 && PageHighMem(page)) - printk("mapping highpte %lx type %d prot %s\n", - page_to_pfn(page), type, - (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ"); - - return kmap_atomic_prot(page, type, prot); -} -#endif - -#ifdef CONFIG_X86_32 -static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) -{ - /* If there's an existing pte, then don't allow _PAGE_RW to be set */ - if (pte_val_ma(*ptep) & _PAGE_PRESENT) - pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & - pte_val_ma(pte)); - - return pte; -} - -/* Init-time set_pte while constructing initial pagetables, which - doesn't allow RO pagetable pages to be remapped RW */ -static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) -{ - pte = mask_rw_pte(ptep, pte); - - xen_set_pte(ptep, pte); -} -#endif - -static __init void xen_pagetable_setup_start(pgd_t *base) -{ -} - void xen_setup_shared_info(void) { if (!xen_feature(XENFEAT_auto_translated_physmap)) { @@ -1021,37 +663,6 @@ void xen_setup_shared_info(void) xen_setup_mfn_list_list(); } -static __init void xen_pagetable_setup_done(pgd_t *base) -{ - xen_setup_shared_info(); -} - -static __init void xen_post_allocator_init(void) -{ - pv_mmu_ops.set_pte = xen_set_pte; - pv_mmu_ops.set_pmd = xen_set_pmd; - pv_mmu_ops.set_pud = xen_set_pud; -#if PAGETABLE_LEVELS == 4 - pv_mmu_ops.set_pgd = xen_set_pgd; -#endif - - /* This will work as long as patching hasn't happened yet - (which it hasn't) */ - pv_mmu_ops.alloc_pte = xen_alloc_pte; - pv_mmu_ops.alloc_pmd = xen_alloc_pmd; - pv_mmu_ops.release_pte = xen_release_pte; - pv_mmu_ops.release_pmd = xen_release_pmd; -#if PAGETABLE_LEVELS == 4 - pv_mmu_ops.alloc_pud = xen_alloc_pud; - pv_mmu_ops.release_pud = xen_release_pud; -#endif - -#ifdef CONFIG_X86_64 - SetPagePinned(virt_to_page(level3_user_vsyscall)); -#endif - xen_mark_init_mm_pinned(); -} - /* This is called once we have the cpu_possible_map */ void xen_setup_vcpu_info_placement(void) { @@ -1126,49 +737,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, return ret; } -static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) -{ - pte_t pte; - - phys >>= PAGE_SHIFT; - - switch (idx) { - case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: -#ifdef CONFIG_X86_F00F_BUG - case FIX_F00F_IDT: -#endif -#ifdef CONFIG_X86_32 - case FIX_WP_TEST: - case FIX_VDSO: -# ifdef CONFIG_HIGHMEM - case FIX_KMAP_BEGIN ... FIX_KMAP_END: -# endif -#else - case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: -#endif -#ifdef CONFIG_X86_LOCAL_APIC - case FIX_APIC_BASE: /* maps dummy local APIC */ -#endif - pte = pfn_pte(phys, prot); - break; - - default: - pte = mfn_pte(phys, prot); - break; - } - - __native_set_fixmap(idx, pte); - -#ifdef CONFIG_X86_64 - /* Replicate changes to map the vsyscall page into the user - pagetable vsyscall mapping. 
*/ - if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { - unsigned long vaddr = __fix_to_virt(idx); - set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); - } -#endif -} - static const struct pv_info xen_info __initdata = { .paravirt_enabled = 1, .shared_kernel_pmd = 0, @@ -1264,86 +832,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = { #endif }; -static const struct pv_mmu_ops xen_mmu_ops __initdata = { - .pagetable_setup_start = xen_pagetable_setup_start, - .pagetable_setup_done = xen_pagetable_setup_done, - - .read_cr2 = xen_read_cr2, - .write_cr2 = xen_write_cr2, - - .read_cr3 = xen_read_cr3, - .write_cr3 = xen_write_cr3, - - .flush_tlb_user = xen_flush_tlb, - .flush_tlb_kernel = xen_flush_tlb, - .flush_tlb_single = xen_flush_tlb_single, - .flush_tlb_others = xen_flush_tlb_others, - - .pte_update = paravirt_nop, - .pte_update_defer = paravirt_nop, - - .pgd_alloc = xen_pgd_alloc, - .pgd_free = xen_pgd_free, - - .alloc_pte = xen_alloc_pte_init, - .release_pte = xen_release_pte_init, - .alloc_pmd = xen_alloc_pte_init, - .alloc_pmd_clone = paravirt_nop, - .release_pmd = xen_release_pte_init, - -#ifdef CONFIG_HIGHPTE - .kmap_atomic_pte = xen_kmap_atomic_pte, -#endif - -#ifdef CONFIG_X86_64 - .set_pte = xen_set_pte, -#else - .set_pte = xen_set_pte_init, -#endif - .set_pte_at = xen_set_pte_at, - .set_pmd = xen_set_pmd_hyper, - - .ptep_modify_prot_start = __ptep_modify_prot_start, - .ptep_modify_prot_commit = __ptep_modify_prot_commit, - - .pte_val = xen_pte_val, - .pgd_val = xen_pgd_val, - - .make_pte = xen_make_pte, - .make_pgd = xen_make_pgd, - -#ifdef CONFIG_X86_PAE - .set_pte_atomic = xen_set_pte_atomic, - .set_pte_present = xen_set_pte_at, - .pte_clear = xen_pte_clear, - .pmd_clear = xen_pmd_clear, -#endif /* CONFIG_X86_PAE */ - .set_pud = xen_set_pud_hyper, - - .make_pmd = xen_make_pmd, - .pmd_val = xen_pmd_val, - -#if PAGETABLE_LEVELS == 4 - .pud_val = xen_pud_val, - .make_pud = xen_make_pud, - .set_pgd = xen_set_pgd_hyper, - - .alloc_pud = xen_alloc_pte_init, - .release_pud = xen_release_pte_init, -#endif /* PAGETABLE_LEVELS == 4 */ - - .activate_mm = xen_activate_mm, - .dup_mmap = xen_dup_mmap, - .exit_mmap = xen_exit_mmap, - - .lazy_mode = { - .enter = paravirt_enter_lazy_mmu, - .leave = xen_leave_lazy, - }, - - .set_fixmap = xen_set_fixmap, -}; - static void xen_reboot(int reason) { struct sched_shutdown r = { .reason = reason }; @@ -1386,223 +874,6 @@ static const struct machine_ops __initdata xen_machine_ops = { }; -static void __init xen_reserve_top(void) -{ -#ifdef CONFIG_X86_32 - unsigned long top = HYPERVISOR_VIRT_START; - struct xen_platform_parameters pp; - - if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) - top = pp.virt_start; - - reserve_top_address(-top); -#endif /* CONFIG_X86_32 */ -} - -/* - * Like __va(), but returns address in the kernel mapping (which is - * all we have until the physical memory mapping has been set up. 
- */ -static void *__ka(phys_addr_t paddr) -{ -#ifdef CONFIG_X86_64 - return (void *)(paddr + __START_KERNEL_map); -#else - return __va(paddr); -#endif -} - -/* Convert a machine address to physical address */ -static unsigned long m2p(phys_addr_t maddr) -{ - phys_addr_t paddr; - - maddr &= PTE_PFN_MASK; - paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; - - return paddr; -} - -/* Convert a machine address to kernel virtual */ -static void *m2v(phys_addr_t maddr) -{ - return __ka(m2p(maddr)); -} - -static void set_page_prot(void *addr, pgprot_t prot) -{ - unsigned long pfn = __pa(addr) >> PAGE_SHIFT; - pte_t pte = pfn_pte(pfn, prot); - - if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) - BUG(); -} - -static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) -{ - unsigned pmdidx, pteidx; - unsigned ident_pte; - unsigned long pfn; - - ident_pte = 0; - pfn = 0; - for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { - pte_t *pte_page; - - /* Reuse or allocate a page of ptes */ - if (pmd_present(pmd[pmdidx])) - pte_page = m2v(pmd[pmdidx].pmd); - else { - /* Check for free pte pages */ - if (ident_pte == ARRAY_SIZE(level1_ident_pgt)) - break; - - pte_page = &level1_ident_pgt[ident_pte]; - ident_pte += PTRS_PER_PTE; - - pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); - } - - /* Install mappings */ - for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { - pte_t pte; - - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - - if (!pte_none(pte_page[pteidx])) - continue; - - pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); - pte_page[pteidx] = pte; - } - } - - for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) - set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); - - set_page_prot(pmd, PAGE_KERNEL_RO); -} - -#ifdef CONFIG_X86_64 -static void convert_pfn_mfn(void *v) -{ - pte_t *pte = v; - int i; - - /* All levels are converted the same way, so just treat them - as ptes. */ - for (i = 0; i < PTRS_PER_PTE; i++) - pte[i] = xen_make_pte(pte[i].pte); -} - -/* - * Set up the inital kernel pagetable. - * - * We can construct this by grafting the Xen provided pagetable into - * head_64.S's preconstructed pagetables. We copy the Xen L2's into - * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This - * means that only the kernel has a physical mapping to start with - - * but that's enough to get __va working. We need to fill in the rest - * of the physical mapping once some sort of allocator has been set - * up. 
- */ -static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, - unsigned long max_pfn) -{ - pud_t *l3; - pmd_t *l2; - - /* Zap identity mapping */ - init_level4_pgt[0] = __pgd(0); - - /* Pre-constructed entries are in pfn, so convert to mfn */ - convert_pfn_mfn(init_level4_pgt); - convert_pfn_mfn(level3_ident_pgt); - convert_pfn_mfn(level3_kernel_pgt); - - l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); - l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); - - memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); - memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); - - l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); - l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); - memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); - - /* Set up identity map */ - xen_map_identity_early(level2_ident_pgt, max_pfn); - - /* Make pagetable pieces RO */ - set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); - set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); - set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - - /* Pin down new L4 */ - pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, - PFN_DOWN(__pa_symbol(init_level4_pgt))); - - /* Unpin Xen-provided one */ - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); - - /* Switch over */ - pgd = init_level4_pgt; - - /* - * At this stage there can be no user pgd, and no page - * structure to attach it to, so make sure we just set kernel - * pgd. - */ - xen_mc_batch(); - __xen_write_cr3(true, __pa(pgd)); - xen_mc_issue(PARAVIRT_LAZY_CPU); - - reserve_early(__pa(xen_start_info->pt_base), - __pa(xen_start_info->pt_base + - xen_start_info->nr_pt_frames * PAGE_SIZE), - "XEN PAGETABLES"); - - return pgd; -} -#else /* !CONFIG_X86_64 */ -static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss; - -static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, - unsigned long max_pfn) -{ - pmd_t *kernel_pmd; - - init_pg_tables_start = __pa(pgd); - init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE; - max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024); - - kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); - memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); - - xen_map_identity_early(level2_kernel_pgt, max_pfn); - - memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); - set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], - __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); - - set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); - set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); - set_page_prot(empty_zero_page, PAGE_KERNEL_RO); - - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); - - xen_write_cr3(__pa(swapper_pg_dir)); - - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); - - return swapper_pg_dir; -} -#endif /* CONFIG_X86_64 */ - /* First C function to be called on Xen boot */ asmlinkage void __init xen_start_kernel(void) { diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 98cb9869eb2..94e452c0b00 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,8 @@ #include #include +#include +#include #include "multicalls.h" #include "mmu.h" @@ -114,6 +117,37 @@ static inline void check_zero(void) #endif /* CONFIG_XEN_DEBUG_FS */ + +/* + * Identity map, in addition to plain kernel map. 
This needs to be + * large enough to allocate page table pages to allocate the rest. + * Each page can map 2MB. + */ +static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss; + +#ifdef CONFIG_X86_64 +/* l3 pud for userspace vsyscall mapping */ +static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; +#endif /* CONFIG_X86_64 */ + +/* + * Note about cr3 (pagetable base) values: + * + * xen_cr3 contains the current logical cr3 value; it contains the + * last set cr3. This may not be the current effective cr3, because + * its update may be being lazily deferred. However, a vcpu looking + * at its own cr3 can use this value knowing that it everything will + * be self-consistent. + * + * xen_current_cr3 contains the actual vcpu cr3; it is set once the + * hypercall to set the vcpu cr3 is complete (so it may be a little + * out of date, but it will never be set early). If one vcpu is + * looking at another vcpu's cr3 value, it should use this variable. + */ +DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ +DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ + + /* * Just beyond the highest usermode address. STACK_TOP_MAX has a * redzone above it, so round it up to a PGD boundary. @@ -1152,6 +1186,672 @@ void xen_exit_mmap(struct mm_struct *mm) spin_unlock(&mm->page_table_lock); } +static __init void xen_pagetable_setup_start(pgd_t *base) +{ +} + +static __init void xen_pagetable_setup_done(pgd_t *base) +{ + xen_setup_shared_info(); +} + +static void xen_write_cr2(unsigned long cr2) +{ + percpu_read(xen_vcpu)->arch.cr2 = cr2; +} + +static unsigned long xen_read_cr2(void) +{ + return percpu_read(xen_vcpu)->arch.cr2; +} + +unsigned long xen_read_cr2_direct(void) +{ + return percpu_read(xen_vcpu_info.arch.cr2); +} + +static void xen_flush_tlb(void) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + + op = mcs.args; + op->cmd = MMUEXT_TLB_FLUSH_LOCAL; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(PARAVIRT_LAZY_MMU); + + preempt_enable(); +} + +static void xen_flush_tlb_single(unsigned long addr) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + op = mcs.args; + op->cmd = MMUEXT_INVLPG_LOCAL; + op->arg1.linear_addr = addr & PAGE_MASK; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(PARAVIRT_LAZY_MMU); + + preempt_enable(); +} + +static void xen_flush_tlb_others(const struct cpumask *cpus, + struct mm_struct *mm, unsigned long va) +{ + struct { + struct mmuext_op op; + DECLARE_BITMAP(mask, NR_CPUS); + } *args; + struct multicall_space mcs; + + BUG_ON(cpumask_empty(cpus)); + BUG_ON(!mm); + + mcs = xen_mc_entry(sizeof(*args)); + args = mcs.args; + args->op.arg2.vcpumask = to_cpumask(args->mask); + + /* Remove us, and any offline CPUS. 
*/ + cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); + if (unlikely(cpumask_empty(to_cpumask(args->mask)))) + goto issue; + + if (va == TLB_FLUSH_ALL) { + args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; + } else { + args->op.cmd = MMUEXT_INVLPG_MULTI; + args->op.arg1.linear_addr = va; + } + + MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); + +issue: + xen_mc_issue(PARAVIRT_LAZY_MMU); +} + +static unsigned long xen_read_cr3(void) +{ + return percpu_read(xen_cr3); +} + +static void set_current_cr3(void *v) +{ + percpu_write(xen_current_cr3, (unsigned long)v); +} + +static void __xen_write_cr3(bool kernel, unsigned long cr3) +{ + struct mmuext_op *op; + struct multicall_space mcs; + unsigned long mfn; + + if (cr3) + mfn = pfn_to_mfn(PFN_DOWN(cr3)); + else + mfn = 0; + + WARN_ON(mfn == 0 && kernel); + + mcs = __xen_mc_entry(sizeof(*op)); + + op = mcs.args; + op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; + op->arg1.mfn = mfn; + + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + if (kernel) { + percpu_write(xen_cr3, cr3); + + /* Update xen_current_cr3 once the batch has actually + been submitted. */ + xen_mc_callback(set_current_cr3, (void *)cr3); + } +} + +static void xen_write_cr3(unsigned long cr3) +{ + BUG_ON(preemptible()); + + xen_mc_batch(); /* disables interrupts */ + + /* Update while interrupts are disabled, so its atomic with + respect to ipis */ + percpu_write(xen_cr3, cr3); + + __xen_write_cr3(true, cr3); + +#ifdef CONFIG_X86_64 + { + pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); + if (user_pgd) + __xen_write_cr3(false, __pa(user_pgd)); + else + __xen_write_cr3(false, 0); + } +#endif + + xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ +} + +static int xen_pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd = mm->pgd; + int ret = 0; + + BUG_ON(PagePinned(virt_to_page(pgd))); + +#ifdef CONFIG_X86_64 + { + struct page *page = virt_to_page(pgd); + pgd_t *user_pgd; + + BUG_ON(page->private != 0); + + ret = -ENOMEM; + + user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + page->private = (unsigned long)user_pgd; + + if (user_pgd != NULL) { + user_pgd[pgd_index(VSYSCALL_START)] = + __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); + ret = 0; + } + + BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); + } +#endif + + return ret; +} + +static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ +#ifdef CONFIG_X86_64 + pgd_t *user_pgd = xen_get_user_pgd(pgd); + + if (user_pgd) + free_page((unsigned long)user_pgd); +#endif +} + + +/* Early in boot, while setting up the initial pagetable, assume + everything is pinned. */ +static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) +{ +#ifdef CONFIG_FLATMEM + BUG_ON(mem_map); /* should only be used early */ +#endif + make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); +} + +/* Early release_pte assumes that all pts are pinned, since there's + only init_mm and anything attached to that is pinned. */ +static void xen_release_pte_init(unsigned long pfn) +{ + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); +} + +static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ + struct mmuext_op op; + op.cmd = cmd; + op.arg1.mfn = pfn_to_mfn(pfn); + if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) + BUG(); +} + +/* This needs to make sure the new pte page is pinned iff its being + attached to a pinned pagetable. 
*/ +static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) +{ + struct page *page = pfn_to_page(pfn); + + if (PagePinned(virt_to_page(mm->pgd))) { + SetPagePinned(page); + + vm_unmap_aliases(); + if (!PageHighMem(page)) { + make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); + if (level == PT_PTE && USE_SPLIT_PTLOCKS) + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + } else { + /* make sure there are no stray mappings of + this page */ + kmap_flush_unused(); + } + } +} + +static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PTE); +} + +static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PMD); +} + +/* This should never happen until we're OK to use struct page */ +static void xen_release_ptpage(unsigned long pfn, unsigned level) +{ + struct page *page = pfn_to_page(pfn); + + if (PagePinned(page)) { + if (!PageHighMem(page)) { + if (level == PT_PTE && USE_SPLIT_PTLOCKS) + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); + } + ClearPagePinned(page); + } +} + +static void xen_release_pte(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PTE); +} + +static void xen_release_pmd(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PMD); +} + +#if PAGETABLE_LEVELS == 4 +static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PUD); +} + +static void xen_release_pud(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PUD); +} +#endif + +void __init xen_reserve_top(void) +{ +#ifdef CONFIG_X86_32 + unsigned long top = HYPERVISOR_VIRT_START; + struct xen_platform_parameters pp; + + if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) + top = pp.virt_start; + + reserve_top_address(-top); +#endif /* CONFIG_X86_32 */ +} + +/* + * Like __va(), but returns address in the kernel mapping (which is + * all we have until the physical memory mapping has been set up. 
+ */ +static void *__ka(phys_addr_t paddr) +{ +#ifdef CONFIG_X86_64 + return (void *)(paddr + __START_KERNEL_map); +#else + return __va(paddr); +#endif +} + +/* Convert a machine address to physical address */ +static unsigned long m2p(phys_addr_t maddr) +{ + phys_addr_t paddr; + + maddr &= PTE_PFN_MASK; + paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; + + return paddr; +} + +/* Convert a machine address to kernel virtual */ +static void *m2v(phys_addr_t maddr) +{ + return __ka(m2p(maddr)); +} + +static void set_page_prot(void *addr, pgprot_t prot) +{ + unsigned long pfn = __pa(addr) >> PAGE_SHIFT; + pte_t pte = pfn_pte(pfn, prot); + + if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) + BUG(); +} + +static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) +{ + unsigned pmdidx, pteidx; + unsigned ident_pte; + unsigned long pfn; + + ident_pte = 0; + pfn = 0; + for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { + pte_t *pte_page; + + /* Reuse or allocate a page of ptes */ + if (pmd_present(pmd[pmdidx])) + pte_page = m2v(pmd[pmdidx].pmd); + else { + /* Check for free pte pages */ + if (ident_pte == ARRAY_SIZE(level1_ident_pgt)) + break; + + pte_page = &level1_ident_pgt[ident_pte]; + ident_pte += PTRS_PER_PTE; + + pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); + } + + /* Install mappings */ + for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { + pte_t pte; + + if (pfn > max_pfn_mapped) + max_pfn_mapped = pfn; + + if (!pte_none(pte_page[pteidx])) + continue; + + pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); + pte_page[pteidx] = pte; + } + } + + for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) + set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); + + set_page_prot(pmd, PAGE_KERNEL_RO); +} + +#ifdef CONFIG_X86_64 +static void convert_pfn_mfn(void *v) +{ + pte_t *pte = v; + int i; + + /* All levels are converted the same way, so just treat them + as ptes. */ + for (i = 0; i < PTRS_PER_PTE; i++) + pte[i] = xen_make_pte(pte[i].pte); +} + +/* + * Set up the inital kernel pagetable. + * + * We can construct this by grafting the Xen provided pagetable into + * head_64.S's preconstructed pagetables. We copy the Xen L2's into + * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This + * means that only the kernel has a physical mapping to start with - + * but that's enough to get __va working. We need to fill in the rest + * of the physical mapping once some sort of allocator has been set + * up. 
+ */ +__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + unsigned long max_pfn) +{ + pud_t *l3; + pmd_t *l2; + + /* Zap identity mapping */ + init_level4_pgt[0] = __pgd(0); + + /* Pre-constructed entries are in pfn, so convert to mfn */ + convert_pfn_mfn(init_level4_pgt); + convert_pfn_mfn(level3_ident_pgt); + convert_pfn_mfn(level3_kernel_pgt); + + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); + l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); + + memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); + memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); + + l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); + l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); + memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); + + /* Set up identity map */ + xen_map_identity_early(level2_ident_pgt, max_pfn); + + /* Make pagetable pieces RO */ + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + + /* Pin down new L4 */ + pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, + PFN_DOWN(__pa_symbol(init_level4_pgt))); + + /* Unpin Xen-provided one */ + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); + + /* Switch over */ + pgd = init_level4_pgt; + + /* + * At this stage there can be no user pgd, and no page + * structure to attach it to, so make sure we just set kernel + * pgd. + */ + xen_mc_batch(); + __xen_write_cr3(true, __pa(pgd)); + xen_mc_issue(PARAVIRT_LAZY_CPU); + + reserve_early(__pa(xen_start_info->pt_base), + __pa(xen_start_info->pt_base + + xen_start_info->nr_pt_frames * PAGE_SIZE), + "XEN PAGETABLES"); + + return pgd; +} +#else /* !CONFIG_X86_64 */ +static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss; + +__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + unsigned long max_pfn) +{ + pmd_t *kernel_pmd; + + init_pg_tables_start = __pa(pgd); + init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE; + max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024); + + kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); + memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); + + xen_map_identity_early(level2_kernel_pgt, max_pfn); + + memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); + set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], + __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); + + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); + set_page_prot(empty_zero_page, PAGE_KERNEL_RO); + + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); + + xen_write_cr3(__pa(swapper_pg_dir)); + + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); + + return swapper_pg_dir; +} +#endif /* CONFIG_X86_64 */ + +static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) +{ + pte_t pte; + + phys >>= PAGE_SHIFT; + + switch (idx) { + case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: +#ifdef CONFIG_X86_F00F_BUG + case FIX_F00F_IDT: +#endif +#ifdef CONFIG_X86_32 + case FIX_WP_TEST: + case FIX_VDSO: +# ifdef CONFIG_HIGHMEM + case FIX_KMAP_BEGIN ... FIX_KMAP_END: +# endif +#else + case VSYSCALL_LAST_PAGE ... 
VSYSCALL_FIRST_PAGE: +#endif +#ifdef CONFIG_X86_LOCAL_APIC + case FIX_APIC_BASE: /* maps dummy local APIC */ +#endif + pte = pfn_pte(phys, prot); + break; + + default: + pte = mfn_pte(phys, prot); + break; + } + + __native_set_fixmap(idx, pte); + +#ifdef CONFIG_X86_64 + /* Replicate changes to map the vsyscall page into the user + pagetable vsyscall mapping. */ + if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { + unsigned long vaddr = __fix_to_virt(idx); + set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); + } +#endif +} + +__init void xen_post_allocator_init(void) +{ + pv_mmu_ops.set_pte = xen_set_pte; + pv_mmu_ops.set_pmd = xen_set_pmd; + pv_mmu_ops.set_pud = xen_set_pud; +#if PAGETABLE_LEVELS == 4 + pv_mmu_ops.set_pgd = xen_set_pgd; +#endif + + /* This will work as long as patching hasn't happened yet + (which it hasn't) */ + pv_mmu_ops.alloc_pte = xen_alloc_pte; + pv_mmu_ops.alloc_pmd = xen_alloc_pmd; + pv_mmu_ops.release_pte = xen_release_pte; + pv_mmu_ops.release_pmd = xen_release_pmd; +#if PAGETABLE_LEVELS == 4 + pv_mmu_ops.alloc_pud = xen_alloc_pud; + pv_mmu_ops.release_pud = xen_release_pud; +#endif + +#ifdef CONFIG_X86_64 + SetPagePinned(virt_to_page(level3_user_vsyscall)); +#endif + xen_mark_init_mm_pinned(); +} + + +const struct pv_mmu_ops xen_mmu_ops __initdata = { + .pagetable_setup_start = xen_pagetable_setup_start, + .pagetable_setup_done = xen_pagetable_setup_done, + + .read_cr2 = xen_read_cr2, + .write_cr2 = xen_write_cr2, + + .read_cr3 = xen_read_cr3, + .write_cr3 = xen_write_cr3, + + .flush_tlb_user = xen_flush_tlb, + .flush_tlb_kernel = xen_flush_tlb, + .flush_tlb_single = xen_flush_tlb_single, + .flush_tlb_others = xen_flush_tlb_others, + + .pte_update = paravirt_nop, + .pte_update_defer = paravirt_nop, + + .pgd_alloc = xen_pgd_alloc, + .pgd_free = xen_pgd_free, + + .alloc_pte = xen_alloc_pte_init, + .release_pte = xen_release_pte_init, + .alloc_pmd = xen_alloc_pte_init, + .alloc_pmd_clone = paravirt_nop, + .release_pmd = xen_release_pte_init, + +#ifdef CONFIG_HIGHPTE + .kmap_atomic_pte = xen_kmap_atomic_pte, +#endif + +#ifdef CONFIG_X86_64 + .set_pte = xen_set_pte, +#else + .set_pte = xen_set_pte_init, +#endif + .set_pte_at = xen_set_pte_at, + .set_pmd = xen_set_pmd_hyper, + + .ptep_modify_prot_start = __ptep_modify_prot_start, + .ptep_modify_prot_commit = __ptep_modify_prot_commit, + + .pte_val = xen_pte_val, + .pgd_val = xen_pgd_val, + + .make_pte = xen_make_pte, + .make_pgd = xen_make_pgd, + +#ifdef CONFIG_X86_PAE + .set_pte_atomic = xen_set_pte_atomic, + .set_pte_present = xen_set_pte_at, + .pte_clear = xen_pte_clear, + .pmd_clear = xen_pmd_clear, +#endif /* CONFIG_X86_PAE */ + .set_pud = xen_set_pud_hyper, + + .make_pmd = xen_make_pmd, + .pmd_val = xen_pmd_val, + +#if PAGETABLE_LEVELS == 4 + .pud_val = xen_pud_val, + .make_pud = xen_make_pud, + .set_pgd = xen_set_pgd_hyper, + + .alloc_pud = xen_alloc_pte_init, + .release_pud = xen_release_pte_init, +#endif /* PAGETABLE_LEVELS == 4 */ + + .activate_mm = xen_activate_mm, + .dup_mmap = xen_dup_mmap, + .exit_mmap = xen_exit_mmap, + + .lazy_mode = { + .enter = paravirt_enter_lazy_mmu, + .leave = xen_leave_lazy, + }, + + .set_fixmap = xen_set_fixmap, +}; + + #ifdef CONFIG_XEN_DEBUG_FS static struct dentry *d_mmu_debug; diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 98d71659da5..24d1b44a337 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h @@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t void xen_ptep_modify_prot_commit(struct 
mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +unsigned long xen_read_cr2_direct(void); + +extern const struct pv_mmu_ops xen_mmu_ops; #endif /* _XEN_MMU_H */ diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index c1f8faf0a2c..11913fc94c1 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -13,6 +13,7 @@ extern const char xen_failsafe_callback[]; struct trap_info; void xen_copy_trap_info(struct trap_info *traps); +DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info); DECLARE_PER_CPU(unsigned long, xen_cr3); DECLARE_PER_CPU(unsigned long, xen_current_cr3); @@ -22,6 +23,13 @@ extern struct shared_info *HYPERVISOR_shared_info; void xen_setup_mfn_list_list(void); void xen_setup_shared_info(void); +void xen_setup_machphys_mapping(void); +pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); +void xen_ident_map_ISA(void); +void xen_reserve_top(void); + +void xen_leave_lazy(void); +void xen_post_allocator_init(void); char * __init xen_memory_setup(void); void __init xen_arch_setup(void); -- cgit v1.2.3 From 41edafdb78feac1d1f8823846209975fde990633 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:02 -0800 Subject: x86/pvops: add a paravirt_ident functions to allow special patching Impact: Optimization Several paravirt ops implementations simply return their arguments, the most obvious being the make_pte/pte_val class of operations on native. On 32-bit, the identity function is literally a no-op, as the calling convention uses the same registers for the first argument and return. On 64-bit, it can be implemented with a single "mov". This patch adds special identity functions for 32 and 64 bit argument, and machinery to recognize them and replace them with either nops or a mov as appropriate. At the moment, the only users for the identity functions are the pagetable entry conversion functions. The result is a measureable improvement on pagetable-heavy benchmarks (2-3%, reducing the pvops overhead from 5 to 2%). Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/paravirt.h | 5 +++ arch/x86/kernel/paravirt.c | 75 ++++++++++++++++++++++++++++++++----- arch/x86/kernel/paravirt_patch_32.c | 12 ++++++ arch/x86/kernel/paravirt_patch_64.c | 15 ++++++++ 4 files changed, 98 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 17577888709..961d10c12f1 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -388,6 +388,8 @@ extern struct pv_lock_ops pv_lock_ops; asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") unsigned paravirt_patch_nop(void); +unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); +unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); unsigned paravirt_patch_ignore(unsigned len); unsigned paravirt_patch_call(void *insnbuf, const void *target, u16 tgt_clobbers, @@ -1371,6 +1373,9 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, } void _paravirt_nop(void); +u32 _paravirt_ident_32(u32); +u64 _paravirt_ident_64(u64); + #define paravirt_nop ((void *)_paravirt_nop) void paravirt_use_bytelocks(void); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 202514be592..dd25e2b1593 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -44,6 +44,17 @@ void _paravirt_nop(void) { } +/* identity function, which can be inlined */ +u32 _paravirt_ident_32(u32 x) +{ + return x; +} + +u64 _paravirt_ident_64(u64 x) +{ + return x; +} + static void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", @@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, if (opfunc == NULL) /* If there's no function, patch it with a ud2a (BUG) */ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); - else if (opfunc == paravirt_nop) + else if (opfunc == _paravirt_nop) /* If the operation is a nop, then nop the callsite */ ret = paravirt_patch_nop(); + + /* identity functions just return their single argument */ + else if (opfunc == _paravirt_ident_32) + ret = paravirt_patch_ident_32(insnbuf, len); + else if (opfunc == _paravirt_ident_64) + ret = paravirt_patch_ident_64(insnbuf, len); + else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || @@ -373,6 +391,45 @@ struct pv_apic_ops pv_apic_ops = { #endif }; +typedef pte_t make_pte_t(pteval_t); +typedef pmd_t make_pmd_t(pmdval_t); +typedef pud_t make_pud_t(pudval_t); +typedef pgd_t make_pgd_t(pgdval_t); + +typedef pteval_t pte_val_t(pte_t); +typedef pmdval_t pmd_val_t(pmd_t); +typedef pudval_t pud_val_t(pud_t); +typedef pgdval_t pgd_val_t(pgd_t); + + +#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) +/* 32-bit pagetable entries */ +#define paravirt_native_make_pte (make_pte_t *)_paravirt_ident_32 +#define paravirt_native_pte_val (pte_val_t *)_paravirt_ident_32 + +#define paravirt_native_make_pmd (make_pmd_t *)_paravirt_ident_32 +#define paravirt_native_pmd_val (pmd_val_t *)_paravirt_ident_32 + +#define paravirt_native_make_pud (make_pud_t *)_paravirt_ident_32 +#define paravirt_native_pud_val (pud_val_t *)_paravirt_ident_32 + +#define paravirt_native_make_pgd (make_pgd_t *)_paravirt_ident_32 +#define paravirt_native_pgd_val (pgd_val_t *)_paravirt_ident_32 +#else +/* 64-bit pagetable entries */ +#define paravirt_native_make_pte (make_pte_t *)_paravirt_ident_64 +#define 
paravirt_native_pte_val (pte_val_t *)_paravirt_ident_64 + +#define paravirt_native_make_pmd (make_pmd_t *)_paravirt_ident_64 +#define paravirt_native_pmd_val (pmd_val_t *)_paravirt_ident_64 + +#define paravirt_native_make_pud (make_pud_t *)_paravirt_ident_64 +#define paravirt_native_pud_val (pud_val_t *)_paravirt_ident_64 + +#define paravirt_native_make_pgd (make_pgd_t *)_paravirt_ident_64 +#define paravirt_native_pgd_val (pgd_val_t *)_paravirt_ident_64 +#endif + struct pv_mmu_ops pv_mmu_ops = { #ifndef CONFIG_X86_64 .pagetable_setup_start = native_pagetable_setup_start, @@ -424,21 +481,21 @@ struct pv_mmu_ops pv_mmu_ops = { .pmd_clear = native_pmd_clear, #endif .set_pud = native_set_pud, - .pmd_val = native_pmd_val, - .make_pmd = native_make_pmd, + .pmd_val = paravirt_native_pmd_val, + .make_pmd = paravirt_native_make_pmd, #if PAGETABLE_LEVELS == 4 - .pud_val = native_pud_val, - .make_pud = native_make_pud, + .pud_val = paravirt_native_pud_val, + .make_pud = paravirt_native_make_pud, .set_pgd = native_set_pgd, #endif #endif /* PAGETABLE_LEVELS >= 3 */ - .pte_val = native_pte_val, - .pgd_val = native_pgd_val, + .pte_val = paravirt_native_pte_val, + .pgd_val = paravirt_native_pgd_val, - .make_pte = native_make_pte, - .make_pgd = native_make_pgd, + .make_pte = paravirt_native_make_pte, + .make_pgd = paravirt_native_make_pgd, .dup_mmap = paravirt_nop, .exit_mmap = paravirt_nop, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 9fe644f4861..d9f32e6d6ab 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); DEF_NATIVE(pv_cpu_ops, clts, "clts"); DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); +unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) +{ + /* arg in %eax, return in %eax */ + return 0; +} + +unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) +{ + /* arg in %edx:%eax, return in %edx:%eax */ + return 0; +} + unsigned native_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) { diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 061d01df9ae..3f08f34f93e 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); +DEF_NATIVE(, mov32, "mov %edi, %eax"); +DEF_NATIVE(, mov64, "mov %rdi, %rax"); + +unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) +{ + return paravirt_patch_insns(insnbuf, len, + start__mov32, end__mov32); +} + +unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) +{ + return paravirt_patch_insns(insnbuf, len, + start__mov64, end__mov64); +} + unsigned native_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) { -- cgit v1.2.3 From b8aa287f77be943e37a84fa4657e27df95269bfb Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:03 -0800 Subject: x86: fix paravirt clobber in entry_64.S Impact: Fix latent bug The clobber is trying to say that anything except RDI is available for clobbering, but actually clobbers everything. This hasn't mattered because the clobbers were basically ignored, but subsequent patches will rely on them. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index a52703864a1..e4c9710cae5 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1140,7 +1140,7 @@ ENTRY(native_load_gs_index) CFI_STARTPROC pushf CFI_ADJUST_CFA_OFFSET 8 - DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) + DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) SWAPGS gs_change: movl %edi,%gs -- cgit v1.2.3 From 9104a18dcdd8dfefdddca8ce44988563f13ed3c4 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:04 -0800 Subject: x86/paravirt: selectively save/restore regs around pvops calls Impact: Optimization Each asm paravirt-ops call says what registers are available for clobbering. This patch makes use of this to selectively save/restore registers around each pvops call. In many cases this significantly shrinks code size. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/paravirt.h | 100 ++++++++++++++++++++++++++-------------- 1 file changed, 65 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 961d10c12f1..dcce961262b 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -12,19 +12,29 @@ #define CLBR_EAX (1 << 0) #define CLBR_ECX (1 << 1) #define CLBR_EDX (1 << 2) +#define CLBR_EDI (1 << 3) -#ifdef CONFIG_X86_64 -#define CLBR_RSI (1 << 3) -#define CLBR_RDI (1 << 4) +#ifdef CONFIG_X86_32 +/* CLBR_ANY should match all regs platform has. For i386, that's just it */ +#define CLBR_ANY ((1 << 4) - 1) +#else +#define CLBR_RAX CLBR_EAX +#define CLBR_RCX CLBR_ECX +#define CLBR_RDX CLBR_EDX +#define CLBR_RDI CLBR_EDI +#define CLBR_RSI (1 << 4) #define CLBR_R8 (1 << 5) #define CLBR_R9 (1 << 6) #define CLBR_R10 (1 << 7) #define CLBR_R11 (1 << 8) #define CLBR_ANY ((1 << 9) - 1) + +#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ + CLBR_RCX | CLBR_R8 | CLBR_R9) +#define CLBR_RET_REG (CLBR_RAX | CLBR_RDX) +#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) + #include -#else -/* CLBR_ANY should match all regs platform has. 
For i386, that's just it */ -#define CLBR_ANY ((1 << 3) - 1) #endif /* X86_64 */ #ifndef __ASSEMBLY__ @@ -1530,33 +1540,49 @@ static inline unsigned long __raw_local_irq_save(void) .popsection +#define COND_PUSH(set, mask, reg) \ + .if ((~set) & mask); push %reg; .endif +#define COND_POP(set, mask, reg) \ + .if ((~set) & mask); pop %reg; .endif + #ifdef CONFIG_X86_64 -#define PV_SAVE_REGS \ - push %rax; \ - push %rcx; \ - push %rdx; \ - push %rsi; \ - push %rdi; \ - push %r8; \ - push %r9; \ - push %r10; \ - push %r11 -#define PV_RESTORE_REGS \ - pop %r11; \ - pop %r10; \ - pop %r9; \ - pop %r8; \ - pop %rdi; \ - pop %rsi; \ - pop %rdx; \ - pop %rcx; \ - pop %rax + +#define PV_SAVE_REGS(set) \ + COND_PUSH(set, CLBR_RAX, rax); \ + COND_PUSH(set, CLBR_RCX, rcx); \ + COND_PUSH(set, CLBR_RDX, rdx); \ + COND_PUSH(set, CLBR_RSI, rsi); \ + COND_PUSH(set, CLBR_RDI, rdi); \ + COND_PUSH(set, CLBR_R8, r8); \ + COND_PUSH(set, CLBR_R9, r9); \ + COND_PUSH(set, CLBR_R10, r10); \ + COND_PUSH(set, CLBR_R11, r11) +#define PV_RESTORE_REGS(set) \ + COND_POP(set, CLBR_R11, r11); \ + COND_POP(set, CLBR_R10, r10); \ + COND_POP(set, CLBR_R9, r9); \ + COND_POP(set, CLBR_R8, r8); \ + COND_POP(set, CLBR_RDI, rdi); \ + COND_POP(set, CLBR_RSI, rsi); \ + COND_POP(set, CLBR_RDX, rdx); \ + COND_POP(set, CLBR_RCX, rcx); \ + COND_POP(set, CLBR_RAX, rax) + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) #define PARA_INDIRECT(addr) *addr(%rip) #else -#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx -#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax +#define PV_SAVE_REGS(set) \ + COND_PUSH(set, CLBR_EAX, eax); \ + COND_PUSH(set, CLBR_EDI, edi); \ + COND_PUSH(set, CLBR_ECX, ecx); \ + COND_PUSH(set, CLBR_EDX, edx) +#define PV_RESTORE_REGS(set) \ + COND_POP(set, CLBR_EDX, edx); \ + COND_POP(set, CLBR_ECX, ecx); \ + COND_POP(set, CLBR_EDI, edi); \ + COND_POP(set, CLBR_EAX, eax) + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) #define PARA_INDIRECT(addr) *%cs:addr @@ -1568,15 +1594,15 @@ static inline unsigned long __raw_local_irq_save(void) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ - PV_SAVE_REGS; \ + PV_SAVE_REGS(clobbers); \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ - PV_RESTORE_REGS;) \ + PV_RESTORE_REGS(clobbers);) #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ - PV_SAVE_REGS; \ + PV_SAVE_REGS(clobbers); \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ - PV_RESTORE_REGS;) + PV_RESTORE_REGS(clobbers);) #define USERGS_SYSRET32 \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ @@ -1606,11 +1632,15 @@ static inline unsigned long __raw_local_irq_save(void) PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ swapgs) +/* + * Note: swapgs is very special, and in practise is either going to be + * implemented with a single "swapgs" instruction or something very + * special. Either way, we don't need to save any registers for + * it. 
+ */ #define SWAPGS \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ - PV_SAVE_REGS; \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ - PV_RESTORE_REGS \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ ) #define GET_CR2_INTO_RCX \ -- cgit v1.2.3 From ecb93d1ccd0aac63f03be2db3cac3fa974716f4c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:05 -0800 Subject: x86/paravirt: add register-saving thunks to reduce caller register pressure Impact: Optimization One of the problems with inserting a pile of C calls where previously there were none is that the register pressure is greatly increased. The C calling convention says that the caller must expect a certain set of registers may be trashed by the callee, and that the callee can use those registers without restriction. This includes the function argument registers, and several others. This patch seeks to alleviate this pressure by introducing wrapper thunks that will do the register saving/restoring, so that the callsite doesn't need to worry about it, but the callee function can be conventional compiler-generated code. In many cases (particularly performance-sensitive cases) the callee will be in assembler anyway, and need not use the compiler's calling convention. Standard calling convention is: arguments return scratch x86-32 eax edx ecx eax ? x86-64 rdi rsi rdx rcx rax r8 r9 r10 r11 The thunk preserves all argument and scratch registers. The return register is not preserved, and is available as a scratch register for unwrapped callee code (and of course the return value). Wrapped function pointers are themselves wrapped in a struct paravirt_callee_save structure, in order to get some warning from the compiler when functions with mismatched calling conventions are used. The most common paravirt ops, both statically and dynamically, are interrupt enable/disable/save/restore, so handle them first. This is particularly easy since their calls are handled specially anyway. XXX Deal with VMI. What's their calling convention? Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/paravirt.h | 126 +++++++++++++++++++++++++++++++--------- arch/x86/kernel/paravirt.c | 8 +-- arch/x86/kernel/vsmp_64.c | 12 ++-- arch/x86/lguest/boot.c | 13 +++-- arch/x86/xen/enlighten.c | 8 +-- arch/x86/xen/irq.c | 14 +++-- 6 files changed, 132 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index dcce961262b..f9107b88631 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -17,6 +17,10 @@ #ifdef CONFIG_X86_32 /* CLBR_ANY should match all regs platform has. 
For i386, that's just it */ #define CLBR_ANY ((1 << 4) - 1) + +#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) +#define CLBR_RET_REG (CLBR_EAX) +#define CLBR_SCRATCH (0) #else #define CLBR_RAX CLBR_EAX #define CLBR_RCX CLBR_ECX @@ -27,16 +31,19 @@ #define CLBR_R9 (1 << 6) #define CLBR_R10 (1 << 7) #define CLBR_R11 (1 << 8) + #define CLBR_ANY ((1 << 9) - 1) #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ CLBR_RCX | CLBR_R8 | CLBR_R9) -#define CLBR_RET_REG (CLBR_RAX | CLBR_RDX) +#define CLBR_RET_REG (CLBR_RAX) #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) #include #endif /* X86_64 */ +#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) + #ifndef __ASSEMBLY__ #include #include @@ -50,6 +57,14 @@ struct tss_struct; struct mm_struct; struct desc_struct; +/* + * Wrapper type for pointers to code which uses the non-standard + * calling convention. See PV_CALL_SAVE_REGS_THUNK below. + */ +struct paravirt_callee_save { + void *func; +}; + /* general info */ struct pv_info { unsigned int kernel_rpl; @@ -199,11 +214,15 @@ struct pv_irq_ops { * expected to use X86_EFLAGS_IF; all other bits * returned from save_fl are undefined, and may be ignored by * restore_fl. + * + * NOTE: These functions callers expect the callee to preserve + * more registers than the standard C calling convention. */ - unsigned long (*save_fl)(void); - void (*restore_fl)(unsigned long); - void (*irq_disable)(void); - void (*irq_enable)(void); + struct paravirt_callee_save save_fl; + struct paravirt_callee_save restore_fl; + struct paravirt_callee_save irq_disable; + struct paravirt_callee_save irq_enable; + void (*safe_halt)(void); void (*halt)(void); @@ -1437,12 +1456,37 @@ extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[]; #ifdef CONFIG_X86_32 -#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" -#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" +#define PV_SAVE_REGS "pushl %ecx; pushl %edx;" +#define PV_RESTORE_REGS "popl %edx; popl %ecx;" + +/* save and restore all caller-save registers, except return value */ +#define PV_SAVE_ALL_CALLER_REGS PV_SAVE_REGS +#define PV_RESTORE_ALL_CALLER_REGS PV_RESTORE_REGS + #define PV_FLAGS_ARG "0" #define PV_EXTRA_CLOBBERS #define PV_VEXTRA_CLOBBERS #else +/* save and restore all caller-save registers, except return value */ +#define PV_SAVE_ALL_CALLER_REGS \ + "push %rcx;" \ + "push %rdx;" \ + "push %rsi;" \ + "push %rdi;" \ + "push %r8;" \ + "push %r9;" \ + "push %r10;" \ + "push %r11;" +#define PV_RESTORE_ALL_CALLER_REGS \ + "pop %r11;" \ + "pop %r10;" \ + "pop %r9;" \ + "pop %r8;" \ + "pop %rdi;" \ + "pop %rsi;" \ + "pop %rdx;" \ + "pop %rcx;" + /* We save some registers, but all of them, that's too much. We clobber all * caller saved registers but the argument parameter */ #define PV_SAVE_REGS "pushq %%rdi;" @@ -1452,52 +1496,76 @@ extern struct paravirt_patch_site __parainstructions[], #define PV_FLAGS_ARG "D" #endif +/* + * Generate a thunk around a function which saves all caller-save + * registers except for the return value. This allows C functions to + * be called from assembler code where fewer than normal registers are + * available. It may also help code generation around calls from C + * code if the common case doesn't use many registers. + * + * When a callee is wrapped in a thunk, the caller can assume that all + * arg regs and all scratch registers are preserved across the + * call. The return value in rax/eax will not be saved, even for void + * functions. 
+ */ +#define PV_CALLEE_SAVE_REGS_THUNK(func) \ + extern typeof(func) __raw_callee_save_##func; \ + static void *__##func##__ __used = func; \ + \ + asm(".pushsection .text;" \ + "__raw_callee_save_" #func ": " \ + PV_SAVE_ALL_CALLER_REGS \ + "call " #func ";" \ + PV_RESTORE_ALL_CALLER_REGS \ + "ret;" \ + ".popsection") + +/* Get a reference to a callee-save function */ +#define PV_CALLEE_SAVE(func) \ + ((struct paravirt_callee_save) { __raw_callee_save_##func }) + +/* Promise that "func" already uses the right calling convention */ +#define __PV_IS_CALLEE_SAVE(func) \ + ((struct paravirt_callee_save) { func }) + static inline unsigned long __raw_local_save_flags(void) { unsigned long f; - asm volatile(paravirt_alt(PV_SAVE_REGS - PARAVIRT_CALL - PV_RESTORE_REGS) + asm volatile(paravirt_alt(PARAVIRT_CALL) : "=a"(f) : paravirt_type(pv_irq_ops.save_fl), paravirt_clobber(CLBR_EAX) - : "memory", "cc" PV_VEXTRA_CLOBBERS); + : "memory", "cc"); return f; } static inline void raw_local_irq_restore(unsigned long f) { - asm volatile(paravirt_alt(PV_SAVE_REGS - PARAVIRT_CALL - PV_RESTORE_REGS) + asm volatile(paravirt_alt(PARAVIRT_CALL) : "=a"(f) : PV_FLAGS_ARG(f), paravirt_type(pv_irq_ops.restore_fl), paravirt_clobber(CLBR_EAX) - : "memory", "cc" PV_EXTRA_CLOBBERS); + : "memory", "cc"); } static inline void raw_local_irq_disable(void) { - asm volatile(paravirt_alt(PV_SAVE_REGS - PARAVIRT_CALL - PV_RESTORE_REGS) + asm volatile(paravirt_alt(PARAVIRT_CALL) : : paravirt_type(pv_irq_ops.irq_disable), paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); + : "memory", "eax", "cc"); } static inline void raw_local_irq_enable(void) { - asm volatile(paravirt_alt(PV_SAVE_REGS - PARAVIRT_CALL - PV_RESTORE_REGS) + asm volatile(paravirt_alt(PARAVIRT_CALL) : : paravirt_type(pv_irq_ops.irq_enable), paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); + : "memory", "eax", "cc"); } static inline unsigned long __raw_local_irq_save(void) @@ -1541,9 +1609,9 @@ static inline unsigned long __raw_local_irq_save(void) #define COND_PUSH(set, mask, reg) \ - .if ((~set) & mask); push %reg; .endif + .if ((~(set)) & mask); push %reg; .endif #define COND_POP(set, mask, reg) \ - .if ((~set) & mask); pop %reg; .endif + .if ((~(set)) & mask); pop %reg; .endif #ifdef CONFIG_X86_64 @@ -1594,15 +1662,15 @@ static inline unsigned long __raw_local_irq_save(void) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ - PV_SAVE_REGS(clobbers); \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ - PV_RESTORE_REGS(clobbers);) + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ - PV_SAVE_REGS(clobbers); \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ - PV_RESTORE_REGS(clobbers);) + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #define USERGS_SYSRET32 \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index dd25e2b1593..8adb6b5aa42 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -310,10 +310,10 @@ struct pv_time_ops pv_time_ops = { struct pv_irq_ops pv_irq_ops = { .init_IRQ = native_init_IRQ, - .save_fl = native_save_fl, - .restore_fl = native_restore_fl, - .irq_disable = native_irq_disable, - .irq_enable = native_irq_enable, + .save_fl = 
__PV_IS_CALLEE_SAVE(native_save_fl), + .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), + .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), + .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), .safe_halt = native_safe_halt, .halt = native_halt, #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index a688f3bfaec..c609205df59 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void) flags &= ~X86_EFLAGS_IF; return flags; } +PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl); static void vsmp_restore_fl(unsigned long flags) { @@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags) flags |= X86_EFLAGS_AC; native_restore_fl(flags); } +PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); static void vsmp_irq_disable(void) { @@ -53,6 +55,7 @@ static void vsmp_irq_disable(void) native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); } +PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); static void vsmp_irq_enable(void) { @@ -60,6 +63,7 @@ static void vsmp_irq_enable(void) native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); } +PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable); static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) @@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void) cap, ctl); if (cap & ctl & (1 << 4)) { /* Setup irq ops and turn on vSMP IRQ fastpath handling */ - pv_irq_ops.irq_disable = vsmp_irq_disable; - pv_irq_ops.irq_enable = vsmp_irq_enable; - pv_irq_ops.save_fl = vsmp_save_fl; - pv_irq_ops.restore_fl = vsmp_restore_fl; + pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); + pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); + pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); + pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); pv_init_ops.patch = vsmp_patch; ctl &= ~(1 << 4); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 92f1c6f3e19..19e33b6cd59 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -173,24 +173,29 @@ static unsigned long save_fl(void) { return lguest_data.irq_enabled; } +PV_CALLEE_SAVE_REGS_THUNK(save_fl); /* restore_flags() just sets the flags back to the value given. */ static void restore_fl(unsigned long flags) { lguest_data.irq_enabled = flags; } +PV_CALLEE_SAVE_REGS_THUNK(restore_fl); /* Interrupts go off... */ static void irq_disable(void) { lguest_data.irq_enabled = 0; } +PV_CALLEE_SAVE_REGS_THUNK(irq_disable); /* Interrupts go on... */ static void irq_enable(void) { lguest_data.irq_enabled = X86_EFLAGS_IF; } +PV_CALLEE_SAVE_REGS_THUNK(irq_enable); + /*:*/ /*M:003 Note that we don't check for outstanding interrupts when we re-enable * them (or when we unmask an interrupt). 
This seems to work for the moment, @@ -984,10 +989,10 @@ __init void lguest_init(void) /* interrupt-related operations */ pv_irq_ops.init_IRQ = lguest_init_IRQ; - pv_irq_ops.save_fl = save_fl; - pv_irq_ops.restore_fl = restore_fl; - pv_irq_ops.irq_disable = irq_disable; - pv_irq_ops.irq_enable = irq_enable; + pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); + pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl); + pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable); + pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable); pv_irq_ops.safe_halt = lguest_safe_halt; /* init-time operations */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0cd2a165f17..ff6d530ccc7 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -676,10 +676,10 @@ void xen_setup_vcpu_info_placement(void) if (have_vcpu_info_placement) { printk(KERN_INFO "Xen: using vcpu_info placement\n"); - pv_irq_ops.save_fl = xen_save_fl_direct; - pv_irq_ops.restore_fl = xen_restore_fl_direct; - pv_irq_ops.irq_disable = xen_irq_disable_direct; - pv_irq_ops.irq_enable = xen_irq_enable_direct; + pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); + pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct); + pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); + pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); pv_mmu_ops.read_cr2 = xen_read_cr2_direct; } } diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 2e8271431e1..5a070900ad3 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void) */ return (-flags) & X86_EFLAGS_IF; } +PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl); static void xen_restore_fl(unsigned long flags) { @@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags) xen_force_evtchn_callback(); } } +PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); static void xen_irq_disable(void) { @@ -86,6 +88,7 @@ static void xen_irq_disable(void) percpu_read(xen_vcpu)->evtchn_upcall_mask = 1; preempt_enable_no_resched(); } +PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); static void xen_irq_enable(void) { @@ -106,6 +109,7 @@ static void xen_irq_enable(void) if (unlikely(vcpu->evtchn_upcall_pending)) xen_force_evtchn_callback(); } +PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable); static void xen_safe_halt(void) { @@ -124,10 +128,12 @@ static void xen_halt(void) static const struct pv_irq_ops xen_irq_ops __initdata = { .init_IRQ = __xen_init_IRQ, - .save_fl = xen_save_fl, - .restore_fl = xen_restore_fl, - .irq_disable = xen_irq_disable, - .irq_enable = xen_irq_enable, + + .save_fl = PV_CALLEE_SAVE(xen_save_fl), + .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), + .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), + .irq_enable = PV_CALLEE_SAVE(xen_irq_enable), + .safe_halt = xen_safe_halt, .halt = xen_halt, #ifdef CONFIG_X86_64 -- cgit v1.2.3 From 791bad9d28d405d9397ea0c370ffb7c7bdd2aa6e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:06 -0800 Subject: x86/paravirt: implement PVOP_CALL macros for callee-save functions Impact: Optimization Functions with the callee save calling convention clobber many fewer registers than the normal C calling convention. Implement variants of PVOP_V?CALL* accordingly. This only bothers with functions up to 3 args, since functions with more args may as well use the normal calling convention. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/paravirt.h | 134 +++++++++++++++++++++++++++++----------- 1 file changed, 99 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index f9107b88631..beb10ecdbe6 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -510,25 +510,45 @@ int paravirt_disable_iospace(void); * makes sure the incoming and outgoing types are always correct. */ #ifdef CONFIG_X86_32 -#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx +#define PVOP_VCALL_ARGS \ + unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx #define PVOP_CALL_ARGS PVOP_VCALL_ARGS + +#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) +#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) +#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) + #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS + +#define PVOP_VCALLEE_CLOBBERS "=a" (__eax) +#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS + #define EXTRA_CLOBBERS #define VEXTRA_CLOBBERS -#else -#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx +#else /* CONFIG_X86_64 */ +#define PVOP_VCALL_ARGS \ + unsigned long __edi = __edi, __esi = __esi, \ + __edx = __edx, __ecx = __ecx #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax + +#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) +#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) +#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) +#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) + #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ "=S" (__esi), "=d" (__edx), \ "=c" (__ecx) - #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) +#define PVOP_VCALLEE_CLOBBERS "=a" (__eax) +#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS + #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" -#endif +#endif /* CONFIG_X86_32 */ #ifdef CONFIG_PARAVIRT_DEBUG #define PVOP_TEST_NULL(op) BUG_ON(op == NULL) @@ -536,10 +556,11 @@ int paravirt_disable_iospace(void); #define PVOP_TEST_NULL(op) ((void)op) #endif -#define __PVOP_CALL(rettype, op, pre, post, ...) \ +#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ + pre, post, ...) \ ({ \ rettype __ret; \ - PVOP_CALL_ARGS; \ + PVOP_CALL_ARGS; \ PVOP_TEST_NULL(op); \ /* This is 32-bit specific, but is okay in 64-bit */ \ /* since this condition will never hold */ \ @@ -547,70 +568,113 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : PVOP_CALL_CLOBBERS \ + : call_clbr \ : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ + paravirt_clobber(clbr), \ ##__VA_ARGS__ \ - : "memory", "cc" EXTRA_CLOBBERS); \ + : "memory", "cc" extra_clbr); \ __ret = (rettype)((((u64)__edx) << 32) | __eax); \ } else { \ asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : PVOP_CALL_CLOBBERS \ + : call_clbr \ : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ + paravirt_clobber(clbr), \ ##__VA_ARGS__ \ - : "memory", "cc" EXTRA_CLOBBERS); \ + : "memory", "cc" extra_clbr); \ __ret = (rettype)__eax; \ } \ __ret; \ }) -#define __PVOP_VCALL(op, pre, post, ...) \ + +#define __PVOP_CALL(rettype, op, pre, post, ...) \ + ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ + EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) + +#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) 
\ + ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ + PVOP_CALLEE_CLOBBERS, , \ + pre, post, ##__VA_ARGS__) + + +#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ ({ \ PVOP_VCALL_ARGS; \ PVOP_TEST_NULL(op); \ asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : PVOP_VCALL_CLOBBERS \ + : call_clbr \ : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ + paravirt_clobber(clbr), \ ##__VA_ARGS__ \ - : "memory", "cc" VEXTRA_CLOBBERS); \ + : "memory", "cc" extra_clbr); \ }) +#define __PVOP_VCALL(op, pre, post, ...) \ + ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ + VEXTRA_CLOBBERS, \ + pre, post, ##__VA_ARGS__) + +#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ + ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ + PVOP_VCALLEE_CLOBBERS, , \ + pre, post, ##__VA_ARGS__) + + + #define PVOP_CALL0(rettype, op) \ __PVOP_CALL(rettype, op, "", "") #define PVOP_VCALL0(op) \ __PVOP_VCALL(op, "", "") +#define PVOP_CALLEE0(rettype, op) \ + __PVOP_CALLEESAVE(rettype, op, "", "") +#define PVOP_VCALLEE0(op) \ + __PVOP_VCALLEESAVE(op, "", "") + + #define PVOP_CALL1(rettype, op, arg1) \ - __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) + __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) #define PVOP_VCALL1(op, arg1) \ - __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) + __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) + +#define PVOP_CALLEE1(rettype, op, arg1) \ + __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) +#define PVOP_VCALLEE1(op, arg1) \ + __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) + #define PVOP_CALL2(rettype, op, arg1, arg2) \ - __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ - "1" ((unsigned long)(arg2))) + __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2)) #define PVOP_VCALL2(op, arg1, arg2) \ - __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ - "1" ((unsigned long)(arg2))) + __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2)) + +#define PVOP_CALLEE2(rettype, op, arg1, arg2) \ + __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2)) +#define PVOP_VCALLEE2(op, arg1, arg2) \ + __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2)) + #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ - __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ - "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) + __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) #define PVOP_VCALL3(op, arg1, arg2, arg3) \ - __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ - "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) + __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ + PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) /* This is the only difference in x86_64. 
We can make it much simpler */ #ifdef CONFIG_X86_32 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ __PVOP_CALL(rettype, op, \ "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ - "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) + PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ + PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ __PVOP_VCALL(op, \ "push %[_arg4];", "lea 4(%%esp),%%esp;", \ @@ -618,13 +682,13 @@ int paravirt_disable_iospace(void); "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) #else #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ - __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ - "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ - "3"((unsigned long)(arg4))) + __PVOP_CALL(rettype, op, "", "", \ + PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ + PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ - "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ - "3"((unsigned long)(arg4))) + __PVOP_VCALL(op, "", "", \ + PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ + PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) #endif static inline int paravirt_enabled(void) -- cgit v1.2.3 From da5de7c22eb705be709a57e486e7475a6969b994 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 28 Jan 2009 14:35:07 -0800 Subject: x86/paravirt: use callee-saved convention for pte_val/make_pte/etc Impact: Optimization In the native case, pte_val, make_pte, etc are all just identity functions, so there's no need to clobber a lot of registers over them. (This changes the 32-bit callee-save calling convention to return both EAX and EDX so functions can return 64-bit values.) Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/paravirt.h | 78 ++++++++++++++++++++--------------------- arch/x86/kernel/paravirt.c | 53 +++++++--------------------- arch/x86/xen/mmu.c | 24 ++++++++----- 3 files changed, 67 insertions(+), 88 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index beb10ecdbe6..2d098b78bc1 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -19,7 +19,7 @@ #define CLBR_ANY ((1 << 4) - 1) #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) -#define CLBR_RET_REG (CLBR_EAX) +#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) #define CLBR_SCRATCH (0) #else #define CLBR_RAX CLBR_EAX @@ -308,11 +308,11 @@ struct pv_mmu_ops { void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - pteval_t (*pte_val)(pte_t); - pte_t (*make_pte)(pteval_t pte); + struct paravirt_callee_save pte_val; + struct paravirt_callee_save make_pte; - pgdval_t (*pgd_val)(pgd_t); - pgd_t (*make_pgd)(pgdval_t pgd); + struct paravirt_callee_save pgd_val; + struct paravirt_callee_save make_pgd; #if PAGETABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE @@ -327,12 +327,12 @@ struct pv_mmu_ops { void (*set_pud)(pud_t *pudp, pud_t pudval); - pmdval_t (*pmd_val)(pmd_t); - pmd_t (*make_pmd)(pmdval_t pmd); + struct paravirt_callee_save pmd_val; + struct paravirt_callee_save make_pmd; #if PAGETABLE_LEVELS == 4 - pudval_t (*pud_val)(pud_t); - pud_t (*make_pud)(pudval_t pud); + struct paravirt_callee_save pud_val; + struct paravirt_callee_save make_pud; void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); #endif /* PAGETABLE_LEVELS == 4 */ @@ -1155,13 +1155,13 @@ static inline pte_t __pte(pteval_t val) pteval_t ret; if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALL2(pteval_t, - pv_mmu_ops.make_pte, - val, (u64)val >> 32); + ret = PVOP_CALLEE2(pteval_t, + pv_mmu_ops.make_pte, + val, (u64)val >> 32); else - ret = PVOP_CALL1(pteval_t, - pv_mmu_ops.make_pte, - val); + ret = PVOP_CALLEE1(pteval_t, + pv_mmu_ops.make_pte, + val); return (pte_t) { .pte = ret }; } @@ -1171,11 +1171,11 @@ static inline pteval_t pte_val(pte_t pte) pteval_t ret; if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, - pte.pte, (u64)pte.pte >> 32); + ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val, + pte.pte, (u64)pte.pte >> 32); else - ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, - pte.pte); + ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, + pte.pte); return ret; } @@ -1185,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val) pgdval_t ret; if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, - val, (u64)val >> 32); + ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd, + val, (u64)val >> 32); else - ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, - val); + ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd, + val); return (pgd_t) { ret }; } @@ -1199,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd) pgdval_t ret; if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, - pgd.pgd, (u64)pgd.pgd >> 32); + ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val, + pgd.pgd, (u64)pgd.pgd >> 32); else - ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, - pgd.pgd); + ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val, + pgd.pgd); return ret; } @@ -1267,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val) pmdval_t ret; if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, - val, (u64)val >> 32); + ret = 
PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd, + val, (u64)val >> 32); else - ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, - val); + ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd, + val); return (pmd_t) { ret }; } @@ -1281,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd) pmdval_t ret; if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, - pmd.pmd, (u64)pmd.pmd >> 32); + ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val, + pmd.pmd, (u64)pmd.pmd >> 32); else - ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, - pmd.pmd); + ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val, + pmd.pmd); return ret; } @@ -1307,11 +1307,11 @@ static inline pud_t __pud(pudval_t val) pudval_t ret; if (sizeof(pudval_t) > sizeof(long)) - ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, - val, (u64)val >> 32); + ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud, + val, (u64)val >> 32); else - ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, - val); + ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud, + val); return (pud_t) { ret }; } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 8adb6b5aa42..cea11c8e304 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -391,43 +391,12 @@ struct pv_apic_ops pv_apic_ops = { #endif }; -typedef pte_t make_pte_t(pteval_t); -typedef pmd_t make_pmd_t(pmdval_t); -typedef pud_t make_pud_t(pudval_t); -typedef pgd_t make_pgd_t(pgdval_t); - -typedef pteval_t pte_val_t(pte_t); -typedef pmdval_t pmd_val_t(pmd_t); -typedef pudval_t pud_val_t(pud_t); -typedef pgdval_t pgd_val_t(pgd_t); - - #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) /* 32-bit pagetable entries */ -#define paravirt_native_make_pte (make_pte_t *)_paravirt_ident_32 -#define paravirt_native_pte_val (pte_val_t *)_paravirt_ident_32 - -#define paravirt_native_make_pmd (make_pmd_t *)_paravirt_ident_32 -#define paravirt_native_pmd_val (pmd_val_t *)_paravirt_ident_32 - -#define paravirt_native_make_pud (make_pud_t *)_paravirt_ident_32 -#define paravirt_native_pud_val (pud_val_t *)_paravirt_ident_32 - -#define paravirt_native_make_pgd (make_pgd_t *)_paravirt_ident_32 -#define paravirt_native_pgd_val (pgd_val_t *)_paravirt_ident_32 +#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) #else /* 64-bit pagetable entries */ -#define paravirt_native_make_pte (make_pte_t *)_paravirt_ident_64 -#define paravirt_native_pte_val (pte_val_t *)_paravirt_ident_64 - -#define paravirt_native_make_pmd (make_pmd_t *)_paravirt_ident_64 -#define paravirt_native_pmd_val (pmd_val_t *)_paravirt_ident_64 - -#define paravirt_native_make_pud (make_pud_t *)_paravirt_ident_64 -#define paravirt_native_pud_val (pud_val_t *)_paravirt_ident_64 - -#define paravirt_native_make_pgd (make_pgd_t *)_paravirt_ident_64 -#define paravirt_native_pgd_val (pgd_val_t *)_paravirt_ident_64 +#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) #endif struct pv_mmu_ops pv_mmu_ops = { @@ -481,21 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = { .pmd_clear = native_pmd_clear, #endif .set_pud = native_set_pud, - .pmd_val = paravirt_native_pmd_val, - .make_pmd = paravirt_native_make_pmd, + + .pmd_val = PTE_IDENT, + .make_pmd = PTE_IDENT, #if PAGETABLE_LEVELS == 4 - .pud_val = paravirt_native_pud_val, - .make_pud = paravirt_native_make_pud, + .pud_val = PTE_IDENT, + .make_pud = PTE_IDENT, + .set_pgd = native_set_pgd, #endif #endif /* PAGETABLE_LEVELS >= 3 */ - .pte_val = paravirt_native_pte_val, - .pgd_val = paravirt_native_pgd_val, + .pte_val = PTE_IDENT, + .pgd_val = PTE_IDENT, - .make_pte = 
paravirt_native_make_pte, - .make_pgd = paravirt_native_make_pgd, + .make_pte = PTE_IDENT, + .make_pgd = PTE_IDENT, .dup_mmap = paravirt_nop, .exit_mmap = paravirt_nop, diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 94e452c0b00..5e41f7fc6cf 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -492,28 +492,33 @@ pteval_t xen_pte_val(pte_t pte) { return pte_mfn_to_pfn(pte.pte); } +PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); pgdval_t xen_pgd_val(pgd_t pgd) { return pte_mfn_to_pfn(pgd.pgd); } +PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); pte_t xen_make_pte(pteval_t pte) { pte = pte_pfn_to_mfn(pte); return native_make_pte(pte); } +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); return native_make_pgd(pgd); } +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); pmdval_t xen_pmd_val(pmd_t pmd) { return pte_mfn_to_pfn(pmd.pmd); } +PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); void xen_set_pud_hyper(pud_t *ptr, pud_t val) { @@ -590,12 +595,14 @@ pmd_t xen_make_pmd(pmdval_t pmd) pmd = pte_pfn_to_mfn(pmd); return native_make_pmd(pmd); } +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); #if PAGETABLE_LEVELS == 4 pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); } +PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); pud_t xen_make_pud(pudval_t pud) { @@ -603,6 +610,7 @@ pud_t xen_make_pud(pudval_t pud) return native_make_pud(pud); } +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); pgd_t *xen_get_user_pgd(pgd_t *pgd) { @@ -1813,11 +1821,11 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, - .pte_val = xen_pte_val, - .pgd_val = xen_pgd_val, + .pte_val = PV_CALLEE_SAVE(xen_pte_val), + .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), - .make_pte = xen_make_pte, - .make_pgd = xen_make_pgd, + .make_pte = PV_CALLEE_SAVE(xen_make_pte), + .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), #ifdef CONFIG_X86_PAE .set_pte_atomic = xen_set_pte_atomic, @@ -1827,12 +1835,12 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { #endif /* CONFIG_X86_PAE */ .set_pud = xen_set_pud_hyper, - .make_pmd = xen_make_pmd, - .pmd_val = xen_pmd_val, + .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), + .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), #if PAGETABLE_LEVELS == 4 - .pud_val = xen_pud_val, - .make_pud = xen_make_pud, + .pud_val = PV_CALLEE_SAVE(xen_pud_val), + .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_pgd = xen_set_pgd_hyper, .alloc_pud = xen_alloc_pte_init, -- cgit v1.2.3 From 4767afbf1f60f73997a7eb69a86d380f1fb27a92 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 29 Jan 2009 01:51:34 -0800 Subject: x86/paravirt: fix missing callee-save call on pud_val Impact: Fix build when CONFIG_PARAVIRT_DEBUG is enabled Fix missed convertion to using callee-saved calls for pud_val, which causes a compile error when CONFIG_PARAVIRT_DEBUG is enabled. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/paravirt.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 2d098b78bc1..b17365c3974 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -1321,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud) pudval_t ret; if (sizeof(pudval_t) > sizeof(long)) - ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, - pud.pud, (u64)pud.pud >> 32); + ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val, + pud.pud, (u64)pud.pud >> 32); else - ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, - pud.pud); + ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val, + pud.pud); return ret; } -- cgit v1.2.3 From 193c81b979adbc4a540bf89e75b9039fae75bf82 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:23:27 +0100 Subject: x86, irq: add LOCAL_PERF_VECTOR Add a slot for the performance monitoring interrupt. Not yet used by any subsystem - but the hardware has it. (This eases integration with performance monitoring code.) Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 9a83a10a5d5..0e2220bb314 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -88,6 +88,11 @@ */ #define LOCAL_TIMER_VECTOR 0xef +/* + * Performance monitoring interrupt vector: + */ +#define LOCAL_PERF_VECTOR 0xee + /* * First APIC vector available to drivers: (vectors 0x30-0xee) we * start at 0x31(0x41) to spread out vectors evenly between priority -- cgit v1.2.3 From d1de36f5b5a30b8f9dae7142516fb122ce1e0661 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 01:59:14 +0100 Subject: x86, apic: clean up header section Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 46 ++++++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 968c817762a..29e7186b0e8 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -14,43 +14,41 @@ * Mikael Pettersson : PM converted to driver model. 
*/ -#include - -#include -#include -#include -#include -#include #include -#include -#include -#include -#include +#include #include +#include +#include +#include +#include +#include #include -#include +#include +#include +#include #include -#include -#include +#include +#include +#include #include -#include +#include +#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include #include -#include +#include #include #include -#include +#include +#include +#include +#include #include -#include - /* * Sanity check */ -- cgit v1.2.3 From 8f47e16348e8e25eedf639092a8a2f10a66aba34 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:03:42 +0100 Subject: x86: update copyrights Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/io_apic.c | 2 +- arch/x86/kernel/mpparse.c | 2 +- arch/x86/kernel/smp.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/kernel/stacktrace.c | 2 +- arch/x86/mm/mmap.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 29e7186b0e8..d6da6dd2f60 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1,7 +1,7 @@ /* * Local APIC handling, local APIC timers * - * (c) 1999, 2000 Ingo Molnar + * (c) 1999, 2000, 2009 Ingo Molnar * * Fixes * Maciej W. Rozycki : Bits for genuine 82489DX APICs; diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 3378ffb2140..57d60c741e3 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1,7 +1,7 @@ /* * Intel IO-APIC support for multi-Pentium hosts. * - * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo + * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo * * Many thanks to Stig Venaas for trying out countless experimental * patches and reporting/debugging problems patiently! diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index b46ca7d31fe..66ebb823f39 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -3,7 +3,7 @@ * compliant MP-table parsing routines. * * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar + * (c) 1998, 1999, 2000, 2009 Ingo Molnar * (c) 2008 Alexey Starikovskiy */ diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0eb32ae9bf1..eaaffae31cc 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -2,7 +2,7 @@ * Intel SMP support routines. * * (c) 1995 Alan Cox, Building #3 - * (c) 1998-99, 2000 Ingo Molnar + * (c) 1998-99, 2000, 2009 Ingo Molnar * (c) 2002,2003 Andi Kleen, SuSE Labs. * * i386 and x86_64 integration by Glauber Costa diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 1268a862abb..f40f86fec2f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -2,7 +2,7 @@ * x86 SMP booting functions * * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar + * (c) 1998, 1999, 2000, 2009 Ingo Molnar * Copyright 2001 Andi Kleen, SuSE Labs. 
* * Much of the core SMP work is based on previous work by Thomas Radke, to diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 10786af9554..f7bddc2e37d 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -1,7 +1,7 @@ /* * Stack trace management functions * - * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar */ #include #include diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 56fe7124fbe..16582960056 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -4,7 +4,7 @@ * Based on code by Ingo Molnar and Andi Kleen, copyrighted * as follows: * - * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. + * Copyright 2003-2009 Red Hat Inc. * All Rights Reserved. * Copyright 2005 Andi Kleen, SUSE Labs. * Copyright 2007 Jiri Kosina, SUSE Labs. -- cgit v1.2.3 From 5da690d29f0de17cc1835dd3eb8f8bd0945521f0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:10:03 +0100 Subject: x86, apic: unify the APIC vector enumeration Most of the vector layout on 32-bit and 64-bit is identical now, so eliminate the duplicated enumeration of the vectors. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 35 ++++++++++++----------------------- 1 file changed, 12 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 0e2220bb314..393f85ecdd8 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -50,37 +50,26 @@ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. */ -#ifdef CONFIG_X86_32 - -# define SPURIOUS_APIC_VECTOR 0xff -# define ERROR_APIC_VECTOR 0xfe -# define RESCHEDULE_VECTOR 0xfd -# define CALL_FUNCTION_VECTOR 0xfc -# define CALL_FUNCTION_SINGLE_VECTOR 0xfb -# define THERMAL_APIC_VECTOR 0xfa -/* 0xf8 - 0xf9 : free */ -# define INVALIDATE_TLB_VECTOR_END 0xf7 -# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ -# define NUM_INVALIDATE_TLB_VECTORS 8 +#define SPURIOUS_APIC_VECTOR 0xff +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define THERMAL_APIC_VECTOR 0xfa +#ifdef CONFIG_X86_32 +/* 0xf8 - 0xf9 : free */ #else - -# define SPURIOUS_APIC_VECTOR 0xff -# define ERROR_APIC_VECTOR 0xfe -# define RESCHEDULE_VECTOR 0xfd -# define CALL_FUNCTION_VECTOR 0xfc -# define CALL_FUNCTION_SINGLE_VECTOR 0xfb -# define THERMAL_APIC_VECTOR 0xfa # define THRESHOLD_APIC_VECTOR 0xf9 # define UV_BAU_MESSAGE 0xf8 -# define INVALIDATE_TLB_VECTOR_END 0xf7 -# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ +#endif +/* f0-f7 used for spreading out TLB flushes: */ +#define INVALIDATE_TLB_VECTOR_END 0xf7 +#define INVALIDATE_TLB_VECTOR_START 0xf0 #define NUM_INVALIDATE_TLB_VECTORS 8 -#endif - /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ -- cgit v1.2.3 From 647ad94fc0479e33958cb4d0e20e241c0bcf599c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:06:50 +0100 Subject: x86, apic: clean up spurious vector sanity check Move the spurious vector sanity check to the place where it's defined - out of a .c file. 
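As a reminder of what the check buys us: it is a pure preprocessor assertion, so a bad vector value breaks the build instead of producing a subtly misconfigured APIC at boot. A minimal standalone sketch of the pattern (not kernel code; the 0x0F requirement reflects local APIC implementations whose low spurious-vector bits read back as ones):

    #define SPURIOUS_APIC_VECTOR 0xff   /* change to e.g. 0xf7 to watch the build fail */

    #if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
    # error SPURIOUS_APIC_VECTOR definition error
    #endif

    int main(void)
    {
            return 0;       /* nothing to do at run time - the check already passed */
    }
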
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 7 +++++++ arch/x86/kernel/apic.c | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 393f85ecdd8..2601fd108c7 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -52,6 +52,13 @@ */ #define SPURIOUS_APIC_VECTOR 0xff +/* + * Sanity check + */ +#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) +# error SPURIOUS_APIC_VECTOR definition error +#endif + #define ERROR_APIC_VECTOR 0xfe #define RESCHEDULE_VECTOR 0xfd #define CALL_FUNCTION_VECTOR 0xfc diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index d6da6dd2f60..85d8b50d1af 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -49,13 +49,6 @@ #include #include -/* - * Sanity check - */ -#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) -# error SPURIOUS_APIC_VECTOR definition error -#endif - unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; /* Processor that is doing the boot up */ -- cgit v1.2.3 From ed74ca6d5a3e57eb0969d4e14e46cf9f88d25d3f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:16:04 +0100 Subject: x86, voyager: move Voyager-specific defines to voyager.h They dont belong into the generic headers. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hw_irq.h | 10 --------- arch/x86/include/asm/irq_vectors.h | 35 ------------------------------- arch/x86/include/asm/voyager.h | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 41550797396..3ef2bded97a 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -84,16 +84,6 @@ extern atomic_t irq_mis_count; /* EISA */ extern void eisa_set_level_irq(unsigned int irq); -/* Voyager functions */ -extern asmlinkage void vic_cpi_interrupt(void); -extern asmlinkage void vic_sys_interrupt(void); -extern asmlinkage void vic_cmn_interrupt(void); -extern asmlinkage void qic_timer_interrupt(void); -extern asmlinkage void qic_invalidate_interrupt(void); -extern asmlinkage void qic_reschedule_interrupt(void); -extern asmlinkage void qic_enable_irq_interrupt(void); -extern asmlinkage void qic_call_function_interrupt(void); - /* SMP */ extern void smp_apic_timer_interrupt(struct pt_regs *); extern void smp_spurious_interrupt(struct pt_regs *); diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 2601fd108c7..067d22ffb3e 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -135,39 +135,4 @@ #endif -/* Voyager specific defines */ -/* These define the CPIs we use in linux */ -#define VIC_CPI_LEVEL0 0 -#define VIC_CPI_LEVEL1 1 -/* now the fake CPIs */ -#define VIC_TIMER_CPI 2 -#define VIC_INVALIDATE_CPI 3 -#define VIC_RESCHEDULE_CPI 4 -#define VIC_ENABLE_IRQ_CPI 5 -#define VIC_CALL_FUNCTION_CPI 6 -#define VIC_CALL_FUNCTION_SINGLE_CPI 7 - -/* Now the QIC CPIs: Since we don't need the two initial levels, - * these are 2 less than the VIC CPIs */ -#define QIC_CPI_OFFSET 1 -#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) -#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) -#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) -#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) -#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) 
-#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET) - -#define VIC_START_FAKE_CPI VIC_TIMER_CPI -#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI - -/* this is the SYS_INT CPI. */ -#define VIC_SYS_INT 8 -#define VIC_CMN_INT 15 - -/* This is the boot CPI for alternate processors. It gets overwritten - * by the above once the system has activated all available processors */ -#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 -#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) - - #endif /* _ASM_X86_IRQ_VECTORS_H */ diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h index b3e64730762..c1635d43616 100644 --- a/arch/x86/include/asm/voyager.h +++ b/arch/x86/include/asm/voyager.h @@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void); #define VOYAGER_PSI_SUBREAD 2 #define VOYAGER_PSI_SUBWRITE 3 extern void voyager_cat_psi(__u8, __u16, __u8 *); + +/* These define the CPIs we use in linux */ +#define VIC_CPI_LEVEL0 0 +#define VIC_CPI_LEVEL1 1 +/* now the fake CPIs */ +#define VIC_TIMER_CPI 2 +#define VIC_INVALIDATE_CPI 3 +#define VIC_RESCHEDULE_CPI 4 +#define VIC_ENABLE_IRQ_CPI 5 +#define VIC_CALL_FUNCTION_CPI 6 +#define VIC_CALL_FUNCTION_SINGLE_CPI 7 + +/* Now the QIC CPIs: Since we don't need the two initial levels, + * these are 2 less than the VIC CPIs */ +#define QIC_CPI_OFFSET 1 +#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) +#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) +#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) +#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) +#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) +#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET) + +#define VIC_START_FAKE_CPI VIC_TIMER_CPI +#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI + +/* this is the SYS_INT CPI. */ +#define VIC_SYS_INT 8 +#define VIC_CMN_INT 15 + +/* This is the boot CPI for alternate processors. It gets overwritten + * by the above once the system has activated all available processors */ +#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 +#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) + +extern asmlinkage void vic_cpi_interrupt(void); +extern asmlinkage void vic_sys_interrupt(void); +extern asmlinkage void vic_cmn_interrupt(void); +extern asmlinkage void qic_timer_interrupt(void); +extern asmlinkage void qic_invalidate_interrupt(void); +extern asmlinkage void qic_reschedule_interrupt(void); +extern asmlinkage void qic_enable_irq_interrupt(void); +extern asmlinkage void qic_call_function_interrupt(void); -- cgit v1.2.3 From 3e92ab3d7e2edef5dccd8b0db21528699c81d2c0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:21:42 +0100 Subject: x86, irqs, voyager: remove Voyager quirk Remove a Voyager complication from the generic irq_vectors.h header. 
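With the Voyager special case gone, NR_IRQS is sized purely by the generic expressions that remain in the header. A throwaway userspace sketch of what the non-sparse branch evaluates to (the CPU and IO-APIC counts below are made-up sample values, not kernel defaults):

    #include <stdio.h>

    #define NR_VECTORS      256

    /* non-sparse irq_desc[]: 32 slots per CPU or per IO-APIC, whichever count is smaller */
    static int nr_irqs_ioapic(int nr_cpus, int max_io_apics)
    {
            return nr_cpus < max_io_apics ? NR_VECTORS + 32 * nr_cpus
                                          : NR_VECTORS + 32 * max_io_apics;
    }

    int main(void)
    {
            printf("IO-APIC box, 8 CPUs, 4 IO-APICs : NR_IRQS = %d\n", nr_irqs_ioapic(8, 4));
            printf("IO-APIC box, 2 CPUs, 64 IO-APICs: NR_IRQS = %d\n", nr_irqs_ioapic(2, 64));
            printf("no IO-APIC at all               : NR_IRQS = %d\n", 16);
            return 0;
    }
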
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 067d22ffb3e..81fc883b3c0 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -106,7 +106,7 @@ #define NR_IRQS_LEGACY 16 -#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) +#ifdef CONFIG_X86_IO_APIC #include /* need MAX_IO_APICS */ @@ -117,22 +117,14 @@ # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) # endif #else - # define NR_IRQS \ ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \ (NR_VECTORS + (8 * NR_CPUS)) : \ - (NR_VECTORS + (32 * MAX_IO_APICS))) \ - + (NR_VECTORS + (32 * MAX_IO_APICS))) #endif -#elif defined(CONFIG_X86_VOYAGER) - -# define NR_IRQS 224 - -#else /* IO_APIC || VOYAGER */ - +#else /* !CONFIG_X86_IO_APIC: */ # define NR_IRQS 16 - #endif #endif /* _ASM_X86_IRQ_VECTORS_H */ -- cgit v1.2.3 From 9fc2e79d4f239c1c1dfdab7b10854c7588b39d9a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:48:17 +0100 Subject: x86, irq: add IRQ layout comments Describe the layout of x86 trap/exception/IRQ vectors and clean up indentation and other small details. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 92 ++++++++++++++++++++++++-------------- 1 file changed, 58 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 81fc883b3c0..5f7d6a1e3d2 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -3,45 +3,69 @@ #include -#define NMI_VECTOR 0x02 +/* + * Linux IRQ vector layout. + * + * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can + * be defined by Linux. They are used as a jump table by the CPU when a + * given vector is triggered - by a CPU-external, CPU-internal or + * software-triggered event. + * + * Linux sets the kernel code address each entry jumps to early during + * bootup, and never changes them. This is the general layout of the + * IDT entries: + * + * Vectors 0 ... 31 : system traps and exceptions - hardcoded events + * Vectors 32 ... 127 : device interrupts + * Vector 128 : legacy int80 syscall interface + * Vectors 129 ... 237 : device interrupts + * Vectors 238 ... 255 : special interrupts + * + * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. + * + * This file enumerates the exact layout of them: + */ + +#define NMI_VECTOR 0x02 /* * IDT vectors usable for external interrupt sources start * at 0x20: */ -#define FIRST_EXTERNAL_VECTOR 0x20 +#define FIRST_EXTERNAL_VECTOR 0x20 #ifdef CONFIG_X86_32 -# define SYSCALL_VECTOR 0x80 +# define SYSCALL_VECTOR 0x80 #else -# define IA32_SYSCALL_VECTOR 0x80 +# define IA32_SYSCALL_VECTOR 0x80 #endif /* * Reserve the lowest usable priority level 0x20 - 0x2f for triggering * cleanup after irq migration. */ -#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR +#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR /* * Vectors 0x30-0x3f are used for ISA interrupts. 
*/ -#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) -#define IRQ1_VECTOR (IRQ0_VECTOR + 1) -#define IRQ2_VECTOR (IRQ0_VECTOR + 2) -#define IRQ3_VECTOR (IRQ0_VECTOR + 3) -#define IRQ4_VECTOR (IRQ0_VECTOR + 4) -#define IRQ5_VECTOR (IRQ0_VECTOR + 5) -#define IRQ6_VECTOR (IRQ0_VECTOR + 6) -#define IRQ7_VECTOR (IRQ0_VECTOR + 7) -#define IRQ8_VECTOR (IRQ0_VECTOR + 8) -#define IRQ9_VECTOR (IRQ0_VECTOR + 9) -#define IRQ10_VECTOR (IRQ0_VECTOR + 10) -#define IRQ11_VECTOR (IRQ0_VECTOR + 11) -#define IRQ12_VECTOR (IRQ0_VECTOR + 12) -#define IRQ13_VECTOR (IRQ0_VECTOR + 13) -#define IRQ14_VECTOR (IRQ0_VECTOR + 14) -#define IRQ15_VECTOR (IRQ0_VECTOR + 15) +#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) + +#define IRQ1_VECTOR (IRQ0_VECTOR + 1) +#define IRQ2_VECTOR (IRQ0_VECTOR + 2) +#define IRQ3_VECTOR (IRQ0_VECTOR + 3) +#define IRQ4_VECTOR (IRQ0_VECTOR + 4) +#define IRQ5_VECTOR (IRQ0_VECTOR + 5) +#define IRQ6_VECTOR (IRQ0_VECTOR + 6) +#define IRQ7_VECTOR (IRQ0_VECTOR + 7) +#define IRQ8_VECTOR (IRQ0_VECTOR + 8) +#define IRQ9_VECTOR (IRQ0_VECTOR + 9) +#define IRQ10_VECTOR (IRQ0_VECTOR + 10) +#define IRQ11_VECTOR (IRQ0_VECTOR + 11) +#define IRQ12_VECTOR (IRQ0_VECTOR + 12) +#define IRQ13_VECTOR (IRQ0_VECTOR + 13) +#define IRQ14_VECTOR (IRQ0_VECTOR + 14) +#define IRQ15_VECTOR (IRQ0_VECTOR + 15) /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff @@ -75,36 +99,36 @@ /* f0-f7 used for spreading out TLB flushes: */ #define INVALIDATE_TLB_VECTOR_END 0xf7 #define INVALIDATE_TLB_VECTOR_START 0xf0 -#define NUM_INVALIDATE_TLB_VECTORS 8 +#define NUM_INVALIDATE_TLB_VECTORS 8 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ -#define LOCAL_TIMER_VECTOR 0xef +#define LOCAL_TIMER_VECTOR 0xef /* * Performance monitoring interrupt vector: */ -#define LOCAL_PERF_VECTOR 0xee +#define LOCAL_PERF_VECTOR 0xee /* * First APIC vector available to drivers: (vectors 0x30-0xee) we * start at 0x31(0x41) to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ -#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) +#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) -#define NR_VECTORS 256 +#define NR_VECTORS 256 -#define FPU_IRQ 13 +#define FPU_IRQ 13 -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 -#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) +#define FIRST_VM86_IRQ 3 +#define LAST_VM86_IRQ 15 +#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) -#define NR_IRQS_LEGACY 16 +#define NR_IRQS_LEGACY 16 #ifdef CONFIG_X86_IO_APIC @@ -112,9 +136,9 @@ #ifndef CONFIG_SPARSE_IRQ # if NR_CPUS < MAX_IO_APICS -# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) # else -# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) # endif #else # define NR_IRQS \ @@ -124,7 +148,7 @@ #endif #else /* !CONFIG_X86_IO_APIC: */ -# define NR_IRQS 16 +# define NR_IRQS 16 #endif #endif /* _ASM_X86_IRQ_VECTORS_H */ -- cgit v1.2.3 From c379698fdac7cb65c96dec549850ce606dd6ceba Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:50:46 +0100 Subject: x86, irq_vectors.h: remove needless includes Reduce include file dependencies a bit - remove the two headers that are included in irq_vectors.h. 
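One reason dropping the #include works at all: macro bodies are only expanded at their point of use, so NR_IRQS may keep referring to MAX_IO_APICS as long as whoever expands NR_IRQS already has that definition in scope. Standalone illustration of the mechanism (toy values, not the kernel's):

    #define NR_VECTORS    256
    /* MAX_IO_APICS is not defined yet - fine, the body is not expanded here: */
    #define NR_IRQS       (NR_VECTORS + (32 * MAX_IO_APICS))

    /* ...later, e.g. pulled in from another header by the eventual user: */
    #define MAX_IO_APICS  128

    static int nr_irqs = NR_IRQS;   /* expands now, with MAX_IO_APICS visible */

    int main(void)
    {
            return nr_irqs == 256 + 32 * 128 ? 0 : 1;
    }
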
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 5f7d6a1e3d2..ec87910025d 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -1,8 +1,6 @@ #ifndef _ASM_X86_IRQ_VECTORS_H #define _ASM_X86_IRQ_VECTORS_H -#include - /* * Linux IRQ vector layout. * @@ -131,22 +129,18 @@ #define NR_IRQS_LEGACY 16 #ifdef CONFIG_X86_IO_APIC - -#include /* need MAX_IO_APICS */ - -#ifndef CONFIG_SPARSE_IRQ -# if NR_CPUS < MAX_IO_APICS -# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# ifndef CONFIG_SPARSE_IRQ +# if NR_CPUS < MAX_IO_APICS +# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# else +# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# endif # else -# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) -# endif -#else -# define NR_IRQS \ +# define NR_IRQS \ ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \ (NR_VECTORS + (8 * NR_CPUS)) : \ (NR_VECTORS + (32 * MAX_IO_APICS))) -#endif - +# endif #else /* !CONFIG_X86_IO_APIC: */ # define NR_IRQS 16 #endif -- cgit v1.2.3 From 009eb3fe146aa6f1951f3c5235851bb8d1330dfb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 02:56:44 +0100 Subject: x86, irq: describe NR_IRQ sizing details, clean up Impact: cleanup Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index ec87910025d..41e2450e13b 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -126,23 +126,37 @@ #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) +/* + * Size the maximum number of interrupts. + * + * If the irq_desc[] array has a sparse layout, we can size things + * generously - it scales up linearly with the maximum number of CPUs, + * and the maximum number of IO-APICs, whichever is higher. + * + * In other cases we size more conservatively, to not create too large + * static arrays. + */ + #define NR_IRQS_LEGACY 16 +#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) +#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) + #ifdef CONFIG_X86_IO_APIC -# ifndef CONFIG_SPARSE_IRQ +# ifdef CONFIG_SPARSE_IRQ +# define NR_IRQS \ + (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ + (NR_VECTORS + CPU_VECTOR_LIMIT) : \ + (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) +# else # if NR_CPUS < MAX_IO_APICS -# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) # else -# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) # endif -# else -# define NR_IRQS \ - ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? 
\ - (NR_VECTORS + (8 * NR_CPUS)) : \ - (NR_VECTORS + (32 * MAX_IO_APICS))) # endif #else /* !CONFIG_X86_IO_APIC: */ -# define NR_IRQS 16 +# define NR_IRQS NR_IRQS_LEGACY #endif #endif /* _ASM_X86_IRQ_VECTORS_H */ -- cgit v1.2.3 From d8106d2e24d54497233ca9cd97fa9bec807de458 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 03:06:17 +0100 Subject: x86, vm86: clean up invalid_vm86_irq() Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 41e2450e13b..b07278c55e9 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -124,7 +124,13 @@ #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 -#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) + +#ifndef __ASSEMBLY__ +static inline int invalid_vm86_irq(int irq) +{ + return irq < 3 || irq > 15; +} +#endif /* * Size the maximum number of interrupts. -- cgit v1.2.3 From 2749ebe320ff9f77548d10fcc0a3464ac21c8e58 Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Thu, 29 Jan 2009 15:35:26 -0600 Subject: x86: UV fix uv_flush_send_and_wait() Impact: fix possible tlb mis-flushing on UV uv_flush_send_and_wait() should return a pointer if the broadcast remote tlb shootdown requests fail. That causes the conventional IPI method of shootdown to be used. Signed-off-by: Cliff Wickman Signed-off-by: Tejun Heo --- arch/x86/kernel/tlb_uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 89fce1b6d01..f4b2f27d19b 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -259,7 +259,7 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, * the cpu's, all of which are still in the mask. */ __get_cpu_var(ptcstats).ptc_i++; - return 0; + return flush_mask; } /* -- cgit v1.2.3 From 552be871e67ff577ed36beb2f53d078b42304739 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 30 Jan 2009 17:47:53 +0900 Subject: x86: pass in cpu number to switch_to_new_gdt() Impact: cleanup, prepare for xen boot fix. Xen needs to call this function very early to setup the GDT and per-cpu segments. Remove the call to smp_processor_id() and just pass in the cpu number. Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/processor.h | 2 +- arch/x86/kernel/cpu/common.c | 7 +++---- arch/x86/kernel/setup_percpu.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-voyager/voyager_smp.c | 11 ++++++----- 5 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index befa20b4a68..1c25eb69ea8 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -768,7 +768,7 @@ extern int sysenter_setup(void); extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); -extern void switch_to_new_gdt(void); +extern void switch_to_new_gdt(int); extern void cpu_init(void); static inline unsigned long get_debugctlmsr(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 652fdc9a757..6eacd64b602 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -255,10 +255,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; /* Current gdt points %fs at the "master" per-cpu area: after this, * it's on the real one. 
*/ -void switch_to_new_gdt(void) +void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; - int cpu = smp_processor_id(); gdt_descr.address = (long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; @@ -993,7 +992,7 @@ void __cpuinit cpu_init(void) * and set up the GDT descriptor: */ - switch_to_new_gdt(); + switch_to_new_gdt(cpu); loadsegment(fs, 0); load_idt((const struct desc_ptr *)&idt_descr); @@ -1098,7 +1097,7 @@ void __cpuinit cpu_init(void) clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); load_idt(&idt_descr); - switch_to_new_gdt(); + switch_to_new_gdt(cpu); /* * Set up and load the per-CPU TSS and LDT diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 0d1e7ac439f..ef91747bbed 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -122,7 +122,7 @@ void __init setup_per_cpu_areas(void) * area. Reload any changed state for the boot CPU. */ if (cpu == boot_cpu_id) - switch_to_new_gdt(); + switch_to_new_gdt(cpu); DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f9dbcff4354..612d3c74f6a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1185,7 +1185,7 @@ out: void __init native_smp_prepare_boot_cpu(void) { int me = smp_processor_id(); - switch_to_new_gdt(); + switch_to_new_gdt(me); /* already set me in cpu_online_mask in boot_cpu_init() */ cpumask_set_cpu(me, cpu_callout_mask); per_cpu(cpu_state, me) = CPU_ONLINE; diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 331cd6d5648..58c7cac3440 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -1746,12 +1746,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) static void __cpuinit voyager_smp_prepare_boot_cpu(void) { - switch_to_new_gdt(); + int cpu = smp_processor_id(); + switch_to_new_gdt(cpu); - cpu_set(smp_processor_id(), cpu_online_map); - cpu_set(smp_processor_id(), cpu_callout_map); - cpu_set(smp_processor_id(), cpu_possible_map); - cpu_set(smp_processor_id(), cpu_present_map); + cpu_set(cpu, cpu_online_map); + cpu_set(cpu, cpu_callout_map); + cpu_set(cpu, cpu_possible_map); + cpu_set(cpu, cpu_present_map); } static int __cpuinit voyager_cpu_up(unsigned int cpu) -- cgit v1.2.3 From 11e3a840cd5b731cdd8f6f956dfae78a8046d09c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 30 Jan 2009 17:47:54 +0900 Subject: x86: split loading percpu segments from loading gdt Impact: split out a function, no functional change Xen needs to be able to access percpu data from very early on. For various reasons, it cannot also load the gdt at that time. It does, however, have a pefectly functional gdt at that point, so there's no pressing need to reload the gdt. Split the function to load the segment registers off, so Xen can call it directly. 
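Sketch of the resulting call structure (trimmed, not a standalone build; names are the ones introduced by this patch). Note that on 64-bit the per-cpu base is a wrmsrl(MSR_GS_BASE, ...) and does not go through the GDT at all, which is part of what makes an early standalone call viable:

    /* sketch only - gdt_descr setup and #ifdefs omitted, see the hunks below */
    void switch_to_new_gdt(int cpu)
    {
            load_gdt(&gdt_descr);           /* install the real GDT ...              */
            load_percpu_segment(cpu);       /* ... then point %fs / MSR_GS_BASE at
                                             * this CPU's per-cpu area               */
    }

    /* early Xen boot: GDT already usable, only the per-cpu base is missing */
    load_percpu_segment(0);
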
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Tejun Heo --- arch/x86/include/asm/processor.h | 1 + arch/x86/kernel/cpu/common.c | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1c25eb69ea8..656d02ea509 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -769,6 +769,7 @@ extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); extern void switch_to_new_gdt(int); +extern void load_percpu_segment(int); extern void cpu_init(void); static inline unsigned long get_debugctlmsr(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6eacd64b602..0f73ea42308 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -253,6 +253,16 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; +void load_percpu_segment(int cpu) +{ +#ifdef CONFIG_X86_32 + loadsegment(fs, __KERNEL_PERCPU); +#else + loadsegment(gs, 0); + wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); +#endif +} + /* Current gdt points %fs at the "master" per-cpu area: after this, * it's on the real one. */ void switch_to_new_gdt(int cpu) @@ -263,12 +273,8 @@ void switch_to_new_gdt(int cpu) gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ -#ifdef CONFIG_X86_32 - loadsegment(fs, __KERNEL_PERCPU); -#else - loadsegment(gs, 0); - wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); -#endif + + load_percpu_segment(cpu); } static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; -- cgit v1.2.3 From 795f99b61d20c34cb04d17d8906b32f745a635ec Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 30 Jan 2009 17:47:54 +0900 Subject: xen: setup percpu data pointers Impact: fix xen booting We need to access percpu data fairly early, so set up the percpu registers as soon as possible. We only need to load the appropriate segment register. We already have a GDT, but its hard to change it early because we need to manipulate the pagetable to do so, and that hasn't been set up yet. 
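The reason this has to happen so early is that every per-cpu access is compiled as "segment base of this CPU plus a fixed offset", so until the base register points at the right area any per-cpu load or store hits the wrong memory. A userspace analogy of that addressing scheme (the struct layout and field names below are invented for the illustration):

    #include <stdio.h>

    #define NCPUS 2

    struct percpu_data {                    /* invented layout, illustration only */
            long irq_count;
            long stack_canary;
    };

    static struct percpu_data pcpu_area[NCPUS];   /* stand-in for the per-CPU sections */
    static struct percpu_data *percpu_base;       /* stand-in for the %gs / %fs base   */

    #define this_cpu(field)  (percpu_base->field)

    int main(void)
    {
            percpu_base = &pcpu_area[0];    /* what load_percpu_segment(0) achieves */
            this_cpu(stack_canary) = 42;

            percpu_base = &pcpu_area[1];    /* pretend execution moved to CPU 1 */
            printf("cpu1 sees %ld, cpu0 stored %ld\n",
                   this_cpu(stack_canary), pcpu_area[0].stack_canary);
            return 0;
    }
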
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Tejun Heo --- arch/x86/xen/enlighten.c | 3 +++ arch/x86/xen/smp.c | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bef941f6145..fe19c88a502 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1647,6 +1647,9 @@ asmlinkage void __init xen_start_kernel(void) have_vcpu_info_placement = 0; #endif + /* setup percpu state */ + load_percpu_segment(0); + xen_smp_init(); /* Get mfn list */ diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 7735e3dd359..88d5d5ec6be 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -170,7 +170,8 @@ static void __init xen_smp_prepare_boot_cpu(void) /* We've switched to the "real" per-cpu gdt, so make sure the old memory can be recycled */ - make_lowmem_page_readwrite(&per_cpu_var(gdt_page)); + make_lowmem_page_readwrite(__per_cpu_load + + (unsigned long)&per_cpu_var(gdt_page)); xen_setup_vcpu_info_placement(); } @@ -235,6 +236,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->user_regs.ss = __KERNEL_DS; #ifdef CONFIG_X86_32 ctxt->user_regs.fs = __KERNEL_PERCPU; +#else + ctxt->gs_base_kernel = per_cpu_offset(cpu); #endif ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ -- cgit v1.2.3 From 0fc2eb3bade59365ed0b28b8ea3b5c448b2f4a26 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:34:04 +0530 Subject: headers_check fix: alpha, statfs.h fix the following 'make headers_check' warning: usr/include/asm-alpha/statfs.h:6: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/alpha/include/asm/statfs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/alpha/include/asm/statfs.h b/arch/alpha/include/asm/statfs.h index de35cd438a1..ccd2e186bfd 100644 --- a/arch/alpha/include/asm/statfs.h +++ b/arch/alpha/include/asm/statfs.h @@ -1,6 +1,8 @@ #ifndef _ALPHA_STATFS_H #define _ALPHA_STATFS_H +#include + /* Alpha is the only 64-bit platform with 32-bit statfs. 
And doesn't even seem to implement statfs64 */ #define __statfs_word __u32 -- cgit v1.2.3 From 3fd59061b7b16dd0bb7bf779ba297daa5f0bf0f5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:36:04 +0530 Subject: headers_check fix: alpha, swab.h fix the following 'make headers_check' warnings: usr/include/asm-alpha/swab.h:4: include of is preferred over usr/include/asm-alpha/swab.h:10: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/alpha/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/swab.h b/arch/alpha/include/asm/swab.h index 68e7089e02d..4d682b16c7c 100644 --- a/arch/alpha/include/asm/swab.h +++ b/arch/alpha/include/asm/swab.h @@ -1,7 +1,7 @@ #ifndef _ALPHA_SWAB_H #define _ALPHA_SWAB_H -#include +#include #include #include -- cgit v1.2.3 From f100e6d0368742ddb8b6b9be986536e63117c05a Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:38:16 +0530 Subject: headers_check fix: arm, a.out.h fix the following 'make headers_check' warnings: usr/include/asm-arm/a.out.h:5: include of is preferred over usr/include/asm-arm/a.out.h:9: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/arm/include/asm/a.out.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h index 79489fdcc8b..083894b2e3b 100644 --- a/arch/arm/include/asm/a.out.h +++ b/arch/arm/include/asm/a.out.h @@ -2,7 +2,7 @@ #define __ARM_A_OUT_H__ #include -#include +#include struct exec { -- cgit v1.2.3 From 4af3bf6b393a2cec947cd42cc10fc03f5b782484 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:41:59 +0530 Subject: headers_check fix: arm, setup.h fix the following 'make headers_check' warnings: usr/include/asm-arm/setup.h:17: include of is preferred over usr/include/asm-arm/setup.h:25: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/arm/include/asm/setup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index f2cd18a0932..ee1304f22f9 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -14,7 +14,7 @@ #ifndef __ASMARM_SETUP_H #define __ASMARM_SETUP_H -#include +#include #define COMMAND_LINE_SIZE 1024 -- cgit v1.2.3 From e42ec2418fa96f98ed8d4e6d8a572a7200156df6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:43:44 +0530 Subject: headers_check fix: arm, swab.h fix the following 'make headers_check' warnings: usr/include/asm-arm/swab.h:19: include of is preferred over usr/include/asm-arm/swab.h:25: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/arm/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index 27a689be085..ca2bf2f6d6e 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -16,7 +16,7 @@ #define __ASM_ARM_SWAB_H #include -#include +#include #if !defined(__STRICT_ANSI__) || defined(__KERNEL__) # define __SWAB_64_THRU_32__ -- cgit v1.2.3 From 1c6ce704f1e965f64ad0b017842854ceec5b9cc7 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:46:26 +0530 Subject: headers_check fix: avr32, swab.h 
fix the following 'make headers_check' warnings: usr/include/asm-avr32/swab.h:7: include of is preferred over usr/include/asm-avr32/swab.h:22: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/avr32/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/avr32/include/asm/swab.h b/arch/avr32/include/asm/swab.h index a14aa5b46d9..14cc737bbca 100644 --- a/arch/avr32/include/asm/swab.h +++ b/arch/avr32/include/asm/swab.h @@ -4,7 +4,7 @@ #ifndef __ASM_AVR32_SWAB_H #define __ASM_AVR32_SWAB_H -#include +#include #include #define __SWAB_64_THRU_32__ -- cgit v1.2.3 From 350eb8b3cb5e4860a4c8352f2cca00e6eb4a16b2 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:48:44 +0530 Subject: headers_check fix: blackfin, swab.h fix the following 'make headers_check' warnings: usr/include/asm-blackfin/swab.h:4: include of is preferred over usr/include/asm-blackfin/swab.h:13: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/blackfin/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/blackfin/include/asm/swab.h b/arch/blackfin/include/asm/swab.h index 69a051b612b..6403ad2932e 100644 --- a/arch/blackfin/include/asm/swab.h +++ b/arch/blackfin/include/asm/swab.h @@ -1,7 +1,7 @@ #ifndef _BLACKFIN_SWAB_H #define _BLACKFIN_SWAB_H -#include +#include #include #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) -- cgit v1.2.3 From 295803eea178d777cf3813b16696c54b0b2bcd23 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:55:12 +0530 Subject: headers_check fix: h8300, swab.h fix the following 'make headers_check' warning: usr/include/asm-h8300/swab.h:4: include of is preferred over Signed-off-by: Jaswinder Singh Rajput --- arch/h8300/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/h8300/include/asm/swab.h b/arch/h8300/include/asm/swab.h index c108f39b8bc..39abbf52807 100644 --- a/arch/h8300/include/asm/swab.h +++ b/arch/h8300/include/asm/swab.h @@ -1,7 +1,7 @@ #ifndef _H8300_SWAB_H #define _H8300_SWAB_H -#include +#include #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) # define __SWAB_64_THRU_32__ -- cgit v1.2.3 From fa9ea6c7abd94482ecd84e130676b6a1b3e61c2c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 10:59:12 +0530 Subject: headers_check fix: ia64, fpu.h fix the following 'make headers_check' warning: usr/include/asm-ia64/fpu.h:9: include of is preferred over Signed-off-by: Jaswinder Singh Rajput --- arch/ia64/include/asm/fpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h index 3859558ff0a..b6395ad1500 100644 --- a/arch/ia64/include/asm/fpu.h +++ b/arch/ia64/include/asm/fpu.h @@ -6,7 +6,7 @@ * David Mosberger-Tang */ -#include +#include /* floating point status register: */ #define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */ -- cgit v1.2.3 From a812a9170c5db5390280eb4fa075f1555e6ae53c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:03:28 +0530 Subject: headers_check fix: ia64, gcc_intrin.h fix the following 'make headers_check' warning: usr/include/asm-ia64/gcc_intrin.h:63: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- 
arch/ia64/include/asm/gcc_intrin.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h index 0f5b5592175..c2c5fd8fcac 100644 --- a/arch/ia64/include/asm/gcc_intrin.h +++ b/arch/ia64/include/asm/gcc_intrin.h @@ -6,6 +6,7 @@ * Copyright (C) 2002,2003 Suresh Siddha */ +#include #include /* define this macro to get some asm stmts included in 'c' files */ -- cgit v1.2.3 From 1ecbb7fcfd20803bdd403de0c9c514da7d6c8843 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:07:06 +0530 Subject: headers_check fix: ia64, intrinsics.h fix the following 'make headers_check' warning: usr/include/asm-ia64/intrinsics.h:57: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/ia64/include/asm/intrinsics.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h index a3e44a5ed49..c47830e26cb 100644 --- a/arch/ia64/include/asm/intrinsics.h +++ b/arch/ia64/include/asm/intrinsics.h @@ -10,6 +10,7 @@ #ifndef __ASSEMBLY__ +#include /* include compiler specific intrinsics */ #include #ifdef __INTEL_COMPILER -- cgit v1.2.3 From 6ce795065bdd2bfeebd97fa91a95918dcff7d0ec Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:10:22 +0530 Subject: headers_check fix: ia64, kvm.h fix the following 'make headers_check' warnings: usr/include/asm-ia64/kvm.h:24: include of is preferred over usr/include/asm-ia64/kvm.h:34: found __[us]{8,16,32,64} type without #include --- arch/ia64/include/asm/kvm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index 68aa6da807c..116761ca462 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -21,8 +21,7 @@ * */ -#include - +#include #include /* Architectural interrupt line count. */ -- cgit v1.2.3 From 040c92b8e5f080e1f5a610bf0e10c683328dce75 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:12:29 +0530 Subject: headers_check fix: ia64, swab.h fix the following 'make headers_check' warnings: usr/include/asm-ia64/swab.h:9: include of is preferred over usr/include/asm-ia64/swab.h:13: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/ia64/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h index 6aa58b699ee..c89a8cb5d8a 100644 --- a/arch/ia64/include/asm/swab.h +++ b/arch/ia64/include/asm/swab.h @@ -6,7 +6,7 @@ * David Mosberger-Tang , Hewlett-Packard Co. 
*/ -#include +#include #include #include -- cgit v1.2.3 From ae612fb05b0f60ff58e2a86eea46dc99532d62c5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:22:18 +0530 Subject: headers_check fix: mips, sigcontext.h fix the following 'make headers_check' warning: usr/include/asm-mips/sigcontext.h:57: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/mips/include/asm/sigcontext.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h index 9ce0607d7a4..9e89cf99d4e 100644 --- a/arch/mips/include/asm/sigcontext.h +++ b/arch/mips/include/asm/sigcontext.h @@ -9,6 +9,7 @@ #ifndef _ASM_SIGCONTEXT_H #define _ASM_SIGCONTEXT_H +#include #include #if _MIPS_SIM == _MIPS_SIM_ABI32 -- cgit v1.2.3 From a9f6acc5ab36c7533c9b3e224f7c209d8da4048d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:23:37 +0530 Subject: headers_check fix: mips, swab.h fix the following 'make headers_check' warnings: usr/include/asm-mips/swab.h:12: include of is preferred over usr/include/asm-mips/swab.h:18: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/mips/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/swab.h b/arch/mips/include/asm/swab.h index 88f1f7d555c..99993c0d6c1 100644 --- a/arch/mips/include/asm/swab.h +++ b/arch/mips/include/asm/swab.h @@ -9,7 +9,7 @@ #define _ASM_SWAB_H #include -#include +#include #define __SWAB_64_THRU_32__ -- cgit v1.2.3 From 79f95ac2412c993e52f02cfde1f71d141b2e530d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:36:07 +0530 Subject: headers_check fix: parisc, pdc.h fix the following 'make headers_check' warning: usr/include/asm-parisc/pdc.h:420: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/parisc/include/asm/pdc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index c584b00c607..430f1aeea0b 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -336,10 +336,11 @@ #define NUM_PDC_RESULT 32 #if !defined(__ASSEMBLY__) -#ifdef __KERNEL__ #include +#ifdef __KERNEL__ + extern int pdc_type; /* Values for pdc_type */ -- cgit v1.2.3 From 726da1e3408caae6af8372f93380f18986f1003b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:39:01 +0530 Subject: headers_check fix: parisc, swab.h fix the following 'make headers_check' warnings: usr/include/asm-parisc/swab.h:4: include of is preferred over usr/include/asm-parisc/swab.h:9: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/parisc/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h index 3ff16c5a335..e78403b129e 100644 --- a/arch/parisc/include/asm/swab.h +++ b/arch/parisc/include/asm/swab.h @@ -1,7 +1,7 @@ #ifndef _PARISC_SWAB_H #define _PARISC_SWAB_H -#include +#include #include #define __SWAB_64_THRU_32__ -- cgit v1.2.3 From 4be2c7ff4f362e41706e84a3d6726bdcda9ab65f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:41:04 +0530 Subject: headers_check fix: powerpc, bootx.h fix the following 'make headers_check' warnings: 
usr/include/asm-powerpc/bootx.h:12: include of is preferred over usr/include/asm-powerpc/bootx.h:57: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/bootx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h index 57b82e3f89c..60a3c9ef301 100644 --- a/arch/powerpc/include/asm/bootx.h +++ b/arch/powerpc/include/asm/bootx.h @@ -9,7 +9,7 @@ #ifndef __ASM_BOOTX_H__ #define __ASM_BOOTX_H__ -#include +#include #ifdef macintosh #include -- cgit v1.2.3 From 785857f5f0b7fbeed934e39af24edec471637375 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:42:29 +0530 Subject: headers_check fix: powerpc, elf.h fix the following 'make headers_check' warning: usr/include/asm-powerpc/elf.h:5: include of is preferred over Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index cd46f023ec6..b5600ce6055 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -7,7 +7,7 @@ #include #endif -#include +#include #include #include #include -- cgit v1.2.3 From 9f2cd967b7f029ebe2c74969709ff9c745344dba Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:44:45 +0530 Subject: headers_check fix: powerpc, kvm.h fix the following 'make headers_check' warnings: usr/include/asm-powerpc/kvm.h:23: include of is preferred over usr/include/asm-powerpc/kvm.h:26: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/kvm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index f993e4198d5..4e0cf65f7f5 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -20,7 +20,7 @@ #ifndef __LINUX_KVM_POWERPC_H #define __LINUX_KVM_POWERPC_H -#include +#include struct kvm_regs { __u64 pc; -- cgit v1.2.3 From 122bb2207b8107ce117d10fcfd3a2f6c3804a362 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:46:23 +0530 Subject: headers_check fix: powerpc, ps3fb.h fix the following 'make headers_check' warning: usr/include/asm-powerpc/ps3fb.h:33: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/ps3fb.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/ps3fb.h b/arch/powerpc/include/asm/ps3fb.h index 3f121fe4010..e7233a84968 100644 --- a/arch/powerpc/include/asm/ps3fb.h +++ b/arch/powerpc/include/asm/ps3fb.h @@ -19,6 +19,7 @@ #ifndef _ASM_POWERPC_PS3FB_H_ #define _ASM_POWERPC_PS3FB_H_ +#include #include /* ioctl */ -- cgit v1.2.3 From 1a16bc4590fcc94630571c541c8fef7a0845c2c0 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:52:05 +0530 Subject: headers_check fix: powerpc, spu_info.h fix the following 'make headers_check' warning: usr/include/asm-powerpc/spu_info.h:27: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/spu_info.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/spu_info.h b/arch/powerpc/include/asm/spu_info.h index 3545efbf989..1286c823f0d 100644 
--- a/arch/powerpc/include/asm/spu_info.h +++ b/arch/powerpc/include/asm/spu_info.h @@ -23,9 +23,10 @@ #ifndef _SPU_INFO_H #define _SPU_INFO_H +#include + #ifdef __KERNEL__ #include -#include #else struct mfc_cq_sr { __u64 mfc_cq_data0_RW; -- cgit v1.2.3 From 48109870bab7e66f30f933cd218258368024cd9f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 11:54:05 +0530 Subject: headers_check fix: powerpc, swab.h fix the following 'make headers_check' warning: usr/include/asm-powerpc/swab.h:11: include of is preferred over Signed-off-by: Jaswinder Singh Rajput --- arch/powerpc/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h index ef824ae4b79..c581e3ef73e 100644 --- a/arch/powerpc/include/asm/swab.h +++ b/arch/powerpc/include/asm/swab.h @@ -8,7 +8,7 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #ifdef __GNUC__ -- cgit v1.2.3 From 1ff8f739c7cc4eaa89b6ba986494f458ff7bdbef Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 31 Jan 2009 12:02:14 +0530 Subject: headers_check fix: xtensa, swab.h fix the following 'make headers_check' warnings: usr/include/asm-xtensa/swab.h:14: include of is preferred over usr/include/asm-xtensa/swab.h:19: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/xtensa/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/swab.h b/arch/xtensa/include/asm/swab.h index f50b697eb60..226a3916231 100644 --- a/arch/xtensa/include/asm/swab.h +++ b/arch/xtensa/include/asm/swab.h @@ -11,7 +11,7 @@ #ifndef _XTENSA_SWAB_H #define _XTENSA_SWAB_H -#include +#include #include #define __SWAB_64_THRU_32__ -- cgit v1.2.3 From e584f559c7b8711cccdf319400acd6294b2c074e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 30 Jan 2009 23:17:23 -0800 Subject: x86/paravirt: don't restore second return reg Impact: bugfix In the 32-bit calling convention, %eax:%edx is used to return 64-bit values. Don't save and restore %edx around wrapped functions, or they can't return a full 64-bit result. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/paravirt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index b17365c3974..016dce31130 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -1524,8 +1524,8 @@ extern struct paravirt_patch_site __parainstructions[], #define PV_RESTORE_REGS "popl %edx; popl %ecx;" /* save and restore all caller-save registers, except return value */ -#define PV_SAVE_ALL_CALLER_REGS PV_SAVE_REGS -#define PV_RESTORE_ALL_CALLER_REGS PV_RESTORE_REGS +#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;" +#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;" #define PV_FLAGS_ARG "0" #define PV_EXTRA_CLOBBERS -- cgit v1.2.3 From 664c7954721adfc9bd61de6ec78f89f482f1a802 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 30 Jan 2009 23:18:41 -0800 Subject: x86/vmi: fix interrupt enable/disable/save/restore calling convention. Zach says: > Enable/Disable have no clobbers at all. > Save clobbers only return value, %eax > Restore also clobbers nothing. This is precisely compatible with the calling convention, so we can just call them directly without wrapping. (Compile tested only.) 
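In other words, the VMI ROM entry points already satisfy the callee-save contract that struct paravirt_callee_save encodes (nothing clobbered beyond the return register), so they can be patched straight into the .func pointer instead of going through a C thunk. Rough sketch of the two ways a slot gets filled (fragments only, names taken from the patches in this series; not a standalone build, and the struct body shown is an approximation):

    /* a callee-save slot is, roughly, a wrapped function pointer: */
    struct paravirt_callee_save {
            void *func;
    };

    /* a C implementation is installed via its register-saving thunk, e.g.: */
            .pte_val = PV_CALLEE_SAVE(xen_pte_val),

    /* the VMI ROM calls honour the contract already, so the raw entry
     * point is patched into .func directly, with no thunk in between:  */
            para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
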
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/kernel/vmi_32.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 1d3302cc2dd..eb9e7347928 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -670,10 +670,11 @@ static inline int __init activate_vmi(void) para_fill(pv_mmu_ops.write_cr2, SetCR2); para_fill(pv_mmu_ops.write_cr3, SetCR3); para_fill(pv_cpu_ops.write_cr4, SetCR4); - para_fill(pv_irq_ops.save_fl, GetInterruptMask); - para_fill(pv_irq_ops.restore_fl, SetInterruptMask); - para_fill(pv_irq_ops.irq_disable, DisableInterrupts); - para_fill(pv_irq_ops.irq_enable, EnableInterrupts); + + para_fill(pv_irq_ops.save_fl.func, GetInterruptMask); + para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask); + para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts); + para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts); para_fill(pv_cpu_ops.wbinvd, WBINVD); para_fill(pv_cpu_ops.read_tsc, RDTSC); -- cgit v1.2.3 From 8d4b4981195849dd50ed94be33ede926c6f41dcd Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 2 Feb 2009 21:48:33 +0530 Subject: headers_check fix: x86, prctl.h fix the following 'make headers_check' warning: usr/include/asm/prctl.h:10: extern's make no sense in userspace Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/prctl.h | 4 ---- arch/x86/include/asm/syscalls.h | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h index a8894647dd9..3ac5032fae0 100644 --- a/arch/x86/include/asm/prctl.h +++ b/arch/x86/include/asm/prctl.h @@ -6,8 +6,4 @@ #define ARCH_GET_FS 0x1003 #define ARCH_GET_GS 0x1004 -#ifdef CONFIG_X86_64 -extern long sys_arch_prctl(int, unsigned long); -#endif /* CONFIG_X86_64 */ - #endif /* _ASM_X86_PRCTL_H */ diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index c0b0bda754e..e26d34b0bc7 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -74,6 +74,7 @@ asmlinkage long sys_vfork(struct pt_regs *); asmlinkage long sys_execve(char __user *, char __user * __user *, char __user * __user *, struct pt_regs *); +long sys_arch_prctl(int, unsigned long); /* kernel/ioport.c */ asmlinkage long sys_iopl(unsigned int, struct pt_regs *); -- cgit v1.2.3 From 15c554439faedfa490389b31db893dc764245e88 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 2 Feb 2009 21:59:19 +0530 Subject: headers_check fix: x86, setup.h fix the following 'make headers_check' warning: usr/include/asm/setup.h:16: extern's make no sense in userspace usr/include/asm/setup.h:17: extern's make no sense in userspace usr/include/asm/setup.h:23: extern's make no sense in userspace usr/include/asm/setup.h:24: extern's make no sense in userspace usr/include/asm/setup.h:51: extern's make no sense in userspace usr/include/asm/setup.h:52: extern's make no sense in userspace Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/setup.h | 45 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ebe858cdc8a..5a3a1371575 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -5,23 +5,6 @@ #ifndef __ASSEMBLY__ -/* Interrupt control for vSMPowered x86_64 systems */ -void vsmp_init(void); - - -void 
setup_bios_corruption_check(void); - - -#ifdef CONFIG_X86_VISWS -extern void visws_early_detect(void); -extern int is_visws_box(void); -#else -static inline void visws_early_detect(void) { } -static inline int is_visws_box(void) { return 0; } -#endif - -extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); -extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); /* * Any setup quirks to be performed? */ @@ -48,12 +31,6 @@ struct x86_quirks { int (*update_genapic)(void); }; -extern struct x86_quirks *x86_quirks; -extern unsigned long saved_video_mode; - -#ifndef CONFIG_PARAVIRT -#define paravirt_post_allocator_init() do {} while (0) -#endif #endif /* __ASSEMBLY__ */ #ifdef __KERNEL__ @@ -78,6 +55,28 @@ extern unsigned long saved_video_mode; #ifndef __ASSEMBLY__ #include +/* Interrupt control for vSMPowered x86_64 systems */ +void vsmp_init(void); + +void setup_bios_corruption_check(void); + +#ifdef CONFIG_X86_VISWS +extern void visws_early_detect(void); +extern int is_visws_box(void); +#else +static inline void visws_early_detect(void) { } +static inline int is_visws_box(void) { return 0; } +#endif + +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); +extern struct x86_quirks *x86_quirks; +extern unsigned long saved_video_mode; + +#ifndef CONFIG_PARAVIRT +#define paravirt_post_allocator_init() do {} while (0) +#endif + #ifndef _SETUP /* -- cgit v1.2.3 From 3bd323a1da42525317e2ce6c93b97b5ba653bc9d Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 2 Feb 2009 14:52:00 -0800 Subject: x86 setup: a20: early timeout for a nonexistent keyboard controller When probing the keyboard controller to enable A20, if we get FF back (which is *possible* as a valid status word, but is extremely unlikely) then bail after much fewer iterations than we otherwise would, and abort the attempt to access the KBC. This hopefully should make it work a lot better for embedded platforms which don't have a KBC and where the BIOS doesn't implement INT 15h AX=2401h (and doesn't boot with A20 already enabled.) If this works, it will be the one remaining use of CONFIG_X86_ELAN as anything other than a processor type optimization option. Signed-off-by: H. Peter Anvin --- arch/x86/boot/a20.c | 75 +++++++++++++++++++++++++++++------------------------ 1 file changed, 41 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c index 4063d630def..fba8e9c6a50 100644 --- a/arch/x86/boot/a20.c +++ b/arch/x86/boot/a20.c @@ -2,6 +2,7 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007-2008 rPath, Inc. - All Rights Reserved + * Copyright 2009 Intel Corporation * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
@@ -15,16 +16,23 @@ #include "boot.h" #define MAX_8042_LOOPS 100000 +#define MAX_8042_FF 32 static int empty_8042(void) { u8 status; int loops = MAX_8042_LOOPS; + int ffs = MAX_8042_FF; while (loops--) { io_delay(); status = inb(0x64); + if (status == 0xff) { + /* FF is a plausible, but very unlikely status */ + if (!--ffs) + return -1; /* Assume no KBC present */ + } if (status & 1) { /* Read and discard input data */ io_delay(); @@ -118,44 +126,43 @@ static void enable_a20_fast(void) int enable_a20(void) { -#if defined(CONFIG_X86_ELAN) - /* Elan croaks if we try to touch the KBC */ - enable_a20_fast(); - while (!a20_test_long()) - ; - return 0; -#elif defined(CONFIG_X86_VOYAGER) +#ifdef CONFIG_X86_VOYAGER /* On Voyager, a20_test() is unsafe? */ enable_a20_kbc(); return 0; #else int loops = A20_ENABLE_LOOPS; - while (loops--) { - /* First, check to see if A20 is already enabled - (legacy free, etc.) */ - if (a20_test_short()) - return 0; - - /* Next, try the BIOS (INT 0x15, AX=0x2401) */ - enable_a20_bios(); - if (a20_test_short()) - return 0; - - /* Try enabling A20 through the keyboard controller */ - empty_8042(); - if (a20_test_short()) - return 0; /* BIOS worked, but with delayed reaction */ - - enable_a20_kbc(); - if (a20_test_long()) - return 0; - - /* Finally, try enabling the "fast A20 gate" */ - enable_a20_fast(); - if (a20_test_long()) - return 0; - } - - return -1; + int kbc_err; + + while (loops--) { + /* First, check to see if A20 is already enabled + (legacy free, etc.) */ + if (a20_test_short()) + return 0; + + /* Next, try the BIOS (INT 0x15, AX=0x2401) */ + enable_a20_bios(); + if (a20_test_short()) + return 0; + + /* Try enabling A20 through the keyboard controller */ + kbc_err = empty_8042(); + + if (a20_test_short()) + return 0; /* BIOS worked, but with delayed reaction */ + + if (!kbc_err) { + enable_a20_kbc(); + if (a20_test_long()) + return 0; + } + + /* Finally, try enabling the "fast A20 gate" */ + enable_a20_fast(); + if (a20_test_long()) + return 0; + } + + return -1; #endif } -- cgit v1.2.3 From ef3892bd63420380d115f755d351d2071f1f805f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 2 Feb 2009 18:16:19 -0800 Subject: x86, percpu: fix kexec with vmlinux Impact: fix regression with kexec with vmlinux Split data.init into data.init, percpu, data.init2 sections instead of let data.init wrap percpu secion. Thus kexec loading will be happy, because sections will not overlap. Before the patch we have: Elf file type is EXEC (Executable file) Entry point 0x200000 There are 6 program headers, starting at offset 64 Program Headers: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flags Align LOAD 0x0000000000200000 0xffffffff80200000 0x0000000000200000 0x0000000000ca6000 0x0000000000ca6000 R E 200000 LOAD 0x0000000000ea6000 0xffffffff80ea6000 0x0000000000ea6000 0x000000000014dfe0 0x000000000014dfe0 RWE 200000 LOAD 0x0000000001000000 0xffffffffff600000 0x0000000000ff4000 0x0000000000000888 0x0000000000000888 RWE 200000 LOAD 0x00000000011f6000 0xffffffff80ff6000 0x0000000000ff6000 0x0000000000073086 0x0000000000a2d938 RWE 200000 LOAD 0x0000000001400000 0x0000000000000000 0x000000000106a000 0x00000000001d2ce0 0x00000000001d2ce0 RWE 200000 NOTE 0x00000000009e2c1c 0xffffffff809e2c1c 0x00000000009e2c1c 0x0000000000000024 0x0000000000000024 4 Section to Segment mapping: Segment Sections... 
00 .text .notes __ex_table .rodata __bug_table .pci_fixup .builtin_fw __ksymtab __ksymtab_gpl __ksymtab_strings __init_rodata __param 01 .data .init.rodata .data.cacheline_aligned .data.read_mostly 02 .vsyscall_0 .vsyscall_fn .vsyscall_gtod_data .vsyscall_1 .vsyscall_2 .vgetcpu_mode .jiffies 03 .data.init_task .smp_locks .init.text .init.data .init.setup .initcall.init .con_initcall.init .x86_cpu_dev.init .altinstructions .altinstr_replacement .exit.text .init.ramfs .bss 04 .data.percpu 05 .notes After patch we've got: Elf file type is EXEC (Executable file) Entry point 0x200000 There are 7 program headers, starting at offset 64 Program Headers: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flags Align LOAD 0x0000000000200000 0xffffffff80200000 0x0000000000200000 0x0000000000ca6000 0x0000000000ca6000 R E 200000 LOAD 0x0000000000ea6000 0xffffffff80ea6000 0x0000000000ea6000 0x000000000014dfe0 0x000000000014dfe0 RWE 200000 LOAD 0x0000000001000000 0xffffffffff600000 0x0000000000ff4000 0x0000000000000888 0x0000000000000888 RWE 200000 LOAD 0x00000000011f6000 0xffffffff80ff6000 0x0000000000ff6000 0x0000000000073086 0x0000000000073086 RWE 200000 LOAD 0x0000000001400000 0x0000000000000000 0x000000000106a000 0x00000000001d2ce0 0x00000000001d2ce0 RWE 200000 LOAD 0x000000000163d000 0xffffffff8123d000 0x000000000123d000 0x0000000000000000 0x00000000007e6938 RWE 200000 NOTE 0x00000000009e2c1c 0xffffffff809e2c1c 0x00000000009e2c1c 0x0000000000000024 0x0000000000000024 4 Section to Segment mapping: Segment Sections... 00 .text .notes __ex_table .rodata __bug_table .pci_fixup .builtin_fw __ksymtab __ksymtab_gpl __ksymtab_strings __init_rodata __param 01 .data .init.rodata .data.cacheline_aligned .data.read_mostly 02 .vsyscall_0 .vsyscall_fn .vsyscall_gtod_data .vsyscall_1 .vsyscall_2 .vgetcpu_mode .jiffies 03 .data.init_task .smp_locks .init.text .init.data .init.setup .initcall.init .con_initcall.init .x86_cpu_dev.init .altinstructions .altinstr_replacement .exit.text .init.ramfs 04 .data.percpu 05 .bss 06 .notes Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/vmlinux_64.lds.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index c9740996430..07f62d287ff 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -22,6 +22,7 @@ PHDRS { #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(7); /* RWE */ #endif + data.init2 PT_LOAD FLAGS(7); /* RWE */ note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS @@ -215,7 +216,7 @@ SECTIONS /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - __data_nosave - should - * switch it back to data.init. Also, pda should be at the head of + * start another section data.init2. Also, pda should be at the head of * percpu area. Preallocate it and define the percpu offset symbol * so that it can be accessed as a percpu variable. */ @@ -232,7 +233,7 @@ SECTIONS __nosave_begin = .; .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) - } :data.init /* switch back to data.init, see PERCPU_VADDR() above */ + } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */ . 
= ALIGN(PAGE_SIZE); __nosave_end = .; -- cgit v1.2.3 From 063f8913afb48842b9329e195d90d2c28e58aacc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 3 Feb 2009 18:02:36 +0100 Subject: x86: document 64-bit and 32-bit function call convention ABI - also clean up the calling.h file a tiny bit Signed-off-by: Ingo Molnar --- arch/x86/include/asm/calling.h | 56 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 53 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h index 2bc162e0ec6..0e63c9a2a8d 100644 --- a/arch/x86/include/asm/calling.h +++ b/arch/x86/include/asm/calling.h @@ -1,5 +1,55 @@ /* - * Some macros to handle stack frames in assembly. + + x86 function call convention, 64-bit: + ------------------------------------- + arguments | callee-saved | extra caller-saved | return + [callee-clobbered] | | [callee-clobbered] | + --------------------------------------------------------------------------- + rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**] + + ( rsp is obviously invariant across normal function calls. (gcc can 'merge' + functions when it sees tail-call optimization possibilities) rflags is + clobbered. Leftover arguments are passed over the stack frame.) + + [*] In the frame-pointers case rbp is fixed to the stack frame. + + [**] for struct return values wider than 64 bits the return convention is a + bit more complex: up to 128 bits width we return small structures + straight in rax, rdx. For structures larger than that (3 words or + larger) the caller puts a pointer to an on-stack return struct + [allocated in the caller's stack frame] into the first argument - i.e. + into rdi. All other arguments shift up by one in this case. + Fortunately this case is rare in the kernel. + +For 32-bit we have the following conventions - kernel is built with +-mregparm=3 and -freg-struct-return: + + x86 function calling convention, 32-bit: + ---------------------------------------- + arguments | callee-saved | extra caller-saved | return + [callee-clobbered] | | [callee-clobbered] | + ------------------------------------------------------------------------- + eax edx ecx | ebx edi esi ebp [*] | | eax, edx [**] + + ( here too esp is obviously invariant across normal function calls. eflags + is clobbered. Leftover arguments are passed over the stack frame. ) + + [*] In the frame-pointers case ebp is fixed to the stack frame. + + [**] We build with -freg-struct-return, which on 32-bit means similar + semantics as on 64-bit: edx can be used for a second return value + (i.e. covering integer and structure sizes up to 64 bits) - after that + it gets more complex and more expensive: 3-word or larger struct returns + get done in the caller's frame and the pointer to the return struct goes + into regparm0, i.e. eax - the other arguments shift up and the + function's register parameters degenerate to regparm=2 in essence. + +*/ + + +/* + * 64-bit system call stack frame layout defines and helpers, + * for assembly code: */ #define R15 0 @@ -9,7 +59,7 @@ #define RBP 32 #define RBX 40 -/* arguments: interrupts/non tracing syscalls only save upto here*/ +/* arguments: interrupts/non tracing syscalls only save up to here: */ #define R11 48 #define R10 56 #define R9 64 @@ -22,7 +72,7 @@ #define ORIG_RAX 120 /* + error_code */ /* end of arguments */ -/* cpu exception frame or undefined in case of fast syscall. 
*/ +/* cpu exception frame or undefined in case of fast syscall: */ #define RIP 128 #define CS 136 #define EFLAGS 144 -- cgit v1.2.3 From 0eb592dbba40baebec9cdde3ff4574185de6cbcc Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 3 Feb 2009 16:00:38 -0800 Subject: x86/paravirt: return full 64-bit result Impact: Bug fix A hunk went missing in the original patch, and callee-save callsites were not marked as returning the upper 32-bit of result, causing Badness. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/paravirt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 016dce31130..c85e7475e17 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -522,7 +522,7 @@ int paravirt_disable_iospace(void); "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS -#define PVOP_VCALLEE_CLOBBERS "=a" (__eax) +#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS #define EXTRA_CLOBBERS -- cgit v1.2.3 From f5deb79679af6eb41b61112fadcda28b2a4cfb0d Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Tue, 3 Feb 2009 14:22:48 +0800 Subject: x86: kexec: Use one page table in x86_64 machine_kexec Impact: reduce kernel BSS size by 7 pages, improve code readability Two page tables are used in current x86_64 kexec implementation. One is used to jump from kernel virtual address to identity map address, the other is used to map all physical memory. In fact, on x86_64, there is no conflict between kernel virtual address space and physical memory space, so just one page table is sufficient. The page table pages used to map control page are dynamically allocated to save memory if kexec image is not loaded. ASM code used to map control page is replaced by C code too. Signed-off-by: Huang Ying Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/kexec.h | 27 +++----- arch/x86/kernel/machine_kexec_64.c | 82 +++++++++++++++-------- arch/x86/kernel/relocate_kernel_64.S | 125 +---------------------------------- 3 files changed, 67 insertions(+), 167 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index c61d8b2ab8b..0ceb6d19ed3 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -9,23 +9,8 @@ # define PAGES_NR 4 #else # define PA_CONTROL_PAGE 0 -# define VA_CONTROL_PAGE 1 -# define PA_PGD 2 -# define VA_PGD 3 -# define PA_PUD_0 4 -# define VA_PUD_0 5 -# define PA_PMD_0 6 -# define VA_PMD_0 7 -# define PA_PTE_0 8 -# define VA_PTE_0 9 -# define PA_PUD_1 10 -# define VA_PUD_1 11 -# define PA_PMD_1 12 -# define VA_PMD_1 13 -# define PA_PTE_1 14 -# define VA_PTE_1 15 -# define PA_TABLE_PAGE 16 -# define PAGES_NR 17 +# define PA_TABLE_PAGE 1 +# define PAGES_NR 2 #endif #ifdef CONFIG_X86_32 @@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page, unsigned long start_address) ATTRIB_NORET; #endif -#ifdef CONFIG_X86_32 #define ARCH_HAS_KIMAGE_ARCH +#ifdef CONFIG_X86_32 struct kimage_arch { pgd_t *pgd; #ifdef CONFIG_X86_PAE @@ -169,6 +154,12 @@ struct kimage_arch { pte_t *pte0; pte_t *pte1; }; +#else +struct kimage_arch { + pud_t *pud; + pmd_t *pmd; + pte_t *pte; +}; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index c43caa3a91f..6993d51b7fd 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -18,15 +18,6 @@ #include #include -#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) -static u64 kexec_pgd[512] PAGE_ALIGNED; -static u64 kexec_pud0[512] PAGE_ALIGNED; -static u64 kexec_pmd0[512] PAGE_ALIGNED; -static u64 kexec_pte0[512] PAGE_ALIGNED; -static u64 kexec_pud1[512] PAGE_ALIGNED; -static u64 kexec_pmd1[512] PAGE_ALIGNED; -static u64 kexec_pte1[512] PAGE_ALIGNED; - static void init_level2_page(pmd_t *level2p, unsigned long addr) { unsigned long end_addr; @@ -107,12 +98,65 @@ out: return result; } +static void free_transition_pgtable(struct kimage *image) +{ + free_page((unsigned long)image->arch.pud); + free_page((unsigned long)image->arch.pmd); + free_page((unsigned long)image->arch.pte); +} + +static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned long vaddr, paddr; + int result = -ENOMEM; + + vaddr = (unsigned long)relocate_kernel; + paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE); + pgd += pgd_index(vaddr); + if (!pgd_present(*pgd)) { + pud = (pud_t *)get_zeroed_page(GFP_KERNEL); + if (!pud) + goto err; + image->arch.pud = pud; + set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); + } + pud = pud_offset(pgd, vaddr); + if (!pud_present(*pud)) { + pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); + if (!pmd) + goto err; + image->arch.pmd = pmd; + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + } + pmd = pmd_offset(pud, vaddr); + if (!pmd_present(*pmd)) { + pte = (pte_t *)get_zeroed_page(GFP_KERNEL); + if (!pte) + goto err; + image->arch.pte = pte; + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); + } + pte = pte_offset_kernel(pmd, vaddr); + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); + return 0; +err: + free_transition_pgtable(image); + return result; +} + static int init_pgtable(struct kimage *image, unsigned long start_pgtable) { pgd_t *level4p; + int result; level4p = (pgd_t 
*)__va(start_pgtable); - return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); + result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); + if (result) + return result; + return init_transition_pgtable(image, level4p); } static void set_idt(void *newidt, u16 limit) @@ -174,7 +218,7 @@ int machine_kexec_prepare(struct kimage *image) void machine_kexec_cleanup(struct kimage *image) { - return; + free_transition_pgtable(image); } /* @@ -195,22 +239,6 @@ void machine_kexec(struct kimage *image) memcpy(control_page, relocate_kernel, PAGE_SIZE); page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); - page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; - page_list[PA_PGD] = virt_to_phys(&kexec_pgd); - page_list[VA_PGD] = (unsigned long)kexec_pgd; - page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0); - page_list[VA_PUD_0] = (unsigned long)kexec_pud0; - page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0); - page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; - page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0); - page_list[VA_PTE_0] = (unsigned long)kexec_pte0; - page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1); - page_list[VA_PUD_1] = (unsigned long)kexec_pud1; - page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1); - page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; - page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1); - page_list[VA_PTE_1] = (unsigned long)kexec_pte1; - page_list[PA_TABLE_PAGE] = (unsigned long)__pa(page_address(image->control_code_page)); diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index f5afe665a82..b0bbdd4829c 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -29,122 +29,6 @@ relocate_kernel: * %rdx start address */ - /* map the control page at its virtual address */ - - movq $0x0000ff8000000000, %r10 /* mask */ - mov $(39 - 3), %cl /* bits to shift */ - movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */ - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PGD)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PUD_0)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PUD_0)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PMD_0)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PMD_0)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PTE_0)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PTE_0)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - /* identity map the control page at its physical address */ - - movq $0x0000ff8000000000, %r10 /* mask */ - mov $(39 - 3), %cl /* bits to shift */ - movq PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */ - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PGD)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PUD_1)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PUD_1)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PMD_1)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PMD_1)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_PTE_1)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, 
(%r9) - - shrq $9, %r10 - sub $9, %cl - - movq %r11, %r9 - andq %r10, %r9 - shrq %cl, %r9 - - movq PTR(VA_PTE_1)(%rsi), %r8 - addq %r8, %r9 - movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 - orq $PAGE_ATTR, %r8 - movq %r8, (%r9) - -relocate_new_kernel: - /* %rdi indirection_page - * %rsi page_list - * %rdx start address - */ - /* zero out flags, and disable interrupts */ pushq $0 popfq @@ -156,9 +40,8 @@ relocate_new_kernel: /* get physical address of page table now too */ movq PTR(PA_TABLE_PAGE)(%rsi), %rcx - /* switch to new set of page tables */ - movq PTR(PA_PGD)(%rsi), %r9 - movq %r9, %cr3 + /* Switch to the identity mapped page tables */ + movq %rcx, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%r8), %rsp @@ -194,9 +77,7 @@ identity_mapped: jmp 1f 1: - /* Switch to the identity mapped page tables, - * and flush the TLB. - */ + /* Flush the TLB (needed?) */ movq %rcx, %cr3 /* Do the copies */ -- cgit v1.2.3 From 508eb2ce222053e51e2243b7add8eeac85b1d250 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 4 Feb 2009 15:28:06 +0900 Subject: sh: Restrict old CMT timer code to SH-2/SH-2A. None of the other platforms use this, and need individual porting. Restrict it back to the supported set of CPU subtypes. Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 50c992444e5..78a01d7d37e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -419,7 +419,7 @@ config SH_TMU config SH_CMT bool "CMT timer support" - depends on SYS_SUPPORTS_CMT + depends on SYS_SUPPORTS_CMT && CPU_SH2 default y help This enables the use of the CMT as the system timer. -- cgit v1.2.3 From 0973a06cde8cc1522fbcf2baacb926f1ee3f4c79 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 4 Feb 2009 15:24:09 -0800 Subject: x86: mm: introduce helper function in fault.c Impact: cleanup Introduce helper function fault_in_kernel_address() to make editors happy. Signed-off-by: Hiroshi Shimamoto Signed-off-by: H. Peter Anvin --- arch/x86/mm/fault.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index eb4d7fe0593..8e9b0f1fd87 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -775,6 +775,15 @@ static inline int access_error(unsigned long error_code, int write, return 0; } +static int fault_in_kernel_space(unsigned long address) +{ +#ifdef CONFIG_X86_32 + return address >= TASK_SIZE; +#else /* !CONFIG_X86_32 */ + return address >= TASK_SIZE64; +#endif /* CONFIG_X86_32 */ +} + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -817,11 +826,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) * (error_code & 4) == 0, and that the fault was not a * protection error (error_code & 9) == 0. */ -#ifdef CONFIG_X86_32 - if (unlikely(address >= TASK_SIZE)) { -#else - if (unlikely(address >= TASK_SIZE64)) { -#endif + if (unlikely(fault_in_kernel_space(address))) { if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && vmalloc_fault(address) >= 0) return; -- cgit v1.2.3 From 1f4f931501e9270c156d05ee76b7b872de486304 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 2 Feb 2009 13:58:06 -0800 Subject: xen: fix 32-bit build resulting from mmu move Moving the mmu code from enlighten.c to mmu.c inadvertently broke the 32-bit build. Fix it. 
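As a standalone sketch (plain integers, not the real pte types or macros), the mask_rw_pte() hunk below relies on a small bit trick: when the slot already holds a present pte, the new pte is allowed to keep the RW bit only if the existing mapping already had it.

	/* Illustration only; RW_BIT stands in for _PAGE_RW. */
	#define RW_BIT	0x2UL

	static unsigned long mask_rw(unsigned long old_val, unsigned long new_val)
	{
		/* ((old & RW) | ~RW) is all-ones when old had RW, ~RW otherwise */
		return ((old_val & RW_BIT) | ~RW_BIT) & new_val;
	}
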
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/mmu.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'arch') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e41f7fc6cf..d2e8ed1aff3 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1396,6 +1396,43 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) #endif } +#ifdef CONFIG_HIGHPTE +static void *xen_kmap_atomic_pte(struct page *page, enum km_type type) +{ + pgprot_t prot = PAGE_KERNEL; + + if (PagePinned(page)) + prot = PAGE_KERNEL_RO; + + if (0 && PageHighMem(page)) + printk("mapping highpte %lx type %d prot %s\n", + page_to_pfn(page), type, + (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ"); + + return kmap_atomic_prot(page, type, prot); +} +#endif + +#ifdef CONFIG_X86_32 +static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) +{ + /* If there's an existing pte, then don't allow _PAGE_RW to be set */ + if (pte_val_ma(*ptep) & _PAGE_PRESENT) + pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & + pte_val_ma(pte)); + + return pte; +} + +/* Init-time set_pte while constructing initial pagetables, which + doesn't allow RO pagetable pages to be remapped RW */ +static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) +{ + pte = mask_rw_pte(ptep, pte); + + xen_set_pte(ptep, pte); +} +#endif /* Early in boot, while setting up the initial pagetable, assume everything is pinned. */ -- cgit v1.2.3 From 383414322b3b3ced0cbc146801e0cc6c60a6c5f4 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 2 Feb 2009 13:55:31 -0800 Subject: xen: setup percpu data pointers We need to access percpu data fairly early, so set up the percpu registers as soon as possible. We only need to load the appropriate segment register. We already have a GDT, but its hard to change it early because we need to manipulate the pagetable to do so, and that hasn't been set up yet. Also, set the kernel stack when bringing up secondary CPUs. If we don't they all end up sharing the same stack... Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/enlighten.c | 15 ++++++++++++++- arch/x86/xen/smp.c | 6 ++++-- arch/x86/xen/xen-ops.h | 2 ++ 3 files changed, 20 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index cd022c43dfb..aed7ceeb4b6 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -66,6 +66,8 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; +void *xen_initial_gdt; + /* * Point at some empty memory to start with. We map the real shared_info * page as soon as fixmap is up and running. @@ -917,8 +919,19 @@ asmlinkage void __init xen_start_kernel(void) have_vcpu_info_placement = 0; #endif - /* setup percpu state */ +#ifdef CONFIG_X86_64 + /* + * Setup percpu state. We only need to do this for 64-bit + * because 32-bit already has %fs set properly. + */ load_percpu_segment(0); +#endif + /* + * The only reliable way to retain the initial address of the + * percpu gdt_page is to remember it here, so we can go and + * mark it RW later, when the initial percpu area is freed. 
+ */ + xen_initial_gdt = &per_cpu(gdt_page, 0); xen_smp_init(); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 88d5d5ec6be..035582ae815 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -170,8 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void) /* We've switched to the "real" per-cpu gdt, so make sure the old memory can be recycled */ - make_lowmem_page_readwrite(__per_cpu_load + - (unsigned long)&per_cpu_var(gdt_page)); + make_lowmem_page_readwrite(xen_initial_gdt); xen_setup_vcpu_info_placement(); } @@ -287,6 +286,9 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) irq_ctx_init(cpu); #else clear_tsk_thread_flag(idle, TIF_FORK); + per_cpu(kernel_stack, cpu) = + (unsigned long)task_stack_page(idle) - + KERNEL_STACK_OFFSET + THREAD_SIZE; #endif xen_setup_timer(cpu); xen_init_lock_cpu(cpu); diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 11913fc94c1..2f5ef2632ea 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -10,6 +10,8 @@ extern const char xen_hypervisor_callback[]; extern const char xen_failsafe_callback[]; +extern void *xen_initial_gdt; + struct trap_info; void xen_copy_trap_info(struct trap_info *traps); -- cgit v1.2.3 From 5393744b71ce797f1b1546fafaed127fc50c2b61 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 2 Feb 2009 13:55:42 -0800 Subject: xen: make direct versions of irq_enable/disable/save/restore to common code Now that x86-64 has directly accessible percpu variables, it can also implement the direct versions of these operations, which operate on a vcpu_info structure directly embedded in the percpu area. In fact, the 64-bit versions are more or less identical, and so can be shared. The only two differences are: 1. xen_restore_fl_direct takes its argument in eax on 32-bit, and rdi on 64-bit. Unfortunately it isn't possible to directly refer to the 2nd lsb of rdi directly (as you can with %ah), so the code isn't quite as dense. 2. check_events needs to variants to save different registers. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/Makefile | 3 +- arch/x86/xen/xen-asm.S | 140 ++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/xen/xen-asm.h | 12 ++++ arch/x86/xen/xen-asm_32.S | 111 ++++-------------------------------- arch/x86/xen/xen-asm_64.S | 134 +------------------------------------------- 5 files changed, 169 insertions(+), 231 deletions(-) create mode 100644 arch/x86/xen/xen-asm.S create mode 100644 arch/x86/xen/xen-asm.h (limited to 'arch') diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 6dcefba7836..3b767d03fd6 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg endif obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ - time.o xen-asm_$(BITS).o grant-table.o suspend.o + time.o xen-asm.o xen-asm_$(BITS).o \ + grant-table.o suspend.o obj-$(CONFIG_SMP) += smp.o spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S new file mode 100644 index 00000000000..4c6f9679913 --- /dev/null +++ b/arch/x86/xen/xen-asm.S @@ -0,0 +1,140 @@ +/* + Asm versions of Xen pv-ops, suitable for either direct use or inlining. + The inline versions are the same as the direct-use versions, with the + pre- and post-amble chopped off. + + This code is encoded for size rather than absolute efficiency, + with a view to being able to inline as much as possible. 
+ + We only bother with direct forms (ie, vcpu in percpu data) of + the operations here; the indirect forms are better handled in + C, since they're generally too large to inline anyway. + */ + +#include +#include +#include + +#include "xen-asm.h" + +/* + Enable events. This clears the event mask and tests the pending + event status with one and operation. If there are pending + events, then enter the hypervisor to get them handled. + */ +ENTRY(xen_irq_enable_direct) + /* Unmask events */ + movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + + /* Preempt here doesn't matter because that will deal with + any pending interrupts. The pending check may end up being + run on the wrong CPU, but that doesn't hurt. */ + + /* Test for pending */ + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending + jz 1f + +2: call check_events +1: +ENDPATCH(xen_irq_enable_direct) + ret + ENDPROC(xen_irq_enable_direct) + RELOC(xen_irq_enable_direct, 2b+1) + + +/* + Disabling events is simply a matter of making the event mask + non-zero. + */ +ENTRY(xen_irq_disable_direct) + movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask +ENDPATCH(xen_irq_disable_direct) + ret + ENDPROC(xen_irq_disable_direct) + RELOC(xen_irq_disable_direct, 0) + +/* + (xen_)save_fl is used to get the current interrupt enable status. + Callers expect the status to be in X86_EFLAGS_IF, and other bits + may be set in the return value. We take advantage of this by + making sure that X86_EFLAGS_IF has the right value (and other bits + in that byte are 0), but other bits in the return value are + undefined. We need to toggle the state of the bit, because + Xen and x86 use opposite senses (mask vs enable). + */ +ENTRY(xen_save_fl_direct) + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + setz %ah + addb %ah,%ah +ENDPATCH(xen_save_fl_direct) + ret + ENDPROC(xen_save_fl_direct) + RELOC(xen_save_fl_direct, 0) + + +/* + In principle the caller should be passing us a value return + from xen_save_fl_direct, but for robustness sake we test only + the X86_EFLAGS_IF flag rather than the whole byte. After + setting the interrupt mask state, it checks for unmasked + pending events and enters the hypervisor to get them delivered + if so. + */ +ENTRY(xen_restore_fl_direct) +#ifdef CONFIG_X86_64 + testw $X86_EFLAGS_IF, %di +#else + testb $X86_EFLAGS_IF>>8, %ah +#endif + setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + /* Preempt here doesn't matter because that will deal with + any pending interrupts. The pending check may end up being + run on the wrong CPU, but that doesn't hurt. */ + + /* check for unmasked and pending */ + cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending + jz 1f +2: call check_events +1: +ENDPATCH(xen_restore_fl_direct) + ret + ENDPROC(xen_restore_fl_direct) + RELOC(xen_restore_fl_direct, 2b+1) + + +/* + Force an event check by making a hypercall, + but preserve regs before making the call. 
+ */ +check_events: +#ifdef CONFIG_X86_32 + push %eax + push %ecx + push %edx + call xen_force_evtchn_callback + pop %edx + pop %ecx + pop %eax +#else + push %rax + push %rcx + push %rdx + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + call xen_force_evtchn_callback + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rdx + pop %rcx + pop %rax +#endif + ret + diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h new file mode 100644 index 00000000000..465276467a4 --- /dev/null +++ b/arch/x86/xen/xen-asm.h @@ -0,0 +1,12 @@ +#ifndef _XEN_XEN_ASM_H +#define _XEN_XEN_ASM_H + +#include + +#define RELOC(x, v) .globl x##_reloc; x##_reloc=v +#define ENDPATCH(x) .globl x##_end; x##_end=. + +/* Pseudo-flag used for virtual NMI, which we don't implement yet */ +#define XEN_EFLAGS_NMI 0x80000000 + +#endif diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index 42786f59d9c..082d173caaf 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -11,101 +11,28 @@ generally too large to inline anyway. */ -#include - -#include +//#include #include -#include #include #include #include -#define RELOC(x, v) .globl x##_reloc; x##_reloc=v -#define ENDPATCH(x) .globl x##_end; x##_end=. - -/* Pseudo-flag used for virtual NMI, which we don't implement yet */ -#define XEN_EFLAGS_NMI 0x80000000 - -/* - Enable events. This clears the event mask and tests the pending - event status with one and operation. If there are pending - events, then enter the hypervisor to get them handled. - */ -ENTRY(xen_irq_enable_direct) - /* Unmask events */ - movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask - - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. */ - - /* Test for pending */ - testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending - jz 1f - -2: call check_events -1: -ENDPATCH(xen_irq_enable_direct) - ret - ENDPROC(xen_irq_enable_direct) - RELOC(xen_irq_enable_direct, 2b+1) - +#include "xen-asm.h" /* - Disabling events is simply a matter of making the event mask - non-zero. - */ -ENTRY(xen_irq_disable_direct) - movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask -ENDPATCH(xen_irq_disable_direct) - ret - ENDPROC(xen_irq_disable_direct) - RELOC(xen_irq_disable_direct, 0) - -/* - (xen_)save_fl is used to get the current interrupt enable status. - Callers expect the status to be in X86_EFLAGS_IF, and other bits - may be set in the return value. We take advantage of this by - making sure that X86_EFLAGS_IF has the right value (and other bits - in that byte are 0), but other bits in the return value are - undefined. We need to toggle the state of the bit, because - Xen and x86 use opposite senses (mask vs enable). - */ -ENTRY(xen_save_fl_direct) - testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask - setz %ah - addb %ah,%ah -ENDPATCH(xen_save_fl_direct) - ret - ENDPROC(xen_save_fl_direct) - RELOC(xen_save_fl_direct, 0) - - -/* - In principle the caller should be passing us a value return - from xen_save_fl_direct, but for robustness sake we test only - the X86_EFLAGS_IF flag rather than the whole byte. After - setting the interrupt mask state, it checks for unmasked - pending events and enters the hypervisor to get them delivered - if so. + Force an event check by making a hypercall, + but preserve regs before making the call. 
*/ -ENTRY(xen_restore_fl_direct) - testb $X86_EFLAGS_IF>>8, %ah - setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. */ - - /* check for unmasked and pending */ - cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending - jz 1f -2: call check_events -1: -ENDPATCH(xen_restore_fl_direct) +check_events: + push %eax + push %ecx + push %edx + call xen_force_evtchn_callback + pop %edx + pop %ecx + pop %eax ret - ENDPROC(xen_restore_fl_direct) - RELOC(xen_restore_fl_direct, 2b+1) /* We can't use sysexit directly, because we're not running in ring0. @@ -289,17 +216,3 @@ ENTRY(xen_iret_crit_fixup) lea 4(%edi),%esp /* point esp to new frame */ 2: jmp xen_do_upcall - -/* - Force an event check by making a hypercall, - but preserve regs before making the call. - */ -check_events: - push %eax - push %ecx - push %edx - call xen_force_evtchn_callback - pop %edx - pop %ecx - pop %eax - ret diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index d6fc51f4ce8..d205a283efe 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -11,142 +11,14 @@ generally too large to inline anyway. */ -#include - -#include -#include #include -#include #include +#include +#include #include -#define RELOC(x, v) .globl x##_reloc; x##_reloc=v -#define ENDPATCH(x) .globl x##_end; x##_end=. - -/* Pseudo-flag used for virtual NMI, which we don't implement yet */ -#define XEN_EFLAGS_NMI 0x80000000 - -#if 1 -/* - FIXME: x86_64 now can support direct access to percpu variables - via a segment override. Update xen accordingly. - */ -#define BUG ud2a -#endif - -/* - Enable events. This clears the event mask and tests the pending - event status with one and operation. If there are pending - events, then enter the hypervisor to get them handled. - */ -ENTRY(xen_irq_enable_direct) - BUG - - /* Unmask events */ - movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. */ - - /* Test for pending */ - testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending - jz 1f - -2: call check_events -1: -ENDPATCH(xen_irq_enable_direct) - ret - ENDPROC(xen_irq_enable_direct) - RELOC(xen_irq_enable_direct, 2b+1) - -/* - Disabling events is simply a matter of making the event mask - non-zero. - */ -ENTRY(xen_irq_disable_direct) - BUG - - movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask -ENDPATCH(xen_irq_disable_direct) - ret - ENDPROC(xen_irq_disable_direct) - RELOC(xen_irq_disable_direct, 0) - -/* - (xen_)save_fl is used to get the current interrupt enable status. - Callers expect the status to be in X86_EFLAGS_IF, and other bits - may be set in the return value. We take advantage of this by - making sure that X86_EFLAGS_IF has the right value (and other bits - in that byte are 0), but other bits in the return value are - undefined. We need to toggle the state of the bit, because - Xen and x86 use opposite senses (mask vs enable). 
- */ -ENTRY(xen_save_fl_direct) - BUG - - testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - setz %ah - addb %ah,%ah -ENDPATCH(xen_save_fl_direct) - ret - ENDPROC(xen_save_fl_direct) - RELOC(xen_save_fl_direct, 0) - -/* - In principle the caller should be passing us a value return - from xen_save_fl_direct, but for robustness sake we test only - the X86_EFLAGS_IF flag rather than the whole byte. After - setting the interrupt mask state, it checks for unmasked - pending events and enters the hypervisor to get them delivered - if so. - */ -ENTRY(xen_restore_fl_direct) - BUG - - testb $X86_EFLAGS_IF>>8, %ah - setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. */ - - /* check for unmasked and pending */ - cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending - jz 1f -2: call check_events -1: -ENDPATCH(xen_restore_fl_direct) - ret - ENDPROC(xen_restore_fl_direct) - RELOC(xen_restore_fl_direct, 2b+1) - - -/* - Force an event check by making a hypercall, - but preserve regs before making the call. - */ -check_events: - push %rax - push %rcx - push %rdx - push %rsi - push %rdi - push %r8 - push %r9 - push %r10 - push %r11 - call xen_force_evtchn_callback - pop %r11 - pop %r10 - pop %r9 - pop %r8 - pop %rdi - pop %rsi - pop %rdx - pop %rcx - pop %rax - ret +#include "xen-asm.h" ENTRY(xen_adjust_exception_frame) mov 8+0(%rsp),%rcx -- cgit v1.2.3 From e4d0407185cdbdcfd99fc23bde2e5454bbc46329 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 2 Feb 2009 13:55:54 -0800 Subject: xen: use direct ops on 64-bit Enable the use of the direct vcpu-access operations on 64-bit. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/enlighten.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index aed7ceeb4b6..37230342c2c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -87,14 +87,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; * * 0: not available, 1: available */ -static int have_vcpu_info_placement = -#ifdef CONFIG_X86_32 - 1 -#else - 0 -#endif - ; - +static int have_vcpu_info_placement = 1; static void xen_vcpu_setup(int cpu) { @@ -914,11 +907,6 @@ asmlinkage void __init xen_start_kernel(void) machine_ops = xen_machine_ops; -#ifdef CONFIG_X86_64 - /* Disable until direct per-cpu data access. */ - have_vcpu_info_placement = 0; -#endif - #ifdef CONFIG_X86_64 /* * Setup percpu state. We only need to do this for 64-bit -- cgit v1.2.3 From 18114f61359ac05e3aa797d53d63f40db41f798d Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 30 Jan 2009 18:16:46 -0800 Subject: x86: uaccess: use errret as error value in __put_user_size() Impact: cleanup In __put_user_size() macro errret is used for error value. But if size is 8, errret isn't passed to__put_user_asm_u64(). This behavior is inconsistent. Signed-off-by: Hiroshi Shimamoto Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/uaccess.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index b9a24155f7a..b685ece89d5 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -186,7 +186,7 @@ extern int __get_user_bad(void); #ifdef CONFIG_X86_32 -#define __put_user_asm_u64(x, addr, err) \ +#define __put_user_asm_u64(x, addr, err, errret) \ asm volatile("1: movl %%eax,0(%2)\n" \ "2: movl %%edx,4(%2)\n" \ "3:\n" \ @@ -197,7 +197,7 @@ extern int __get_user_bad(void); _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (err) \ - : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) + : "A" (x), "r" (addr), "i" (errret), "0" (err)) #define __put_user_asm_ex_u64(x, addr) \ asm volatile("1: movl %%eax,0(%1)\n" \ @@ -211,8 +211,8 @@ extern int __get_user_bad(void); asm volatile("call __put_user_8" : "=a" (__ret_pu) \ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else -#define __put_user_asm_u64(x, ptr, retval) \ - __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) +#define __put_user_asm_u64(x, ptr, retval, errret) \ + __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) #define __put_user_asm_ex_u64(x, addr) \ __put_user_asm_ex(x, addr, "q", "", "Zr") #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) @@ -289,7 +289,8 @@ do { \ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ break; \ case 8: \ - __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \ + __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ + errret); \ break; \ default: \ __put_user_bad(); \ -- cgit v1.2.3 From 130ace11a9dc682541336d1fe5cb3bc7771a149e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Feb 2009 00:57:48 +0900 Subject: x86: style cleanups for xen assemblies Make the following style cleanups: * drop unnecessary //#include from xen-asm_32.S * compulsive adding of space after comma * reformat multiline comments Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/xen/xen-asm.S | 78 +++++++-------- arch/x86/xen/xen-asm_32.S | 238 ++++++++++++++++++++++++---------------------- arch/x86/xen/xen-asm_64.S | 107 +++++++++++---------- 3 files changed, 219 insertions(+), 204 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 4c6f9679913..79d7362ad6d 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -1,14 +1,14 @@ /* - Asm versions of Xen pv-ops, suitable for either direct use or inlining. - The inline versions are the same as the direct-use versions, with the - pre- and post-amble chopped off. - - This code is encoded for size rather than absolute efficiency, - with a view to being able to inline as much as possible. - - We only bother with direct forms (ie, vcpu in percpu data) of - the operations here; the indirect forms are better handled in - C, since they're generally too large to inline anyway. + * Asm versions of Xen pv-ops, suitable for either direct use or + * inlining. The inline versions are the same as the direct-use + * versions, with the pre- and post-amble chopped off. + * + * This code is encoded for size rather than absolute efficiency, with + * a view to being able to inline as much as possible. + * + * We only bother with direct forms (ie, vcpu in percpu data) of the + * operations here; the indirect forms are better handled in C, since + * they're generally too large to inline anyway. 
*/ #include @@ -18,17 +18,19 @@ #include "xen-asm.h" /* - Enable events. This clears the event mask and tests the pending - event status with one and operation. If there are pending - events, then enter the hypervisor to get them handled. + * Enable events. This clears the event mask and tests the pending + * event status with one and operation. If there are pending events, + * then enter the hypervisor to get them handled. */ ENTRY(xen_irq_enable_direct) /* Unmask events */ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. */ + /* + * Preempt here doesn't matter because that will deal with any + * pending interrupts. The pending check may end up being run + * on the wrong CPU, but that doesn't hurt. + */ /* Test for pending */ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending @@ -43,8 +45,8 @@ ENDPATCH(xen_irq_enable_direct) /* - Disabling events is simply a matter of making the event mask - non-zero. + * Disabling events is simply a matter of making the event mask + * non-zero. */ ENTRY(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask @@ -54,18 +56,18 @@ ENDPATCH(xen_irq_disable_direct) RELOC(xen_irq_disable_direct, 0) /* - (xen_)save_fl is used to get the current interrupt enable status. - Callers expect the status to be in X86_EFLAGS_IF, and other bits - may be set in the return value. We take advantage of this by - making sure that X86_EFLAGS_IF has the right value (and other bits - in that byte are 0), but other bits in the return value are - undefined. We need to toggle the state of the bit, because - Xen and x86 use opposite senses (mask vs enable). + * (xen_)save_fl is used to get the current interrupt enable status. + * Callers expect the status to be in X86_EFLAGS_IF, and other bits + * may be set in the return value. We take advantage of this by + * making sure that X86_EFLAGS_IF has the right value (and other bits + * in that byte are 0), but other bits in the return value are + * undefined. We need to toggle the state of the bit, because Xen and + * x86 use opposite senses (mask vs enable). */ ENTRY(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah - addb %ah,%ah + addb %ah, %ah ENDPATCH(xen_save_fl_direct) ret ENDPROC(xen_save_fl_direct) @@ -73,12 +75,11 @@ ENDPATCH(xen_save_fl_direct) /* - In principle the caller should be passing us a value return - from xen_save_fl_direct, but for robustness sake we test only - the X86_EFLAGS_IF flag rather than the whole byte. After - setting the interrupt mask state, it checks for unmasked - pending events and enters the hypervisor to get them delivered - if so. + * In principle the caller should be passing us a value return from + * xen_save_fl_direct, but for robustness sake we test only the + * X86_EFLAGS_IF flag rather than the whole byte. After setting the + * interrupt mask state, it checks for unmasked pending events and + * enters the hypervisor to get them delivered if so. */ ENTRY(xen_restore_fl_direct) #ifdef CONFIG_X86_64 @@ -87,9 +88,11 @@ ENTRY(xen_restore_fl_direct) testb $X86_EFLAGS_IF>>8, %ah #endif setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - /* Preempt here doesn't matter because that will deal with - any pending interrupts. The pending check may end up being - run on the wrong CPU, but that doesn't hurt. 
*/ + /* + * Preempt here doesn't matter because that will deal with any + * pending interrupts. The pending check may end up being run + * on the wrong CPU, but that doesn't hurt. + */ /* check for unmasked and pending */ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending @@ -103,8 +106,8 @@ ENDPATCH(xen_restore_fl_direct) /* - Force an event check by making a hypercall, - but preserve regs before making the call. + * Force an event check by making a hypercall, but preserve regs + * before making the call. */ check_events: #ifdef CONFIG_X86_32 @@ -137,4 +140,3 @@ check_events: pop %rax #endif ret - diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index 082d173caaf..88e15deb8b8 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -1,17 +1,16 @@ /* - Asm versions of Xen pv-ops, suitable for either direct use or inlining. - The inline versions are the same as the direct-use versions, with the - pre- and post-amble chopped off. - - This code is encoded for size rather than absolute efficiency, - with a view to being able to inline as much as possible. - - We only bother with direct forms (ie, vcpu in pda) of the operations - here; the indirect forms are better handled in C, since they're - generally too large to inline anyway. + * Asm versions of Xen pv-ops, suitable for either direct use or + * inlining. The inline versions are the same as the direct-use + * versions, with the pre- and post-amble chopped off. + * + * This code is encoded for size rather than absolute efficiency, with + * a view to being able to inline as much as possible. + * + * We only bother with direct forms (ie, vcpu in pda) of the + * operations here; the indirect forms are better handled in C, since + * they're generally too large to inline anyway. */ -//#include #include #include #include @@ -21,8 +20,8 @@ #include "xen-asm.h" /* - Force an event check by making a hypercall, - but preserve regs before making the call. + * Force an event check by making a hypercall, but preserve regs + * before making the call. */ check_events: push %eax @@ -35,10 +34,10 @@ check_events: ret /* - We can't use sysexit directly, because we're not running in ring0. - But we can easily fake it up using iret. Assuming xen_sysexit - is jumped to with a standard stack frame, we can just strip it - back to a standard iret frame and use iret. + * We can't use sysexit directly, because we're not running in ring0. + * But we can easily fake it up using iret. Assuming xen_sysexit is + * jumped to with a standard stack frame, we can just strip it back to + * a standard iret frame and use iret. */ ENTRY(xen_sysexit) movl PT_EAX(%esp), %eax /* Shouldn't be necessary? */ @@ -49,33 +48,31 @@ ENTRY(xen_sysexit) ENDPROC(xen_sysexit) /* - This is run where a normal iret would be run, with the same stack setup: - 8: eflags - 4: cs - esp-> 0: eip - - This attempts to make sure that any pending events are dealt - with on return to usermode, but there is a small window in - which an event can happen just before entering usermode. If - the nested interrupt ends up setting one of the TIF_WORK_MASK - pending work flags, they will not be tested again before - returning to usermode. This means that a process can end up - with pending work, which will be unprocessed until the process - enters and leaves the kernel again, which could be an - unbounded amount of time. This means that a pending signal or - reschedule event could be indefinitely delayed. 
- - The fix is to notice a nested interrupt in the critical - window, and if one occurs, then fold the nested interrupt into - the current interrupt stack frame, and re-process it - iteratively rather than recursively. This means that it will - exit via the normal path, and all pending work will be dealt - with appropriately. - - Because the nested interrupt handler needs to deal with the - current stack state in whatever form its in, we keep things - simple by only using a single register which is pushed/popped - on the stack. + * This is run where a normal iret would be run, with the same stack setup: + * 8: eflags + * 4: cs + * esp-> 0: eip + * + * This attempts to make sure that any pending events are dealt with + * on return to usermode, but there is a small window in which an + * event can happen just before entering usermode. If the nested + * interrupt ends up setting one of the TIF_WORK_MASK pending work + * flags, they will not be tested again before returning to + * usermode. This means that a process can end up with pending work, + * which will be unprocessed until the process enters and leaves the + * kernel again, which could be an unbounded amount of time. This + * means that a pending signal or reschedule event could be + * indefinitely delayed. + * + * The fix is to notice a nested interrupt in the critical window, and + * if one occurs, then fold the nested interrupt into the current + * interrupt stack frame, and re-process it iteratively rather than + * recursively. This means that it will exit via the normal path, and + * all pending work will be dealt with appropriately. + * + * Because the nested interrupt handler needs to deal with the current + * stack state in whatever form its in, we keep things simple by only + * using a single register which is pushed/popped on the stack. */ ENTRY(xen_iret) /* test eflags for special cases */ @@ -85,13 +82,15 @@ ENTRY(xen_iret) push %eax ESP_OFFSET=4 # bytes pushed onto stack - /* Store vcpu_info pointer for easy access. Do it this - way to avoid having to reload %fs */ + /* + * Store vcpu_info pointer for easy access. Do it this way to + * avoid having to reload %fs + */ #ifdef CONFIG_SMP GET_THREAD_INFO(%eax) - movl TI_cpu(%eax),%eax - movl __per_cpu_offset(,%eax,4),%eax - mov per_cpu__xen_vcpu(%eax),%eax + movl TI_cpu(%eax), %eax + movl __per_cpu_offset(,%eax,4), %eax + mov per_cpu__xen_vcpu(%eax), %eax #else movl per_cpu__xen_vcpu, %eax #endif @@ -99,37 +98,46 @@ ENTRY(xen_iret) /* check IF state we're restoring */ testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp) - /* Maybe enable events. Once this happens we could get a - recursive event, so the critical region starts immediately - afterwards. However, if that happens we don't end up - resuming the code, so we don't have to be worried about - being preempted to another CPU. */ + /* + * Maybe enable events. Once this happens we could get a + * recursive event, so the critical region starts immediately + * afterwards. However, if that happens we don't end up + * resuming the code, so we don't have to be worried about + * being preempted to another CPU. 
+ */ setz XEN_vcpu_info_mask(%eax) xen_iret_start_crit: /* check for unmasked and pending */ cmpw $0x0001, XEN_vcpu_info_pending(%eax) - /* If there's something pending, mask events again so we - can jump back into xen_hypervisor_callback */ + /* + * If there's something pending, mask events again so we can + * jump back into xen_hypervisor_callback + */ sete XEN_vcpu_info_mask(%eax) popl %eax - /* From this point on the registers are restored and the stack - updated, so we don't need to worry about it if we're preempted */ + /* + * From this point on the registers are restored and the stack + * updated, so we don't need to worry about it if we're + * preempted + */ iret_restore_end: - /* Jump to hypervisor_callback after fixing up the stack. - Events are masked, so jumping out of the critical - region is OK. */ + /* + * Jump to hypervisor_callback after fixing up the stack. + * Events are masked, so jumping out of the critical region is + * OK. + */ je xen_hypervisor_callback 1: iret xen_iret_end_crit: -.section __ex_table,"a" +.section __ex_table, "a" .align 4 - .long 1b,iret_exc + .long 1b, iret_exc .previous hyper_iret: @@ -139,55 +147,55 @@ hyper_iret: .globl xen_iret_start_crit, xen_iret_end_crit /* - This is called by xen_hypervisor_callback in entry.S when it sees - that the EIP at the time of interrupt was between xen_iret_start_crit - and xen_iret_end_crit. We're passed the EIP in %eax so we can do - a more refined determination of what to do. - - The stack format at this point is: - ---------------- - ss : (ss/esp may be present if we came from usermode) - esp : - eflags } outer exception info - cs } - eip } - ---------------- <- edi (copy dest) - eax : outer eax if it hasn't been restored - ---------------- - eflags } nested exception info - cs } (no ss/esp because we're nested - eip } from the same ring) - orig_eax }<- esi (copy src) - - - - - - - - - - fs } - es } - ds } SAVE_ALL state - eax } - : : - ebx }<- esp - ---------------- - - In order to deliver the nested exception properly, we need to shift - everything from the return addr up to the error code so it - sits just under the outer exception info. This means that when we - handle the exception, we do it in the context of the outer exception - rather than starting a new one. - - The only caveat is that if the outer eax hasn't been - restored yet (ie, it's still on stack), we need to insert - its value into the SAVE_ALL state before going on, since - it's usermode state which we eventually need to restore. + * This is called by xen_hypervisor_callback in entry.S when it sees + * that the EIP at the time of interrupt was between + * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in + * %eax so we can do a more refined determination of what to do. + * + * The stack format at this point is: + * ---------------- + * ss : (ss/esp may be present if we came from usermode) + * esp : + * eflags } outer exception info + * cs } + * eip } + * ---------------- <- edi (copy dest) + * eax : outer eax if it hasn't been restored + * ---------------- + * eflags } nested exception info + * cs } (no ss/esp because we're nested + * eip } from the same ring) + * orig_eax }<- esi (copy src) + * - - - - - - - - + * fs } + * es } + * ds } SAVE_ALL state + * eax } + * : : + * ebx }<- esp + * ---------------- + * + * In order to deliver the nested exception properly, we need to shift + * everything from the return addr up to the error code so it sits + * just under the outer exception info. 
This means that when we + * handle the exception, we do it in the context of the outer + * exception rather than starting a new one. + * + * The only caveat is that if the outer eax hasn't been restored yet + * (ie, it's still on stack), we need to insert its value into the + * SAVE_ALL state before going on, since it's usermode state which we + * eventually need to restore. */ ENTRY(xen_iret_crit_fixup) /* - Paranoia: Make sure we're really coming from kernel space. - One could imagine a case where userspace jumps into the - critical range address, but just before the CPU delivers a GP, - it decides to deliver an interrupt instead. Unlikely? - Definitely. Easy to avoid? Yes. The Intel documents - explicitly say that the reported EIP for a bad jump is the - jump instruction itself, not the destination, but some virtual - environments get this wrong. + * Paranoia: Make sure we're really coming from kernel space. + * One could imagine a case where userspace jumps into the + * critical range address, but just before the CPU delivers a + * GP, it decides to deliver an interrupt instead. Unlikely? + * Definitely. Easy to avoid? Yes. The Intel documents + * explicitly say that the reported EIP for a bad jump is the + * jump instruction itself, not the destination, but some + * virtual environments get this wrong. */ movl PT_CS(%esp), %ecx andl $SEGMENT_RPL_MASK, %ecx @@ -197,15 +205,17 @@ ENTRY(xen_iret_crit_fixup) lea PT_ORIG_EAX(%esp), %esi lea PT_EFLAGS(%esp), %edi - /* If eip is before iret_restore_end then stack - hasn't been restored yet. */ + /* + * If eip is before iret_restore_end then stack + * hasn't been restored yet. + */ cmp $iret_restore_end, %eax jae 1f - movl 0+4(%edi),%eax /* copy EAX (just above top of frame) */ + movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */ movl %eax, PT_EAX(%esp) - lea ESP_OFFSET(%edi),%edi /* move dest up over saved regs */ + lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */ /* set up the copy */ 1: std @@ -213,6 +223,6 @@ ENTRY(xen_iret_crit_fixup) rep movsl cld - lea 4(%edi),%esp /* point esp to new frame */ + lea 4(%edi), %esp /* point esp to new frame */ 2: jmp xen_do_upcall diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index d205a283efe..02f496a8dba 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -1,14 +1,14 @@ /* - Asm versions of Xen pv-ops, suitable for either direct use or inlining. - The inline versions are the same as the direct-use versions, with the - pre- and post-amble chopped off. - - This code is encoded for size rather than absolute efficiency, - with a view to being able to inline as much as possible. - - We only bother with direct forms (ie, vcpu in pda) of the operations - here; the indirect forms are better handled in C, since they're - generally too large to inline anyway. + * Asm versions of Xen pv-ops, suitable for either direct use or + * inlining. The inline versions are the same as the direct-use + * versions, with the pre- and post-amble chopped off. + * + * This code is encoded for size rather than absolute efficiency, with + * a view to being able to inline as much as possible. + * + * We only bother with direct forms (ie, vcpu in pda) of the + * operations here; the indirect forms are better handled in C, since + * they're generally too large to inline anyway. 
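Taken together, the direct-form stubs in these files reduce to a couple of byte accesses on the per-cpu vcpu_info plus an optional hypercall when events are pending. A rough standalone C model of that behaviour (illustrative only; simplified layout, mock hypercall stand-in, not the in-tree code):

/*
 * xen_irq_demo.c - rough C model of the direct-form irq stubs above.
 * Build: gcc xen_irq_demo.c
 */
#include <stdio.h>

#define X86_EFLAGS_IF 0x200

struct vcpu_info_demo {
    unsigned char evtchn_upcall_pending;
    unsigned char evtchn_upcall_mask;   /* 1 = events masked */
};

static struct vcpu_info_demo vcpu;      /* stands in for the per-cpu copy */

static void deliver_pending_events(void)    /* stands in for check_events */
{
    printf("hypercall: deliver pending events\n");
    vcpu.evtchn_upcall_pending = 0;
}

static void xen_irq_disable(void)
{
    vcpu.evtchn_upcall_mask = 1;
}

static void xen_irq_enable(void)
{
    vcpu.evtchn_upcall_mask = 0;
    if (vcpu.evtchn_upcall_pending)
        deliver_pending_events();
}

static unsigned long xen_save_fl(void)
{
    /* Xen's mask and x86's IF have opposite senses */
    return vcpu.evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
    vcpu.evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
    if (!vcpu.evtchn_upcall_mask && vcpu.evtchn_upcall_pending)
        deliver_pending_events();
}

int main(void)
{
    unsigned long flags = xen_save_fl();    /* interrupts currently "on" */

    xen_irq_disable();
    vcpu.evtchn_upcall_pending = 1;         /* pretend an event arrived */
    xen_restore_fl(flags);                  /* unmasks and delivers it */
    xen_irq_enable();                       /* nothing pending, no hypercall */
    return 0;
}

The save_fl/restore_fl pair also shows the inversion the comments mention: Xen's mask byte is set when interrupts are logically off, while x86's IF bit is set when they are on.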
*/ #include @@ -21,25 +21,25 @@ #include "xen-asm.h" ENTRY(xen_adjust_exception_frame) - mov 8+0(%rsp),%rcx - mov 8+8(%rsp),%r11 + mov 8+0(%rsp), %rcx + mov 8+8(%rsp), %r11 ret $16 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* - Xen64 iret frame: - - ss - rsp - rflags - cs - rip <-- standard iret frame - - flags - - rcx } - r11 }<-- pushed by hypercall page -rsp -> rax } + * Xen64 iret frame: + * + * ss + * rsp + * rflags + * cs + * rip <-- standard iret frame + * + * flags + * + * rcx } + * r11 }<-- pushed by hypercall page + * rsp->rax } */ ENTRY(xen_iret) pushq $0 @@ -48,8 +48,8 @@ ENDPATCH(xen_iret) RELOC(xen_iret, 1b+1) /* - sysexit is not used for 64-bit processes, so it's - only ever used to return to 32-bit compat userspace. + * sysexit is not used for 64-bit processes, so it's only ever used to + * return to 32-bit compat userspace. */ ENTRY(xen_sysexit) pushq $__USER32_DS @@ -64,10 +64,12 @@ ENDPATCH(xen_sysexit) RELOC(xen_sysexit, 1b+1) ENTRY(xen_sysret64) - /* We're already on the usermode stack at this point, but still - with the kernel gs, so we can easily switch back */ + /* + * We're already on the usermode stack at this point, but + * still with the kernel gs, so we can easily switch back + */ movq %rsp, PER_CPU_VAR(old_rsp) - movq PER_CPU_VAR(kernel_stack),%rsp + movq PER_CPU_VAR(kernel_stack), %rsp pushq $__USER_DS pushq PER_CPU_VAR(old_rsp) @@ -81,8 +83,10 @@ ENDPATCH(xen_sysret64) RELOC(xen_sysret64, 1b+1) ENTRY(xen_sysret32) - /* We're already on the usermode stack at this point, but still - with the kernel gs, so we can easily switch back */ + /* + * We're already on the usermode stack at this point, but + * still with the kernel gs, so we can easily switch back + */ movq %rsp, PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack), %rsp @@ -98,28 +102,27 @@ ENDPATCH(xen_sysret32) RELOC(xen_sysret32, 1b+1) /* - Xen handles syscall callbacks much like ordinary exceptions, - which means we have: - - kernel gs - - kernel rsp - - an iret-like stack frame on the stack (including rcx and r11): - ss - rsp - rflags - cs - rip - r11 - rsp-> rcx - - In all the entrypoints, we undo all that to make it look - like a CPU-generated syscall/sysenter and jump to the normal - entrypoint. + * Xen handles syscall callbacks much like ordinary exceptions, which + * means we have: + * - kernel gs + * - kernel rsp + * - an iret-like stack frame on the stack (including rcx and r11): + * ss + * rsp + * rflags + * cs + * rip + * r11 + * rsp->rcx + * + * In all the entrypoints, we undo all that to make it look like a + * CPU-generated syscall/sysenter and jump to the normal entrypoint. */ .macro undo_xen_syscall - mov 0*8(%rsp),%rcx - mov 1*8(%rsp),%r11 - mov 5*8(%rsp),%rsp + mov 0*8(%rsp), %rcx + mov 1*8(%rsp), %r11 + mov 5*8(%rsp), %rsp .endm /* Normal 64-bit system call target */ @@ -146,7 +149,7 @@ ENDPROC(xen_sysenter_target) ENTRY(xen_syscall32_target) ENTRY(xen_sysenter_target) - lea 16(%rsp), %rsp /* strip %rcx,%r11 */ + lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $VGCF_in_syscall jmp hypercall_iret -- cgit v1.2.3 From 65a4e574d2382d83f71b30ea92f86d2e40a6ef8d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 03:36:17 +0100 Subject: smp, generic: introduce arch_disable_smp_support() instead of disable_ioapic_setup() Impact: cleanup disable_ioapic_setup() in init/main.c is ugly as the function is x86-specific. The #ifdef inline prototype there is ugly too. 
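The replacement described next leans on the usual weak-default idiom: a generic, empty definition marked weak that an architecture (or an x86 build that actually has an IO-APIC) can override with a strong definition at link time. A minimal standalone sketch of that pattern, with a hypothetical file name and the plain GCC attribute rather than the kernel's __weak wrapper:

/*
 * weak_demo.c - minimal sketch of the weak-default pattern.
 * Build: gcc weak_demo.c
 */
#include <stdio.h>

/*
 * Generic fallback: if no stronger definition is linked in, this
 * empty version is what callers get.
 */
void __attribute__((weak)) arch_disable_smp_support(void)
{
    printf("generic: nothing to disable\n");
}

int main(void)
{
    /*
     * An x86 build would link an object (io_apic.o in the patch
     * below) whose strong definition silently replaces the weak one.
     */
    arch_disable_smp_support();
    return 0;
}

Because the strong definition wins transparently at link time, generic code can call the function unconditionally, with no #ifdef at the call site.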
Replace it with a generic arch_disable_smp_support() function - which has a weak alias for non-x86 architectures and for non-ioapic x86 builds. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 9 --------- arch/x86/kernel/apic.c | 4 +--- arch/x86/kernel/io_apic.c | 11 ++++++++++- arch/x86/kernel/smpboot.c | 2 +- 4 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 08ec793aa04..309d0e23193 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -143,15 +143,6 @@ extern int noioapicreroute; /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ extern int timer_through_8259; -static inline void disable_ioapic_setup(void) -{ -#ifdef CONFIG_PCI - noioapicquirk = 1; - noioapicreroute = -1; -#endif - skip_ioapic_setup = 1; -} - /* * If we use the IO-APIC for IRQ routing, disable automatic * assignment of PCI IRQ's. diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 85d8b50d1af..a04a73a51d2 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1138,9 +1138,7 @@ void __cpuinit setup_local_APIC(void) int i, j; if (disable_apic) { -#ifdef CONFIG_X86_IO_APIC - disable_ioapic_setup(); -#endif + arch_disable_smp_support(); return; } diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 57d60c741e3..84bccac4619 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -98,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int skip_ioapic_setup; +void arch_disable_smp_support(void) +{ +#ifdef CONFIG_PCI + noioapicquirk = 1; + noioapicreroute = -1; +#endif + skip_ioapic_setup = 1; +} + static int __init parse_noapic(char *str) { /* disable IO-APIC */ - disable_ioapic_setup(); + arch_disable_smp_support(); return 0; } early_param("noapic", parse_noapic); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f40f86fec2f..96f7d304f5c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1071,7 +1071,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_ERR "... forcing use of dummy APIC emulation." "(tell your hw vendor)\n"); smpboot_clear_io_apic(); - disable_ioapic_setup(); + arch_disable_smp_support(); return -1; } -- cgit v1.2.3 From fdbecd9fd14198853bec4cbae8bc7af93f2e3de3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 31 Jan 2009 03:57:12 +0100 Subject: x86, apic: explain the purpose of max_physical_apicid Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index a04a73a51d2..5475e1c3180 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -50,13 +50,26 @@ #include unsigned int num_processors; + unsigned disabled_cpus __cpuinitdata; + /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; -EXPORT_SYMBOL(boot_cpu_physical_apicid); + +/* + * The highest APIC ID seen during enumeration. + * + * This determines the messaging protocol we can use: if all APIC IDs + * are in the 0 ... 7 range, then we can use logical addressing which + * has some performance advantages (better broadcasting). + * + * If there's an APIC ID above 8, we use physical addressing. 
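Put differently, the choice the new comment describes comes down to a threshold check on the highest APIC ID seen during enumeration. An illustrative standalone sketch (hypothetical helper, not the in-tree probe logic):

/*
 * apic_mode_demo.c - illustrative threshold check only.
 * Build: gcc apic_mode_demo.c
 */
#include <stdio.h>

static unsigned int max_physical_apicid;

static const char *pick_dest_mode(void)
{
    /*
     * Flat logical addressing carries one CPU per bit of an 8-bit
     * destination field, so it only works while every APIC ID fits
     * in 0..7; anything larger forces physical addressing.
     */
    return max_physical_apicid < 8 ? "logical (flat)" : "physical";
}

int main(void)
{
    max_physical_apicid = 15;   /* pretend enumeration saw APIC ID 15 */
    printf("destination mode: %s\n", pick_dest_mode());
    return 0;
}
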
+ */ unsigned int max_physical_apicid; -/* Bitmask of physically existing CPUs */ +/* + * Bitmask of physically existing CPUs: + */ physid_mask_t phys_cpu_present_map; /* -- cgit v1.2.3 From c5e954820335ef5aed1662b70aaf5deb9de16735 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 30 Jan 2009 17:29:27 -0800 Subject: x86: move default_ipi_xx back to ipi.c Impact: cleanup only leave _default_ipi_xx etc in .h Beyond the cleanup factor, this saves a bit of code size as well: text data bss dec hex filename 7281931 1630144 1463304 10375379 9e50d3 vmlinux.before 7281753 1630144 1463304 10375201 9e5021 vmlinux.after Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hw_irq.h | 6 -- arch/x86/include/asm/ipi.h | 127 +++++------------------------------------- arch/x86/kernel/ipi.c | 108 +++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 120 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 3ef2bded97a..1a20e3d1200 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -71,12 +71,6 @@ extern void setup_ioapic_dest(void); extern void enable_IO_APIC(void); #endif -/* IPI functions */ -#ifdef CONFIG_X86_32 -extern void default_send_IPI_self(int vector); -#endif -extern void default_send_IPI(int dest, int vector); - /* Statistics */ extern atomic_t irq_err_count; extern atomic_t irq_mis_count; diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index aa79945445b..5f2efc5d992 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -119,112 +119,22 @@ static inline void native_apic_mem_write(APIC_ICR, cfg); } -static inline void -default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) -{ - unsigned long query_cpu; - unsigned long flags; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicast to each CPU instead. - * - mbligh - */ - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, - query_cpu), vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -static inline void -default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int query_cpu; - unsigned long flags; - - /* See Hack comment above */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, - query_cpu), vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - +extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector); #include -static inline void -default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector) -{ - unsigned long flags; - unsigned int query_cpu; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicasts to each CPU instead. 
This - * should be modified to do 1 message per cluster ID - mbligh - */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) - __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); - local_irq_restore(flags); -} - -static inline void -default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector) -{ - unsigned long flags; - unsigned int query_cpu; - unsigned int this_cpu = smp_processor_id(); - - /* See Hack comment above */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); - } - local_irq_restore(flags); -} +extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector); /* Avoid include hell */ #define NMI_VECTOR 0x02 extern int no_broadcast; -#ifndef CONFIG_X86_64 -/* - * This is only used on smaller machines. - */ -static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - unsigned long flags; - - local_irq_save(flags); - WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); - __default_send_IPI_dest_field(mask, vector, apic->dest_logical); - local_irq_restore(flags); -} - -static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_bitmask_logical(mask, vector); -} -#endif - static inline void __default_local_send_IPI_allbutself(int vector) { if (no_broadcast || vector == NMI_VECTOR) @@ -242,22 +152,11 @@ static inline void __default_local_send_IPI_all(int vector) } #ifdef CONFIG_X86_32 -static inline void default_send_IPI_allbutself(int vector) -{ - /* - * if there are no other CPUs in the system then we get an APIC send - * error if we try to broadcast, thus avoid sending IPIs in this case. - */ - if (!(num_online_cpus() > 1)) - return; - - __default_local_send_IPI_allbutself(vector); -} - -static inline void default_send_IPI_all(int vector) -{ - __default_local_send_IPI_all(vector); -} +extern void default_send_IPI_mask_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_allbutself(int vector); +extern void default_send_IPI_all(int vector); +extern void default_send_IPI_self(int vector); #endif #endif diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 339f4f3feee..dbf5445727a 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -19,8 +19,116 @@ #include #include +void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicast to each CPU instead. 
+ * - mbligh + */ + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int query_cpu; + unsigned long flags; + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicasts to each CPU instead. This + * should be modified to do 1 message per cluster ID - mbligh + */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + } + local_irq_restore(flags); +} + #ifdef CONFIG_X86_32 +/* + * This is only used on smaller machines. + */ +void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + unsigned long flags; + + local_irq_save(flags); + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_allbutself(int vector) +{ + /* + * if there are no other CPUs in the system then we get an APIC send + * error if we try to broadcast, thus avoid sending IPIs in this case. + */ + if (!(num_online_cpus() > 1)) + return; + + __default_local_send_IPI_allbutself(vector); +} + +void default_send_IPI_all(int vector) +{ + __default_local_send_IPI_all(vector); +} + void default_send_IPI_self(int vector) { __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); -- cgit v1.2.3 From 4f179d121885142fb907b64956228b369d495958 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 1 Feb 2009 11:25:57 +0100 Subject: x86, numaq: cleanups Also move xquad_portio over to where it's allocated. 
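Both this xquad_portio move (diffed below) and the ipi.c change above follow the same idiom: the header keeps only an extern declaration, and exactly one .c file, next to the code that sets the object up, keeps the definition. A minimal standalone sketch with hypothetical names:

/*
 * portio_demo.c - sketch of "declare in the header, define next to the
 * setup code" (hypothetical names, not the in-tree code).
 * Build: gcc portio_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

/* What a header such as numaq.h would carry: a declaration only. */
extern void *xquad_portio_demo;

/* The single definition, kept next to the code that sets it up. */
void *xquad_portio_demo;

static void setup_portio_remap_demo(int num_quads)
{
    /* stands in for ioremap(XQUAD_PORTIO_BASE, num_quads * XQUAD_PORTIO_QUAD) */
    xquad_portio_demo = malloc((size_t)num_quads * 64);
}

int main(void)
{
    setup_portio_remap_demo(2);
    printf("cross-quad I/O window mapped at %p\n", xquad_portio_demo);
    free(xquad_portio_demo);
    return 0;
}

For the ipi helpers, moving the bodies out of the header is also what produces the small text-size saving quoted in the changelog above.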
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/numaq.h | 2 ++ arch/x86/kernel/numaq_32.c | 33 ++++++++++++++++++--------------- arch/x86/pci/numaq_32.c | 4 ---- 3 files changed, 20 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h index 1e8bd30b4c1..9f0a5f5d29e 100644 --- a/arch/x86/include/asm/numaq.h +++ b/arch/x86/include/asm/numaq.h @@ -31,6 +31,8 @@ extern int found_numaq; extern int get_memcfg_numaq(void); +extern void *xquad_portio; + /* * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the */ diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 947748e1721..0cc41a1d255 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -3,7 +3,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,17 +23,18 @@ * Send feedback to */ -#include +#include #include #include #include -#include -#include -#include +#include + #include +#include #include -#include +#include #include +#include #define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) @@ -91,19 +92,20 @@ static int __init numaq_pre_time_init(void) } int found_numaq; + /* * Have to match translation table entries to main table entries by counter * hence the mpc_record variable .... can't see a less disgusting way of * doing this .... */ struct mpc_config_translation { - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; }; /* x86_quirks member */ @@ -444,7 +446,8 @@ static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) return physid_mask_of_physid(cpu + 4*node); } -extern void *xquad_portio; +/* Where the IO area was mapped on multiquad, always 0 otherwise */ +void *xquad_portio; static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) { @@ -502,7 +505,7 @@ static void numaq_setup_portio_remap(void) int num_quads = num_online_nodes(); if (num_quads <= 1) - return; + return; printk("Remapping cross-quad port I/O for %d quads\n", num_quads); xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 1b2d773612e..5601e829c38 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -18,10 +18,6 @@ #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) -/* Where the IO area was mapped on multiquad, always 0 otherwise */ -void *xquad_portio; -EXPORT_SYMBOL(xquad_portio); - #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ -- cgit v1.2.3 From 8f9ca475c994e4d32f405183d07e8c7eedbdbdb4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 5 Feb 2009 16:21:53 +0100 Subject: x86: clean up arch/x86/Kconfig* - Consistent alignment of help text - Use the ---help--- keyword everywhere consistently as a visual separator - fix whitespace mismatches Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 404 ++++++++++++++++++++++++------------------------- 
arch/x86/Kconfig.cpu | 70 ++++----- arch/x86/Kconfig.debug | 47 +++--- 3 files changed, 260 insertions(+), 261 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 80291f749b6..270ecf90bdb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86" config 64BIT bool "64-bit kernel" if ARCH = "x86" default ARCH = "x86_64" - help + ---help--- Say yes to build a 64-bit kernel - formerly known as x86_64 Say no to build a 32-bit kernel - formerly known as i386 @@ -235,7 +235,7 @@ config SMP config SPARSE_IRQ bool "Support sparse irq numbering" depends on PCI_MSI || HT_IRQ - help + ---help--- This enables support for sparse irqs. This is useful for distro kernels that want to define a high CONFIG_NR_CPUS value but still want to have low kernel memory footprint on smaller machines. @@ -249,7 +249,7 @@ config NUMA_MIGRATE_IRQ_DESC bool "Move irq desc when changing irq smp_affinity" depends on SPARSE_IRQ && NUMA default n - help + ---help--- This enables moving irq_desc to cpu/node that irq will use handled. If you don't know what to do here, say N. @@ -258,19 +258,19 @@ config X86_MPPARSE bool "Enable MPS table" if ACPI default y depends on X86_LOCAL_APIC - help + ---help--- For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" depends on X86_32 && SMP - help + ---help--- This option is needed for the systems that have more than 8 CPUs config X86_NON_STANDARD bool "Support for non-standard x86 platforms" - help + ---help--- If you disable this option then the kernel will only support standard PC platforms. (which covers the vast majority of systems out there.) @@ -285,7 +285,7 @@ config X86_VISWS bool "SGI 320/540 (Visual Workstation)" depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT depends on X86_NON_STANDARD - help + ---help--- The SGI Visual Workstation series is an IA32-based workstation based on SGI systems chips with some legacy PC hardware attached. @@ -300,7 +300,7 @@ config X86_RDC321X depends on X86_NON_STANDARD select M486 select X86_REBOOTFIXUPS - help + ---help--- This option is needed for RDC R-321x system-on-chip, also known as R-8610-(G). If you don't have one of these chips, you should say N here. @@ -309,7 +309,7 @@ config X86_UV bool "SGI Ultraviolet" depends on X86_64 depends on X86_NON_STANDARD - help + ---help--- This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. @@ -318,7 +318,7 @@ config X86_VSMP select PARAVIRT depends on X86_64 && PCI depends on X86_NON_STANDARD - help + ---help--- Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is supposed to run on these EM64T-based machines. Only choose this option if you have one of these machines. @@ -327,7 +327,7 @@ config X86_ELAN bool "AMD Elan" depends on X86_32 depends on X86_NON_STANDARD - help + ---help--- Select this for an AMD Elan processor. Do not use this option for K6/Athlon/Opteron processors! @@ -338,8 +338,8 @@ config X86_32_NON_STANDARD bool "Support non-standard 32-bit SMP architectures" depends on X86_32 && SMP depends on X86_NON_STANDARD - help - This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default + ---help--- + This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default subarchitectures. It is intended for a generic binary kernel. 
if you select them all, kernel will probe it one by one. and will fallback to default. @@ -349,7 +349,7 @@ config X86_NUMAQ depends on X86_32_NON_STANDARD select NUMA select X86_MPPARSE - help + ---help--- This option is used for getting Linux to run on a NUMAQ (IBM/Sequent) NUMA multiquad box. This changes the way that processors are bootstrapped, and uses Clustered Logical APIC addressing mode instead @@ -359,14 +359,14 @@ config X86_NUMAQ config X86_SUMMIT bool "Summit/EXA (IBM x440)" depends on X86_32_NON_STANDARD - help + ---help--- This option is needed for IBM systems that use the Summit/EXA chipset. In particular, it is needed for the x440. config X86_ES7000 bool "Support for Unisys ES7000 IA32 series" depends on X86_32_NON_STANDARD && X86_BIGSMP - help + ---help--- Support for Unisys ES7000 systems. Say 'Y' here if this kernel is supposed to run on an IA32-based Unisys ES7000 system. @@ -374,7 +374,7 @@ config X86_VOYAGER bool "Voyager (NCR)" depends on SMP && !PCI && BROKEN depends on X86_32_NON_STANDARD - help + ---help--- Voyager is an MCA-based 32-way capable SMP architecture proprietary to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. @@ -387,7 +387,7 @@ config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" depends on X86 - help + ---help--- Calculate simpler /proc//wchan values. If this option is disabled then wchan values will recurse back to the caller function. This provides more accurate wchan values, @@ -397,7 +397,7 @@ config SCHED_OMIT_FRAME_POINTER menuconfig PARAVIRT_GUEST bool "Paravirtualized guest support" - help + ---help--- Say Y here to get to see options related to running Linux under various hypervisors. This option alone does not add any kernel code. @@ -411,7 +411,7 @@ config VMI bool "VMI Guest support" select PARAVIRT depends on X86_32 - help + ---help--- VMI provides a paravirtualized interface to the VMware ESX server (it could be used by other hypervisors in theory too, but is not at the moment), by linking the kernel to a GPL-ed ROM module @@ -421,7 +421,7 @@ config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT select PARAVIRT_CLOCK - help + ---help--- Turning on this option will allow you to run a paravirtualized clock when running over the KVM hypervisor. Instead of relying on a PIT (or probably other) emulation by the underlying device model, the host @@ -431,15 +431,15 @@ config KVM_CLOCK config KVM_GUEST bool "KVM Guest support" select PARAVIRT - help - This option enables various optimizations for running under the KVM - hypervisor. + ---help--- + This option enables various optimizations for running under the KVM + hypervisor. source "arch/x86/lguest/Kconfig" config PARAVIRT bool "Enable paravirtualization code" - help + ---help--- This changes the kernel so it can modify itself when it is run under a hypervisor, potentially improving performance significantly over full virtualization. However, when run without a hypervisor @@ -452,21 +452,21 @@ config PARAVIRT_CLOCK endif config PARAVIRT_DEBUG - bool "paravirt-ops debugging" - depends on PARAVIRT && DEBUG_KERNEL - help - Enable to debug paravirt_ops internals. Specifically, BUG if - a paravirt_op is missing when it is called. + bool "paravirt-ops debugging" + depends on PARAVIRT && DEBUG_KERNEL + ---help--- + Enable to debug paravirt_ops internals. Specifically, BUG if + a paravirt_op is missing when it is called. 
config MEMTEST bool "Memtest" - help + ---help--- This option adds a kernel parameter 'memtest', which allows memtest to be set. - memtest=0, mean disabled; -- default - memtest=1, mean do 1 test pattern; - ... - memtest=4, mean do 4 test patterns. + memtest=0, mean disabled; -- default + memtest=1, mean do 1 test pattern; + ... + memtest=4, mean do 4 test patterns. If you are unsure how to answer this question, answer N. config X86_SUMMIT_NUMA @@ -482,21 +482,21 @@ source "arch/x86/Kconfig.cpu" config HPET_TIMER def_bool X86_64 prompt "HPET Timer Support" if X86_32 - help - Use the IA-PC HPET (High Precision Event Timer) to manage - time in preference to the PIT and RTC, if a HPET is - present. - HPET is the next generation timer replacing legacy 8254s. - The HPET provides a stable time base on SMP - systems, unlike the TSC, but it is more expensive to access, - as it is off-chip. You can find the HPET spec at - . + ---help--- + Use the IA-PC HPET (High Precision Event Timer) to manage + time in preference to the PIT and RTC, if a HPET is + present. + HPET is the next generation timer replacing legacy 8254s. + The HPET provides a stable time base on SMP + systems, unlike the TSC, but it is more expensive to access, + as it is off-chip. You can find the HPET spec at + . - You can safely choose Y here. However, HPET will only be - activated if the platform and the BIOS support this feature. - Otherwise the 8254 will be used for timing services. + You can safely choose Y here. However, HPET will only be + activated if the platform and the BIOS support this feature. + Otherwise the 8254 will be used for timing services. - Choose N to continue using the legacy 8254 timer. + Choose N to continue using the legacy 8254 timer. config HPET_EMULATE_RTC def_bool y @@ -507,7 +507,7 @@ config HPET_EMULATE_RTC config DMI default y bool "Enable DMI scanning" if EMBEDDED - help + ---help--- Enabled scanning of DMI to identify machine quirks. Say Y here unless you have verified that your setup is not affected by entries in the DMI blacklist. Required by PNP @@ -519,7 +519,7 @@ config GART_IOMMU select SWIOTLB select AGP depends on X86_64 && PCI - help + ---help--- Support for full DMA access of devices with 32bit memory access only on systems with more than 3GB. This is usually needed for USB, sound, many IDE/SATA chipsets and some other devices. @@ -534,7 +534,7 @@ config CALGARY_IOMMU bool "IBM Calgary IOMMU support" select SWIOTLB depends on X86_64 && PCI && EXPERIMENTAL - help + ---help--- Support for hardware IOMMUs in IBM's xSeries x366 and x460 systems. Needed to run systems with more than 3GB of memory properly with 32-bit PCI devices that do not support DAC @@ -552,7 +552,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT def_bool y prompt "Should Calgary be enabled by default?" depends on CALGARY_IOMMU - help + ---help--- Should Calgary be enabled by default? if you choose 'y', Calgary will be used (if it exists). If you choose 'n', Calgary will not be used even if it exists. If you choose 'n' and would like to use @@ -564,7 +564,7 @@ config AMD_IOMMU select SWIOTLB select PCI_MSI depends on X86_64 && PCI && ACPI - help + ---help--- With this option you can enable support for AMD IOMMU hardware in your system. An IOMMU is a hardware component which provides remapping of DMA memory accesses from devices. 
With an AMD IOMMU you @@ -579,7 +579,7 @@ config AMD_IOMMU_STATS bool "Export AMD IOMMU statistics to debugfs" depends on AMD_IOMMU select DEBUG_FS - help + ---help--- This option enables code in the AMD IOMMU driver to collect various statistics about whats happening in the driver and exports that information to userspace via debugfs. @@ -588,7 +588,7 @@ config AMD_IOMMU_STATS # need this always selected by IOMMU for the VIA workaround config SWIOTLB def_bool y if X86_64 - help + ---help--- Support for software bounce buffers used on x86-64 systems which don't have a hardware IOMMU (e.g. the current generation of Intel's x86-64 CPUs). Using this PCI devices which can only @@ -606,7 +606,7 @@ config MAXSMP depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL select CPUMASK_OFFSTACK default n - help + ---help--- Configure maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. @@ -617,7 +617,7 @@ config NR_CPUS default "4096" if MAXSMP default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) default "8" if SMP - help + ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. The maximum supported value is 512 and the minimum value which makes sense is 2. @@ -628,7 +628,7 @@ config NR_CPUS config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" depends on X86_HT - help + ---help--- SMT scheduler support improves the CPU scheduler's decision making when dealing with Intel Pentium 4 chips with HyperThreading at a cost of slightly increased overhead in some places. If unsure say @@ -638,7 +638,7 @@ config SCHED_MC def_bool y prompt "Multi-core scheduler support" depends on X86_HT - help + ---help--- Multi-core scheduler support improves the CPU scheduler's decision making when dealing with multi-core CPU chips at a cost of slightly increased overhead in some places. If unsure say N here. @@ -648,7 +648,7 @@ source "kernel/Kconfig.preempt" config X86_UP_APIC bool "Local APIC support on uniprocessors" depends on X86_32 && !SMP && !X86_32_NON_STANDARD - help + ---help--- A local APIC (Advanced Programmable Interrupt Controller) is an integrated interrupt controller in the CPU. If you have a single-CPU system which has a processor with a local APIC, you can say Y here to @@ -661,7 +661,7 @@ config X86_UP_APIC config X86_UP_IOAPIC bool "IO-APIC support on uniprocessors" depends on X86_UP_APIC - help + ---help--- An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an SMP-capable replacement for PC-style interrupt controllers. Most SMP systems and many recent uniprocessor systems have one. @@ -686,7 +686,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" default n depends on X86_IO_APIC - help + ---help--- This option enables a workaround that fixes a source of spurious interrupts. This is recommended when threaded interrupt handling is used on systems where the generation of @@ -726,7 +726,7 @@ config X86_MCE_INTEL def_bool y prompt "Intel MCE features" depends on X86_64 && X86_MCE && X86_LOCAL_APIC - help + ---help--- Additional support for intel specific MCE features such as the thermal monitor. @@ -734,14 +734,14 @@ config X86_MCE_AMD def_bool y prompt "AMD MCE features" depends on X86_64 && X86_MCE && X86_LOCAL_APIC - help + ---help--- Additional support for AMD specific MCE features such as the DRAM Error Threshold. 
config X86_MCE_NONFATAL tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" depends on X86_32 && X86_MCE - help + ---help--- Enabling this feature starts a timer that triggers every 5 seconds which will look at the machine check registers to see if anything happened. Non-fatal problems automatically get corrected (but still logged). @@ -754,7 +754,7 @@ config X86_MCE_NONFATAL config X86_MCE_P4THERMAL bool "check for P4 thermal throttling interrupt." depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP) - help + ---help--- Enabling this feature will cause a message to be printed when the P4 enters thermal throttling. @@ -762,11 +762,11 @@ config VM86 bool "Enable VM86 support" if EMBEDDED default y depends on X86_32 - help - This option is required by programs like DOSEMU to run 16-bit legacy + ---help--- + This option is required by programs like DOSEMU to run 16-bit legacy code on X86 processors. It also may be needed by software like - XFree86 to initialize some video cards via BIOS. Disabling this - option saves about 6k. + XFree86 to initialize some video cards via BIOS. Disabling this + option saves about 6k. config TOSHIBA tristate "Toshiba Laptop support" @@ -840,33 +840,33 @@ config MICROCODE module will be called microcode. config MICROCODE_INTEL - bool "Intel microcode patch loading support" - depends on MICROCODE - default MICROCODE - select FW_LOADER - --help--- - This options enables microcode patch loading support for Intel - processors. - - For latest news and information on obtaining all the required - Intel ingredients for this driver, check: - . + bool "Intel microcode patch loading support" + depends on MICROCODE + default MICROCODE + select FW_LOADER + ---help--- + This options enables microcode patch loading support for Intel + processors. + + For latest news and information on obtaining all the required + Intel ingredients for this driver, check: + . config MICROCODE_AMD - bool "AMD microcode patch loading support" - depends on MICROCODE - select FW_LOADER - --help--- - If you select this option, microcode patch loading support for AMD - processors will be enabled. + bool "AMD microcode patch loading support" + depends on MICROCODE + select FW_LOADER + ---help--- + If you select this option, microcode patch loading support for AMD + processors will be enabled. - config MICROCODE_OLD_INTERFACE +config MICROCODE_OLD_INTERFACE def_bool y depends on MICROCODE config X86_MSR tristate "/dev/cpu/*/msr - Model-specific register support" - help + ---help--- This device gives privileged processes access to the x86 Model-Specific Registers (MSRs). It is a character device with major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr. @@ -875,7 +875,7 @@ config X86_MSR config X86_CPUID tristate "/dev/cpu/*/cpuid - CPU information support" - help + ---help--- This device gives processes access to the x86 CPUID instruction to be executed on a specific processor. It is a character device with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to @@ -927,7 +927,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" depends on !X86_NUMAQ - help + ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. @@ -935,7 +935,7 @@ config HIGHMEM64G bool "64GB" depends on !M386 && !M486 select X86_PAE - help + ---help--- Select this if you have a 32-bit processor and more than 4 gigabytes of physical RAM. 
@@ -946,7 +946,7 @@ choice prompt "Memory split" if EMBEDDED default VMSPLIT_3G depends on X86_32 - help + ---help--- Select the desired split between kernel and user memory. If the address range available to the kernel is less than the @@ -992,20 +992,20 @@ config HIGHMEM config X86_PAE bool "PAE (Physical Address Extension) Support" depends on X86_32 && !HIGHMEM4G - help + ---help--- PAE is required for NX support, and furthermore enables larger swapspace support for non-overcommit purposes. It has the cost of more pagetable lookup overhead, and also consumes more pagetable space per process. config ARCH_PHYS_ADDR_T_64BIT - def_bool X86_64 || X86_PAE + def_bool X86_64 || X86_PAE config DIRECT_GBPAGES bool "Enable 1GB pages for kernel pagetables" if EMBEDDED default y depends on X86_64 - help + ---help--- Allow the kernel linear mapping to use 1GB pages on CPUs that support it. This can improve the kernel's performance a tiny bit by reducing TLB pressure. If in doubt, say "Y". @@ -1016,7 +1016,7 @@ config NUMA depends on SMP depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) - help + ---help--- Enable NUMA (Non Uniform Memory Access) support. The kernel will try to allocate memory used by a CPU on the @@ -1039,19 +1039,19 @@ config K8_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" depends on X86_64 && NUMA && PCI - help - Enable K8 NUMA node topology detection. You should say Y here if - you have a multi processor AMD K8 system. This uses an old - method to read the NUMA configuration directly from the builtin - Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA - instead, which also takes priority if both are compiled in. + ---help--- + Enable K8 NUMA node topology detection. You should say Y here if + you have a multi processor AMD K8 system. This uses an old + method to read the NUMA configuration directly from the builtin + Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA + instead, which also takes priority if both are compiled in. config X86_64_ACPI_NUMA def_bool y prompt "ACPI NUMA detection" depends on X86_64 && NUMA && ACPI && PCI select ACPI_NUMA - help + ---help--- Enable ACPI SRAT based node topology detection. # Some NUMA nodes have memory ranges that span @@ -1066,7 +1066,7 @@ config NODES_SPAN_OTHER_NODES config NUMA_EMU bool "NUMA emulation" depends on X86_64 && NUMA - help + ---help--- Enable NUMA emulation. A flat machine will be split into virtual nodes when booted with "numa=fake=N", where N is the number of nodes. This is only useful for debugging. @@ -1079,7 +1079,7 @@ config NODES_SHIFT default "4" if X86_NUMAQ default "3" depends on NEED_MULTIPLE_NODES - help + ---help--- Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accomodate various tables. @@ -1134,61 +1134,61 @@ source "mm/Kconfig" config HIGHPTE bool "Allocate 3rd-level pagetables from highmem" depends on X86_32 && (HIGHMEM4G || HIGHMEM64G) - help + ---help--- The VM uses one page table entry for each page of physical memory. For systems with a lot of RAM, this can be wasteful of precious low memory. Setting this option will put user-space page table entries in high memory. config X86_CHECK_BIOS_CORRUPTION - bool "Check for low memory corruption" - help - Periodically check for memory corruption in low memory, which - is suspected to be caused by BIOS. 
Even when enabled in the - configuration, it is disabled at runtime. Enable it by - setting "memory_corruption_check=1" on the kernel command - line. By default it scans the low 64k of memory every 60 - seconds; see the memory_corruption_check_size and - memory_corruption_check_period parameters in - Documentation/kernel-parameters.txt to adjust this. - - When enabled with the default parameters, this option has - almost no overhead, as it reserves a relatively small amount - of memory and scans it infrequently. It both detects corruption - and prevents it from affecting the running system. - - It is, however, intended as a diagnostic tool; if repeatable - BIOS-originated corruption always affects the same memory, - you can use memmap= to prevent the kernel from using that - memory. + bool "Check for low memory corruption" + ---help--- + Periodically check for memory corruption in low memory, which + is suspected to be caused by BIOS. Even when enabled in the + configuration, it is disabled at runtime. Enable it by + setting "memory_corruption_check=1" on the kernel command + line. By default it scans the low 64k of memory every 60 + seconds; see the memory_corruption_check_size and + memory_corruption_check_period parameters in + Documentation/kernel-parameters.txt to adjust this. + + When enabled with the default parameters, this option has + almost no overhead, as it reserves a relatively small amount + of memory and scans it infrequently. It both detects corruption + and prevents it from affecting the running system. + + It is, however, intended as a diagnostic tool; if repeatable + BIOS-originated corruption always affects the same memory, + you can use memmap= to prevent the kernel from using that + memory. config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK - bool "Set the default setting of memory_corruption_check" + bool "Set the default setting of memory_corruption_check" depends on X86_CHECK_BIOS_CORRUPTION default y - help - Set whether the default state of memory_corruption_check is - on or off. + ---help--- + Set whether the default state of memory_corruption_check is + on or off. config X86_RESERVE_LOW_64K - bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" + bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" default y - help - Reserve the first 64K of physical RAM on BIOSes that are known - to potentially corrupt that memory range. A numbers of BIOSes are - known to utilize this area during suspend/resume, so it must not - be used by the kernel. + ---help--- + Reserve the first 64K of physical RAM on BIOSes that are known + to potentially corrupt that memory range. A numbers of BIOSes are + known to utilize this area during suspend/resume, so it must not + be used by the kernel. - Set this to N if you are absolutely sure that you trust the BIOS - to get all its memory reservations and usages right. + Set this to N if you are absolutely sure that you trust the BIOS + to get all its memory reservations and usages right. - If you have doubts about the BIOS (e.g. suspend/resume does not - work or there's kernel crashes after certain hardware hotplug - events) and it's not AMI or Phoenix, then you might want to enable - X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical - corruption patterns. + If you have doubts about the BIOS (e.g. 
suspend/resume does not + work or there's kernel crashes after certain hardware hotplug + events) and it's not AMI or Phoenix, then you might want to enable + X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical + corruption patterns. - Say Y if unsure. + Say Y if unsure. config MATH_EMULATION bool @@ -1254,7 +1254,7 @@ config MTRR_SANITIZER def_bool y prompt "MTRR cleanup support" depends on MTRR - help + ---help--- Convert MTRR layout from continuous to discrete, so X drivers can add writeback entries. @@ -1269,7 +1269,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT range 0 1 default "0" depends on MTRR_SANITIZER - help + ---help--- Enable mtrr cleanup default value config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT @@ -1277,7 +1277,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT range 0 7 default "1" depends on MTRR_SANITIZER - help + ---help--- mtrr cleanup spare entries default, it can be changed via mtrr_spare_reg_nr=N on the kernel command line. @@ -1285,7 +1285,7 @@ config X86_PAT bool prompt "x86 PAT support" depends on MTRR - help + ---help--- Use PAT attributes to setup page level cache control. PATs are the modern equivalents of MTRRs and are much more @@ -1300,20 +1300,20 @@ config EFI bool "EFI runtime service support" depends on ACPI ---help--- - This enables the kernel to use EFI runtime services that are - available (such as the EFI variable services). + This enables the kernel to use EFI runtime services that are + available (such as the EFI variable services). - This option is only useful on systems that have EFI firmware. - In addition, you should use the latest ELILO loader available - at in order to take advantage - of EFI runtime services. However, even with this option, the - resultant kernel should continue to boot on existing non-EFI - platforms. + This option is only useful on systems that have EFI firmware. + In addition, you should use the latest ELILO loader available + at in order to take advantage + of EFI runtime services. However, even with this option, the + resultant kernel should continue to boot on existing non-EFI + platforms. config SECCOMP def_bool y prompt "Enable seccomp to safely compute untrusted bytecode" - help + ---help--- This kernel feature is useful for number crunching applications that may need to compute untrusted bytecode during their execution. By using pipes or other transports made available to @@ -1333,8 +1333,8 @@ config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" depends on X86_64 select CC_STACKPROTECTOR_ALL - help - This option turns on the -fstack-protector GCC feature. This + ---help--- + This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of functions, a canary value on the stack just before the return address, and validates the value just before actually returning. Stack based buffer @@ -1351,7 +1351,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" - help + ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot but it is independent of the system firmware. And like a reboot @@ -1368,7 +1368,7 @@ config KEXEC config CRASH_DUMP bool "kernel crash dumps" depends on X86_64 || (X86_32 && HIGHMEM) - help + ---help--- Generate crash dump after being started by kexec. 
This should be normally only set in special crash dump kernels which are loaded in the main kernel with kexec-tools into @@ -1383,7 +1383,7 @@ config KEXEC_JUMP bool "kexec jump (EXPERIMENTAL)" depends on EXPERIMENTAL depends on KEXEC && HIBERNATION && X86_32 - help + ---help--- Jump between original kernel and kexeced kernel and invoke code in physical address mode via KEXEC @@ -1392,7 +1392,7 @@ config PHYSICAL_START default "0x1000000" if X86_NUMAQ default "0x200000" if X86_64 default "0x100000" - help + ---help--- This gives the physical address where the kernel is loaded. If kernel is a not relocatable (CONFIG_RELOCATABLE=n) then @@ -1433,7 +1433,7 @@ config PHYSICAL_START config RELOCATABLE bool "Build a relocatable kernel (EXPERIMENTAL)" depends on EXPERIMENTAL - help + ---help--- This builds a kernel image that retains relocation information so it can be loaded someplace besides the default 1MB. The relocations tend to make the kernel binary about 10% larger, @@ -1453,7 +1453,7 @@ config PHYSICAL_ALIGN default "0x100000" if X86_32 default "0x200000" if X86_64 range 0x2000 0x400000 - help + ---help--- This value puts the alignment restrictions on physical address where kernel is loaded and run from. Kernel is compiled for an address which meets above alignment restriction. @@ -1486,7 +1486,7 @@ config COMPAT_VDSO def_bool y prompt "Compat VDSO support" depends on X86_32 || IA32_EMULATION - help + ---help--- Map the 32-bit VDSO to the predictable old-style address too. ---help--- Say N here if you are running a sufficiently recent glibc @@ -1498,7 +1498,7 @@ config COMPAT_VDSO config CMDLINE_BOOL bool "Built-in kernel command line" default n - help + ---help--- Allow for specifying boot arguments to the kernel at build time. On some systems (e.g. embedded ones), it is necessary or convenient to provide some or all of the @@ -1516,7 +1516,7 @@ config CMDLINE string "Built-in kernel command string" depends on CMDLINE_BOOL default "" - help + ---help--- Enter arguments here that should be compiled into the kernel image and used at boot time. If the boot loader provides a command line at boot time, it is appended to this string to @@ -1533,7 +1533,7 @@ config CMDLINE_OVERRIDE bool "Built-in command line overrides boot loader arguments" default n depends on CMDLINE_BOOL - help + ---help--- Set this option to 'Y' to have the kernel ignore the boot loader command line, and use ONLY the built-in command line. @@ -1632,7 +1632,7 @@ if APM config APM_IGNORE_USER_SUSPEND bool "Ignore USER SUSPEND" - help + ---help--- This option will ignore USER SUSPEND requests. On machines with a compliant APM BIOS, you want to say N. However, on the NEC Versa M series notebooks, it is necessary to say Y because of a BIOS bug. @@ -1656,7 +1656,7 @@ config APM_DO_ENABLE config APM_CPU_IDLE bool "Make CPU Idle calls when idle" - help + ---help--- Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop. On some machines, this can activate improved power savings, such as a slowed CPU clock rate, when the machine is idle. These idle calls @@ -1667,7 +1667,7 @@ config APM_CPU_IDLE config APM_DISPLAY_BLANK bool "Enable console blanking using APM" - help + ---help--- Enable console blanking using the APM. Some laptops can use this to turn off the LCD backlight when the screen blanker of the Linux virtual console blanks the screen. 
Note that this is only used by @@ -1680,7 +1680,7 @@ config APM_DISPLAY_BLANK config APM_ALLOW_INTS bool "Allow interrupts during APM BIOS calls" - help + ---help--- Normally we disable external interrupts while we are making calls to the APM BIOS as a measure to lessen the effects of a badly behaving BIOS implementation. The BIOS should reenable interrupts if it @@ -1705,7 +1705,7 @@ config PCI bool "PCI support" default y select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) - help + ---help--- Find out whether you have a PCI motherboard. PCI is the name of a bus system, i.e. the way the CPU talks to the other stuff inside your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or @@ -1776,7 +1776,7 @@ config PCI_MMCONFIG config DMAR bool "Support for DMA Remapping Devices (EXPERIMENTAL)" depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL - help + ---help--- DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. These DMA remapping devices are reported via ACPI tables @@ -1798,29 +1798,29 @@ config DMAR_GFX_WA def_bool y prompt "Support for Graphics workaround" depends on DMAR - help - Current Graphics drivers tend to use physical address - for DMA and avoid using DMA APIs. Setting this config - option permits the IOMMU driver to set a unity map for - all the OS-visible memory. Hence the driver can continue - to use physical addresses for DMA. + ---help--- + Current Graphics drivers tend to use physical address + for DMA and avoid using DMA APIs. Setting this config + option permits the IOMMU driver to set a unity map for + all the OS-visible memory. Hence the driver can continue + to use physical addresses for DMA. config DMAR_FLOPPY_WA def_bool y depends on DMAR - help - Floppy disk drivers are know to bypass DMA API calls - thereby failing to work when IOMMU is enabled. This - workaround will setup a 1:1 mapping for the first - 16M to make floppy (an ISA device) work. + ---help--- + Floppy disk drivers are know to bypass DMA API calls + thereby failing to work when IOMMU is enabled. This + workaround will setup a 1:1 mapping for the first + 16M to make floppy (an ISA device) work. config INTR_REMAP bool "Support for Interrupt Remapping (EXPERIMENTAL)" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL - help - Supports Interrupt remapping for IO-APIC and MSI devices. - To use x2apic mode in the CPU's which support x2APIC enhancements or - to support platforms with CPU's having > 8 bit APIC ID, say Y. + ---help--- + Supports Interrupt remapping for IO-APIC and MSI devices. + To use x2apic mode in the CPU's which support x2APIC enhancements or + to support platforms with CPU's having > 8 bit APIC ID, say Y. source "drivers/pci/pcie/Kconfig" @@ -1834,7 +1834,7 @@ if X86_32 config ISA bool "ISA support" - help + ---help--- Find out whether you have ISA slots on your motherboard. ISA is the name of a bus system, i.e. the way the CPU talks to the other stuff inside your box. Other bus systems are PCI, EISA, MicroChannel @@ -1861,7 +1861,7 @@ source "drivers/eisa/Kconfig" config MCA bool "MCA support" - help + ---help--- MicroChannel Architecture is found in some IBM PS/2 machines and laptops. It is a bus system similar to PCI or ISA. See (and especially the web page given @@ -1871,7 +1871,7 @@ source "drivers/mca/Kconfig" config SCx200 tristate "NatSemi SCx200 support" - help + ---help--- This provides basic support for National Semiconductor's (now AMD's) Geode processors. 
The driver probes for the PCI-IDs of several on-chip devices, so its a good dependency @@ -1883,7 +1883,7 @@ config SCx200HR_TIMER tristate "NatSemi SCx200 27MHz High-Resolution Timer Support" depends on SCx200 && GENERIC_TIME default y - help + ---help--- This driver provides a clocksource built upon the on-chip 27MHz high-resolution timer. Its also a workaround for NSC Geode SC-1100's buggy TSC, which loses time when the @@ -1894,7 +1894,7 @@ config GEODE_MFGPT_TIMER def_bool y prompt "Geode Multi-Function General Purpose Timer (MFGPT) events" depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS - help + ---help--- This driver provides a clock event source based on the MFGPT timer(s) in the CS5535 and CS5536 companion chip for the geode. MFGPTs have a better resolution and max interval than the @@ -1903,7 +1903,7 @@ config GEODE_MFGPT_TIMER config OLPC bool "One Laptop Per Child support" default n - help + ---help--- Add support for detecting the unique features of the OLPC XO hardware. @@ -1928,16 +1928,16 @@ config IA32_EMULATION bool "IA32 Emulation" depends on X86_64 select COMPAT_BINFMT_ELF - help + ---help--- Include code to run 32-bit programs under a 64-bit kernel. You should likely turn this on, unless you're 100% sure that you don't have any 32-bit programs left. config IA32_AOUT - tristate "IA32 a.out support" - depends on IA32_EMULATION - help - Support old a.out binaries in the 32bit emulation. + tristate "IA32 a.out support" + depends on IA32_EMULATION + ---help--- + Support old a.out binaries in the 32bit emulation. config COMPAT def_bool y diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 085fef4d866..a95eaf0e582 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -50,7 +50,7 @@ config M386 config M486 bool "486" depends on X86_32 - help + ---help--- Select this for a 486 series processor, either Intel or one of the compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX, DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or @@ -59,7 +59,7 @@ config M486 config M586 bool "586/K5/5x86/6x86/6x86MX" depends on X86_32 - help + ---help--- Select this for an 586 or 686 series processor such as the AMD K5, the Cyrix 5x86, 6x86 and 6x86MX. This choice does not assume the RDTSC (Read Time Stamp Counter) instruction. @@ -67,21 +67,21 @@ config M586 config M586TSC bool "Pentium-Classic" depends on X86_32 - help + ---help--- Select this for a Pentium Classic processor with the RDTSC (Read Time Stamp Counter) instruction for benchmarking. config M586MMX bool "Pentium-MMX" depends on X86_32 - help + ---help--- Select this for a Pentium with the MMX graphics/multimedia extended instructions. config M686 bool "Pentium-Pro" depends on X86_32 - help + ---help--- Select this for Intel Pentium Pro chips. This enables the use of Pentium Pro extended instructions, and disables the init-time guard against the f00f bug found in earlier Pentiums. @@ -89,7 +89,7 @@ config M686 config MPENTIUMII bool "Pentium-II/Celeron(pre-Coppermine)" depends on X86_32 - help + ---help--- Select this for Intel chips based on the Pentium-II and pre-Coppermine Celeron core. This option enables an unaligned copy optimization, compiles the kernel with optimization flags @@ -99,7 +99,7 @@ config MPENTIUMII config MPENTIUMIII bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon" depends on X86_32 - help + ---help--- Select this for Intel chips based on the Pentium-III and Celeron-Coppermine core. 
This option enables use of some extended prefetch instructions in addition to the Pentium II @@ -108,14 +108,14 @@ config MPENTIUMIII config MPENTIUMM bool "Pentium M" depends on X86_32 - help + ---help--- Select this for Intel Pentium M (not Pentium-4 M) notebook chips. config MPENTIUM4 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" depends on X86_32 - help + ---help--- Select this for Intel Pentium 4 chips. This includes the Pentium 4, Pentium D, P4-based Celeron and Xeon, and Pentium-4 M (not Pentium M) chips. This option enables compile @@ -151,7 +151,7 @@ config MPENTIUM4 config MK6 bool "K6/K6-II/K6-III" depends on X86_32 - help + ---help--- Select this for an AMD K6-family processor. Enables use of some extended instructions, and passes appropriate optimization flags to GCC. @@ -159,14 +159,14 @@ config MK6 config MK7 bool "Athlon/Duron/K7" depends on X86_32 - help + ---help--- Select this for an AMD Athlon K7-family processor. Enables use of some extended instructions, and passes appropriate optimization flags to GCC. config MK8 bool "Opteron/Athlon64/Hammer/K8" - help + ---help--- Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables use of some extended instructions, and passes appropriate optimization flags to GCC. @@ -174,7 +174,7 @@ config MK8 config MCRUSOE bool "Crusoe" depends on X86_32 - help + ---help--- Select this for a Transmeta Crusoe processor. Treats the processor like a 586 with TSC, and sets some GCC optimization flags (like a Pentium Pro with no alignment requirements). @@ -182,13 +182,13 @@ config MCRUSOE config MEFFICEON bool "Efficeon" depends on X86_32 - help + ---help--- Select this for a Transmeta Efficeon processor. config MWINCHIPC6 bool "Winchip-C6" depends on X86_32 - help + ---help--- Select this for an IDT Winchip C6 chip. Linux and GCC treat this chip as a 586TSC with some extended instructions and alignment requirements. @@ -196,7 +196,7 @@ config MWINCHIPC6 config MWINCHIP3D bool "Winchip-2/Winchip-2A/Winchip-3" depends on X86_32 - help + ---help--- Select this for an IDT Winchip-2, 2A or 3. Linux and GCC treat this chip as a 586TSC with some extended instructions and alignment requirements. Also enable out of order memory @@ -206,19 +206,19 @@ config MWINCHIP3D config MGEODEGX1 bool "GeodeGX1" depends on X86_32 - help + ---help--- Select this for a Geode GX1 (Cyrix MediaGX) chip. config MGEODE_LX bool "Geode GX/LX" depends on X86_32 - help + ---help--- Select this for AMD Geode GX and LX processors. config MCYRIXIII bool "CyrixIII/VIA-C3" depends on X86_32 - help + ---help--- Select this for a Cyrix III or C3 chip. Presently Linux and GCC treat this chip as a generic 586. Whilst the CPU is 686 class, it lacks the cmov extension which gcc assumes is present when @@ -230,7 +230,7 @@ config MCYRIXIII config MVIAC3_2 bool "VIA C3-2 (Nehemiah)" depends on X86_32 - help + ---help--- Select this for a VIA C3 "Nehemiah". Selecting this enables usage of SSE and tells gcc to treat the CPU as a 686. Note, this kernel will not boot on older (pre model 9) C3s. @@ -238,14 +238,14 @@ config MVIAC3_2 config MVIAC7 bool "VIA C7" depends on X86_32 - help + ---help--- Select this for a VIA C7. Selecting this uses the correct cache shift and tells gcc to treat the CPU as a 686. config MPSC bool "Intel P4 / older Netburst based Xeon" depends on X86_64 - help + ---help--- Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey Xeon CPUs with Intel 64bit which is compatible with x86-64. 
Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the @@ -255,7 +255,7 @@ config MPSC config MCORE2 bool "Core 2/newer Xeon" - help + ---help--- Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) CPUs. You can distinguish newer from older Xeons by the CPU @@ -265,7 +265,7 @@ config MCORE2 config GENERIC_CPU bool "Generic-x86-64" depends on X86_64 - help + ---help--- Generic x86-64 CPU. Run equally well on all x86-64 CPUs. @@ -274,7 +274,7 @@ endchoice config X86_GENERIC bool "Generic x86 support" depends on X86_32 - help + ---help--- Instead of just including optimizations for the selected x86 variant (e.g. PII, Crusoe or Athlon), include some more generic optimizations as well. This will make the kernel @@ -319,7 +319,7 @@ config X86_XADD config X86_PPRO_FENCE bool "PentiumPro memory ordering errata workaround" depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1 - help + ---help--- Old PentiumPro multiprocessor systems had errata that could cause memory operations to violate the x86 ordering standard in rare cases. Enabling this option will attempt to work around some (but not all) @@ -412,14 +412,14 @@ config X86_DEBUGCTLMSR menuconfig PROCESSOR_SELECT bool "Supported processor vendors" if EMBEDDED - help + ---help--- This lets you choose what x86 vendor support code your kernel will include. config CPU_SUP_INTEL default y bool "Support Intel processors" if PROCESSOR_SELECT - help + ---help--- This enables detection, tunings and quirks for Intel processors You need this enabled if you want your kernel to run on an @@ -433,7 +433,7 @@ config CPU_SUP_CYRIX_32 default y bool "Support Cyrix processors" if PROCESSOR_SELECT depends on !64BIT - help + ---help--- This enables detection, tunings and quirks for Cyrix processors You need this enabled if you want your kernel to run on a @@ -446,7 +446,7 @@ config CPU_SUP_CYRIX_32 config CPU_SUP_AMD default y bool "Support AMD processors" if PROCESSOR_SELECT - help + ---help--- This enables detection, tunings and quirks for AMD processors You need this enabled if you want your kernel to run on an @@ -460,7 +460,7 @@ config CPU_SUP_CENTAUR_32 default y bool "Support Centaur processors" if PROCESSOR_SELECT depends on !64BIT - help + ---help--- This enables detection, tunings and quirks for Centaur processors You need this enabled if you want your kernel to run on a @@ -474,7 +474,7 @@ config CPU_SUP_CENTAUR_64 default y bool "Support Centaur processors" if PROCESSOR_SELECT depends on 64BIT - help + ---help--- This enables detection, tunings and quirks for Centaur processors You need this enabled if you want your kernel to run on a @@ -488,7 +488,7 @@ config CPU_SUP_TRANSMETA_32 default y bool "Support Transmeta processors" if PROCESSOR_SELECT depends on !64BIT - help + ---help--- This enables detection, tunings and quirks for Transmeta processors You need this enabled if you want your kernel to run on a @@ -502,7 +502,7 @@ config CPU_SUP_UMC_32 default y bool "Support UMC processors" if PROCESSOR_SELECT depends on !64BIT - help + ---help--- This enables detection, tunings and quirks for UMC processors You need this enabled if you want your kernel to run on a @@ -521,7 +521,7 @@ config X86_PTRACE_BTS bool "Branch Trace Store" default y depends on X86_DEBUGCTLMSR - help + ---help--- This adds a ptrace interface to the hardware's branch trace store. 
Debuggers may use it to collect an execution trace of the debugged diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index a38dd6064f1..ba4781b9389 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -7,7 +7,7 @@ source "lib/Kconfig.debug" config STRICT_DEVMEM bool "Filter access to /dev/mem" - help + ---help--- If this option is disabled, you allow userspace (root) access to all of memory, including kernel and userspace memory. Accidental access to this is obviously disastrous, but specific access can @@ -25,7 +25,7 @@ config STRICT_DEVMEM config X86_VERBOSE_BOOTUP bool "Enable verbose x86 bootup info messages" default y - help + ---help--- Enables the informational output from the decompression stage (e.g. bzImage) of the boot. If you disable this you will still see errors. Disable this if you want silent bootup. @@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP config EARLY_PRINTK bool "Early printk" if EMBEDDED default y - help + ---help--- Write kernel log output directly into the VGA buffer or to a serial port. @@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP bool "Early printk via EHCI debug port" default n depends on EARLY_PRINTK && PCI - help + ---help--- Write kernel log output directly into the EHCI debug port. This is useful for kernel debugging when your machine crashes very @@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP config DEBUG_STACKOVERFLOW bool "Check for stack overflows" depends on DEBUG_KERNEL - help + ---help--- This option will cause messages to be printed if free stack space drops below a certain limit. config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" depends on DEBUG_KERNEL - help + ---help--- Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T and sysrq-P debug output. @@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL - help + ---help--- Unmap pages from the kernel linear mapping after free_pages(). This results in a large slowdown, but helps to find certain types of memory corruptions. @@ -85,7 +85,7 @@ config DEBUG_PER_CPU_MAPS depends on DEBUG_KERNEL depends on SMP default n - help + ---help--- Say Y to verify that the per_cpu map being accessed has been setup. Adds a fair amount of code to kernel memory and decreases performance. @@ -96,7 +96,7 @@ config X86_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL select DEBUG_FS - help + ---help--- Say Y here if you want to show the kernel pagetable layout in a debugfs file. This information is only useful for kernel developers who are working in architecture specific areas of the kernel. @@ -108,7 +108,7 @@ config DEBUG_RODATA bool "Write protect kernel read-only data structures" default y depends on DEBUG_KERNEL - help + ---help--- Mark the kernel read-only data as write-protected in the pagetables, in order to catch accidental (and incorrect) writes to such const data. This is recommended so that we can catch kernel bugs sooner. @@ -118,7 +118,7 @@ config DEBUG_RODATA_TEST bool "Testcase for the DEBUG_RODATA feature" depends on DEBUG_RODATA default y - help + ---help--- This option enables a testcase for the DEBUG_RODATA feature as well as for the change_page_attr() infrastructure. 
If in doubt, say "N" @@ -126,7 +126,7 @@ config DEBUG_RODATA_TEST config DEBUG_NX_TEST tristate "Testcase for the NX non-executable stack feature" depends on DEBUG_KERNEL && m - help + ---help--- This option enables a testcase for the CPU NX capability and the software setup of this feature. If in doubt, say "N" @@ -134,7 +134,7 @@ config DEBUG_NX_TEST config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on X86_32 - help + ---help--- If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. This facilitates running more threads on a system and also reduces the pressure @@ -145,7 +145,7 @@ config DOUBLEFAULT default y bool "Enable doublefault exception handler" if EMBEDDED depends on X86_32 - help + ---help--- This option allows trapping of rare doublefault exceptions that would otherwise cause a system to silently reboot. Disabling this option saves about 4k and might cause you much additional grey @@ -155,7 +155,7 @@ config IOMMU_DEBUG bool "Enable IOMMU debugging" depends on GART_IOMMU && DEBUG_KERNEL depends on X86_64 - help + ---help--- Force the IOMMU to on even when you have less than 4GB of memory and add debugging code. On overflow always panic. And allow to enable IOMMU leak tracing. Can be disabled at boot @@ -171,7 +171,7 @@ config IOMMU_LEAK bool "IOMMU leak tracing" depends on DEBUG_KERNEL depends on IOMMU_DEBUG - help + ---help--- Add a simple leak tracer to the IOMMU code. This is useful when you are debugging a buggy device driver that leaks IOMMU mappings. @@ -224,25 +224,25 @@ choice config IO_DELAY_0X80 bool "port 0x80 based port-IO delay [recommended]" - help + ---help--- This is the traditional Linux IO delay used for in/out_p. It is the most tested hence safest selection here. config IO_DELAY_0XED bool "port 0xed based port-IO delay" - help + ---help--- Use port 0xed as the IO delay. This frees up port 0x80 which is often used as a hardware-debug port. config IO_DELAY_UDELAY bool "udelay based port-IO delay" - help + ---help--- Use udelay(2) as the IO delay method. This provides the delay while not having any side-effect on the IO port space. config IO_DELAY_NONE bool "no port-IO delay" - help + ---help--- No port-IO delay. Will break on old boxes that require port-IO delay for certain operations. Should work on most new machines. @@ -276,18 +276,18 @@ config DEBUG_BOOT_PARAMS bool "Debug boot parameters" depends on DEBUG_KERNEL depends on DEBUG_FS - help + ---help--- This option will cause struct boot_params to be exported via debugfs. config CPA_DEBUG bool "CPA self-test code" depends on DEBUG_KERNEL - help + ---help--- Do change_page_attr() self-tests every 30 seconds. config OPTIMIZE_INLINING bool "Allow gcc to uninline functions marked 'inline'" - help + ---help--- This option determines if the kernel forces gcc to inline the functions developers have marked 'inline'. Doing so takes away freedom from gcc to do what it thinks is best, which is desirable for the gcc 3.x series of @@ -300,4 +300,3 @@ config OPTIMIZE_INLINING If unsure, say N. 
endmenu - -- cgit v1.2.3 From 2678c07b07ac2076675e5d57653bdf02e9af1950 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 6 Feb 2009 20:46:06 +0530 Subject: Neither asm/types.h nor linux/types.h is required for arch/ia64/include/asm/fpu.h Signed-off-by: Jaswinder Singh Rajput --- arch/ia64/include/asm/fpu.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h index b6395ad1500..0c26157cffa 100644 --- a/arch/ia64/include/asm/fpu.h +++ b/arch/ia64/include/asm/fpu.h @@ -6,8 +6,6 @@ * David Mosberger-Tang */ -#include - /* floating point status register: */ #define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */ #define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */ -- cgit v1.2.3 From a034a010f48bf49efe25098c16c16b9708ccbba5 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:43 -0800 Subject: x86: unify pte_none Impact: cleanup Unify and demacro pte_none. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-2level.h | 2 -- arch/x86/include/asm/pgtable-3level.h | 5 ----- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 1 - 4 files changed, 5 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index e0d199fe1d8..c1774ac9da7 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) #endif -#define pte_none(x) (!(x).pte_low) - /* * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, * split up the 29 bits of offset into this range: diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 447da43cddb..07e0734f620 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -151,11 +151,6 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte_low == b.pte_low && a.pte_high == b.pte_high; } -static inline int pte_none(pte_t pte) -{ - return !pte.pte_low && !pte.pte_high; -} - /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 06bbcbd66e9..841e573b27f 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -429,6 +429,11 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base) } #endif /* CONFIG_PARAVIRT */ +static inline int pte_none(pte_t pte) +{ + return !pte.pte; +} + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ba09289acca..5906a41e849 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -171,7 +171,6 @@ static inline int pmd_bad(pmd_t pmd) return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } -#define pte_none(x) (!pte_val((x))) #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ -- cgit v1.2.3 From 8de01da35e9dbbb4a9d1e9d5a37df98395dfa558 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:44 -0800 Subject: x86: unify pte_same Impact: cleanup Unify and demacro pte_same. 
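For reference, the shape of the change, condensed from the hunks below: "demacro" means the per-variant #define is replaced by a single static inline in asm/pgtable.h, which type-checks its pte_t arguments and behaves like a real function for the compiler, at no runtime cost since it is still inlined.

    /* before: per-variant macro in pgtable_64.h (3-level had an
     * equivalent open-coded inline of its own) */
    #define pte_same(a, b)	((a).pte == (b).pte)

    /* after: one definition shared by all configurations in pgtable.h */
    #define __HAVE_ARCH_PTE_SAME
    static inline int pte_same(pte_t a, pte_t b)
    {
    	return a.pte == b.pte;
    }

__HAVE_ARCH_PTE_SAME tells the generic pgtable code to use this definition instead of its fallback.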
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 6 ------ arch/x86/include/asm/pgtable.h | 6 ++++++ arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 07e0734f620..51832fa0474 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -145,12 +145,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) #endif -#define __HAVE_ARCH_PTE_SAME -static inline int pte_same(pte_t a, pte_t b) -{ - return a.pte_low == b.pte_low && a.pte_high == b.pte_high; -} - /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 841e573b27f..e929d43753c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -434,6 +434,12 @@ static inline int pte_none(pte_t pte) return !pte.pte; } +#define __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t a, pte_t b) +{ + return a.pte == b.pte; +} + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 5906a41e849..ff2571865bf 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -134,8 +134,6 @@ static inline void native_pgd_clear(pgd_t *pgd) native_set_pgd(pgd, native_make_pgd(0)); } -#define pte_same(a, b) ((a).pte == (b).pte) - #endif /* !__ASSEMBLY__ */ #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) -- cgit v1.2.3 From 7c683851d96c8313586c0695b25ca41bde9f0f73 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:45 -0800 Subject: x86: unify pte_present Impact: cleanup Unify and demacro pte_present. 
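The unified helper, added in the hunk below, deliberately accepts either _PAGE_PRESENT or _PAGE_PROTNONE: a PROT_NONE mapping clears the hardware present bit so every access faults, but leaves _PAGE_PROTNONE set so software walkers still treat the entry as present. A minimal illustration, not from this patch:

    /* Illustration only: a pte with PROT_NONE protection bits. */
    static inline void protnone_example(void)
    {
    	unsigned long pfn = 0x1234;		/* arbitrary example frame */
    	pte_t pte = pfn_pte(pfn, PAGE_NONE);	/* _PAGE_PROTNONE, no _PAGE_PRESENT */

    	WARN_ON(!pte_present(pte));		/* still "present" to software */
    }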
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e929d43753c..17fcc17d6b4 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -440,6 +440,11 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte == b.pte; } +static inline int pte_present(pte_t a) +{ + return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); +} + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 72b020deb46..188073713fe 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -85,8 +85,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; -#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) - /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ff2571865bf..35b8dbc068e 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -169,8 +169,6 @@ static inline int pmd_bad(pmd_t pmd) return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } -#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) - #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ /* -- cgit v1.2.3 From 5ba7c91341be61e0942f792c237ac067d9f32f51 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:48 -0800 Subject: x86: unify pud_present Impact: cleanup Unify and demacro pud_present. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 5 ----- arch/x86/include/asm/pgtable.h | 7 +++++++ arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 51832fa0474..524bd91b995 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -28,11 +28,6 @@ static inline int pud_bad(pud_t pud) return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; } -static inline int pud_present(pud_t pud) -{ - return pud_val(pud) & _PAGE_PRESENT; -} - /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. 
In places where this is diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 17fcc17d6b4..c117b28df15 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -445,6 +445,13 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } +#if PAGETABLE_LEVELS > 2 +static inline int pud_present(pud_t pud) +{ + return pud_val(pud) & _PAGE_PRESENT; +} +#endif /* PAGETABLE_LEVELS > 2 */ + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 35b8dbc068e..acdc27b202c 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -194,7 +194,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset(pgd, address) \ ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) -#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT) static inline int pud_large(pud_t pte) { -- cgit v1.2.3 From 9f38d7e85e914f10a875f65d283432d55a12fc27 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:49 -0800 Subject: x86: unify pgd_present Impact: cleanup Unify and demacro pgd_present. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 7 +++++++ arch/x86/include/asm/pgtable_64.h | 1 - 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index c117b28df15..339e49a9bb6 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -452,6 +452,13 @@ static inline int pud_present(pud_t pud) } #endif /* PAGETABLE_LEVELS > 2 */ +#if PAGETABLE_LEVELS > 3 +static inline int pgd_present(pgd_t pgd) +{ + return pgd_val(pgd) & _PAGE_PRESENT; +} +#endif /* PAGETABLE_LEVELS > 3 */ + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index acdc27b202c..447634698f5 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -182,7 +182,6 @@ static inline int pmd_bad(pmd_t pmd) #define pgd_page_vaddr(pgd) \ ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK)) #define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) -#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) -- cgit v1.2.3 From 649e8ef60fac0a2f6960cdb090d73e78717ac065 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:50 -0800 Subject: x86: unify pmd_present Impact: cleanup Unify and demacro pmd_present. 
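After these patches the single asm/pgtable.h serves all three x86 page-table layouts: 2-level (32-bit non-PAE), 3-level (PAE) and 4-level (64-bit). Helpers for levels that a configuration folds away stay inside PAGETABLE_LEVELS guards, so the asm-generic folding headers (pgtable-nopud.h/-nopmd.h) can keep supplying the trivial versions for the missing levels. A condensed sketch of the resulting layout, with bodies as in the surrounding hunks:

    static inline int pmd_present(pmd_t pmd)
    {
    	return pmd_val(pmd) & _PAGE_PRESENT;
    }

    #if PAGETABLE_LEVELS > 2	/* PAE and 64-bit */
    static inline int pud_present(pud_t pud)
    {
    	return pud_val(pud) & _PAGE_PRESENT;
    }
    #endif

    #if PAGETABLE_LEVELS > 3	/* 64-bit only */
    static inline int pgd_present(pgd_t pgd)
    {
    	return pgd_val(pgd) & _PAGE_PRESENT;
    }
    #endif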
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 1 - arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 339e49a9bb6..147d3f097ab 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -445,6 +445,11 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) & _PAGE_PRESENT; +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 188073713fe..f35160730b6 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -87,7 +87,6 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) -#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) #define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 447634698f5..471b3058f3d 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -208,7 +208,6 @@ static inline int pud_large(pud_t pte) #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ pmd_index(address)) #define pmd_none(x) (!pmd_val((x))) -#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) -- cgit v1.2.3 From 4fea801ac95d6534a93aa01d3ac62be163d845af Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:51 -0800 Subject: x86: unify pmd_none Impact: cleanup Unify and demacro pmd_none. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 7 +++++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 147d3f097ab..2f38bbee77e 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -450,6 +450,13 @@ static inline int pmd_present(pmd_t pmd) return pmd_val(pmd) & _PAGE_PRESENT; } +static inline int pmd_none(pmd_t pmd) +{ + /* Only check low word on 32-bit platforms, since it might be + out of sync with upper half. 
*/ + return !(unsigned long)native_pmd_val(pmd); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f35160730b6..26e73569223 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -85,8 +85,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; -/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ -#define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 471b3058f3d..f3ad8943324 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -207,7 +207,6 @@ static inline int pud_large(pud_t pte) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ pmd_index(address)) -#define pmd_none(x) (!pmd_val((x))) #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) -- cgit v1.2.3 From c5f040b12b2381591932a007432e7ed86b3f2796 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:52 -0800 Subject: x86: unify pgd_page_vaddr Impact: cleanup Unify and demacro pgd_page_vaddr. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 2f38bbee77e..cca4321e076 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -469,6 +469,11 @@ static inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_PRESENT; } + +static inline unsigned long pgd_page_vaddr(pgd_t pgd) +{ + return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK); +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index f3ad8943324..4f8dbb99b69 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -179,8 +179,6 @@ static inline int pmd_bad(pmd_t pmd) /* * Level 4 access. */ -#define pgd_page_vaddr(pgd) \ - ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK)) #define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) -- cgit v1.2.3 From 6fff47e3ac5e17f7e164ac4ff9ea29aba3c54d73 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:53 -0800 Subject: x86: unify pud_page_vaddr Impact: cleanup Unify and demacro pud_page_vaddr. 
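All of the *_page_vaddr() helpers reduce to the same two steps: mask the flag bits out of the entry to recover the physical address of the next-level table, then convert it with __va(). That works for these upper-level tables because they are allocated from lowmem and so are covered by the kernel direct mapping. The unified pud variant from the hunk below, annotated:

    static inline unsigned long pud_page_vaddr(pud_t pud)
    {
    	/* PTE_PFN_MASK clears the low attribute bits and the bits above
    	 * the physical address range (including NX), leaving the physical
    	 * address of the pmd page; __va() gives its direct-mapping address. */
    	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
    }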
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 3 --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 524bd91b995..542616ac192 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -117,9 +117,6 @@ static inline void pud_clear(pud_t *pudp) #define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK)) - - /* Find an entry in the second-level page table.. */ #define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \ pmd_index(address)) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index cca4321e076..4638b4af675 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -462,6 +462,11 @@ static inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_PRESENT; } + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 4f8dbb99b69..9875e40c058 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -185,8 +185,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* PUD - Level3 access */ /* to find an entry in a page-table-directory. */ -#define pud_page_vaddr(pud) \ - ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK)) #define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT)) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset(pgd, address) \ -- cgit v1.2.3 From aca159dbb13a5221819d5b3849b8c013f4829e9e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:54 -0800 Subject: x86: include pgtable_SIZE.h earlier We'll need the definitions sooner. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 4638b4af675..bd38feb3492 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -429,6 +429,16 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base) } #endif /* CONFIG_PARAVIRT */ +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_X86_32 +# include "pgtable_32.h" +#else +# include "pgtable_64.h" +#endif + +#ifndef __ASSEMBLY__ + static inline int pte_none(pte_t pte) { return !pte.pte; @@ -483,12 +493,6 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_X86_32 -# include "pgtable_32.h" -#else -# include "pgtable_64.h" -#endif - /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * -- cgit v1.2.3 From f476961cb16312fe4cb80b2b457ef9acf220a7fc Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:55 -0800 Subject: x86: unify pud_page Impact: cleanup Unify and demacro pud_page. 
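pud_page() is the struct-page view of the same entry: pfn_to_page() on the frame number rather than __va() on the physical address. Both views exist because most walkers just want a pointer they can index (the _vaddr form), while code that manages the table page itself needs the struct page; on 32-bit with CONFIG_HIGHPTE the pte pages can even live in highmem, which is why pte_offset_map() maps them through pmd_page() and kmap_atomic_pte() instead of using a direct-mapped address.

    /* The struct-page counterpart of pud_page_vaddr() (body from the hunk
     * below): same entry, pfn_to_page() on the frame number. */
    static inline struct page *pud_page(pud_t pud)
    {
    	return pfn_to_page(pud_val(pud) >> PAGE_SHIFT);
    }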
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 2 -- arch/x86/include/asm/pgtable.h | 6 ++++++ arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 542616ac192..28ba09ac230 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -115,8 +115,6 @@ static inline void pud_clear(pud_t *pudp) write_cr3(pgd); } -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) - /* Find an entry in the second-level page table.. */ #define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \ pmd_index(address)) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index bd38feb3492..a871ae55a5c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -438,6 +438,7 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base) #endif #ifndef __ASSEMBLY__ +#include static inline int pte_none(pte_t pte) { @@ -477,6 +478,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud) { return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); } + +static inline struct page *pud_page(pud_t pud) +{ + return pfn_to_page(pud_val(pud) >> PAGE_SHIFT); +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 9875e40c058..7edacc7ec89 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -185,7 +185,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* PUD - Level3 access */ /* to find an entry in a page-table-directory. */ -#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT)) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset(pgd, address) \ ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) -- cgit v1.2.3 From 777cba16aac5a1096db0b936912eb7fd06fb0cc5 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:56 -0800 Subject: x86: unify pgd_page Impact: cleanup Unify and demacro pgd_page. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a871ae55a5c..c1a36dd1e59 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -495,6 +495,11 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) { return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK); } + +static inline struct page *pgd_page(pgd_t pgd) +{ + return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT); +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 7edacc7ec89..02477ad40fc 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -179,7 +179,6 @@ static inline int pmd_bad(pmd_t pmd) /* * Level 4 access. 
*/ -#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) -- cgit v1.2.3 From 7cfb81024bc1dbe8ad2bf5affd58a6a7ad4172ba Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:57 -0800 Subject: x86: unify pud_index Impact: cleanup Unify and demacro pud_index. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 6 ++++++ arch/x86/include/asm/pgtable_64.h | 2 -- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index c1a36dd1e59..a51a97ade63 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -500,6 +500,12 @@ static inline struct page *pgd_page(pgd_t pgd) { return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT); } + +/* to find an entry in a page-table-directory. */ +static inline unsigned pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 02477ad40fc..c17c30f5751 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -183,8 +183,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) /* PUD - Level3 access */ -/* to find an entry in a page-table-directory. */ -#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset(pgd, address) \ ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) -- cgit v1.2.3 From 3d081b1812bd4de2bbef58c6d598ddf45493010e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:58 -0800 Subject: x86: unify pud_offset Impact: cleanup Unify and demacro pud_offset. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a51a97ade63..decccb0a641 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -506,6 +506,11 @@ static inline unsigned pud_index(unsigned long address) { return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); } + +static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) +{ + return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index c17c30f5751..958dc1e7335 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -183,8 +183,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) /* PUD - Level3 access */ -#define pud_offset(pgd, address) \ - ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) static inline int pud_large(pud_t pte) { -- cgit v1.2.3 From 3ffb3564cd3cd59de8a0d74430ffe2d43ae11f19 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:30:59 -0800 Subject: x86: unify pmd_page_vaddr Impact: cleanup Unify and demacro pmd_page_vaddr. 
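pud_offset(), unified just above, is the one-step walk pattern that the following patches repeat for pmd_offset() and pte_offset_kernel(): virtual address of the next-level table plus the slot index derived from the address being translated. Restated with a comment:

    static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
    {
    	/* base of the pud table (pgd_page_vaddr) + slot for this address */
    	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
    }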
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 3 --- arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index decccb0a641..3789c05bf30 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -468,6 +468,11 @@ static inline int pmd_none(pmd_t pmd) return !(unsigned long)native_pmd_val(pmd); } +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 26e73569223..f7f7e297a0a 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -126,9 +126,6 @@ static inline int pud_large(pud_t pud) { return 0; } #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) -#define pmd_page_vaddr(pmd) \ - ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK)) - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 958dc1e7335..7a510e8a878 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,7 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) -- cgit v1.2.3 From 20063ca4eb26d4b10f01d59925deea4aeee415e8 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:00 -0800 Subject: x86: unify pmd_page Impact: cleanup Unify and demacro pmd_page. 
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3789c05bf30..38330d6288f 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -473,6 +473,11 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); } +static inline struct page *pmd_page(pmd_t pmd) +{ + return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f7f7e297a0a..8714110b4a7 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -124,8 +124,6 @@ static inline int pud_large(pud_t pud) { return 0; } #define pte_offset_kernel(dir, address) \ ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) -#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 7a510e8a878..97f24d2d60d 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,8 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) - #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ pmd_index(address)) -- cgit v1.2.3 From e24d7eee0beda24504bf6a4aa03be68328557475 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:01 -0800 Subject: x86: unify pmd_index Impact: cleanup Unify and demacro pmd_index. 
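The *_index() helpers are pure bit arithmetic on the virtual address. A standalone user-space illustration (not kernel code); the shift values and the 512-entry table size are the usual x86-64 four-level constants and are assumptions of this sketch, not taken from the patch:

    #include <stdio.h>

    #define PAGE_SHIFT   12		/* 4 KB pages */
    #define PMD_SHIFT    21		/* each pmd entry maps 2 MB */
    #define PUD_SHIFT    30		/* each pud entry maps 1 GB */
    #define PGDIR_SHIFT  39		/* each pgd entry maps 512 GB */
    #define PTRS_PER_TBL 512ULL	/* entries per table at every level */

    int main(void)
    {
    	unsigned long long addr = 0xffff8800075a3b10ULL;	/* arbitrary example */

    	printf("pgd=%llu pud=%llu pmd=%llu pte=%llu page-offset=%llu\n",
    	       (addr >> PGDIR_SHIFT) & (PTRS_PER_TBL - 1),
    	       (addr >> PUD_SHIFT)   & (PTRS_PER_TBL - 1),
    	       (addr >> PMD_SHIFT)   & (PTRS_PER_TBL - 1),
    	       (addr >> PAGE_SHIFT)  & (PTRS_PER_TBL - 1),
    	       addr & ((1ULL << PAGE_SHIFT) - 1));
    	return 0;
    }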
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 11 +++++++++++ arch/x86/include/asm/pgtable_32.h | 9 --------- arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 38330d6288f..4ec24b6d099 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -478,6 +478,17 @@ static inline struct page *pmd_page(pmd_t pmd) return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT); } +/* + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] + * + * this macro returns the index of the entry in the pmd page which would + * control the given virtual address + */ +static inline unsigned pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 8714110b4a7..40b066215bb 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -104,15 +104,6 @@ extern unsigned long pg0[]; static inline int pud_large(pud_t pud) { return 0; } -/* - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] - * - * this macro returns the index of the entry in the pmd page which would - * control the given virtual address - */ -#define pmd_index(address) \ - (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) - /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] * diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 97f24d2d60d..15f42d6ee2f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,7 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ pmd_index(address)) #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) -- cgit v1.2.3 From 01ade20d5a22e6ef002cbb751dddc3a01a78f998 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:02 -0800 Subject: x86: unify pmd_offset Impact: cleanup Unify and demacro pmd_offset. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 4 ---- arch/x86/include/asm/pgtable.h | 6 ++++++ arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 28ba09ac230..7ad9d05710b 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -115,10 +115,6 @@ static inline void pud_clear(pud_t *pudp) write_cr3(pgd); } -/* Find an entry in the second-level page table.. */ -#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \ - pmd_index(address)) - #ifdef CONFIG_SMP static inline pte_t native_ptep_get_and_clear(pte_t *ptep) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 4ec24b6d099..a7dbb05075d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -504,6 +504,12 @@ static inline struct page *pud_page(pud_t pud) { return pfn_to_page(pud_val(pud) >> PAGE_SHIFT); } + +/* Find an entry in the second-level page table.. 
*/ +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 15f42d6ee2f..78269656cf0 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,8 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ - pmd_index(address)) #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) -- cgit v1.2.3 From bd44d64db1db6acf2dea9ba130b8cb0a54e1dabd Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:03 -0800 Subject: x86: remove redundant pfn_pmd definition Impact: cleanup It's already defined in pgtable.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable_64.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 78269656cf0..6cc4133705b 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,7 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) -- cgit v1.2.3 From 3180fba0eec0d14e4ac8183a90d643d0d3383c75 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:04 -0800 Subject: x86: unify pmd_pfn Impact: cleanup Unify and demacro pmd_pfn. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a7dbb05075d..532144c2f1c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -510,6 +510,11 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); } + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 6cc4133705b..279ddc6a4eb 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -191,8 +191,6 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) - #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ _PAGE_FILE }) -- cgit v1.2.3 From 97e2817d3423d753fd2f80ea936a370032846382 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:05 -0800 Subject: x86: unify pmd_pfn Impact: cleanup Unify pmd_pfn. Unfortunately it can't be demacroed because it has a cyclic dependency on linux/mm.h:page_to_nid(). 
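The helper actually touched here is mk_pte(), as the hunks below show. The cycle it runs into: asm/pgtable.h is pulled in by linux/mm.h, while page_to_pfn() (and, on some memory models, page_to_nid()) only becomes available once linux/mm.h and the memory-model headers are fully in scope. Keeping mk_pte() a macro defers the reference to the call site, where those definitions are visible:

    /* Kept as a macro (from the hunk below); page_to_pfn() resolves at the
     * point of use, not when asm/pgtable.h is parsed. */
    #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))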
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 9 +++++++++ arch/x86/include/asm/pgtable_32.h | 7 ------- arch/x86/include/asm/pgtable_64.h | 3 --- 3 files changed, 9 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 532144c2f1c..49b5cff78c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -489,6 +489,15 @@ static inline unsigned pmd_index(unsigned long address) return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * (Currently stuck as a macro because of indirect forward reference + * to linux/mm.h:page_to_nid()) + */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 40b066215bb..6335be843c1 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -95,13 +95,6 @@ extern unsigned long pg0[]; # include #endif -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - - static inline int pud_large(pud_t pud) { return 0; } /* diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 279ddc6a4eb..adce05628a0 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -198,9 +198,6 @@ static inline int pud_large(pud_t pte) /* PTE - Level 1 access. */ -/* page, protection -> pte */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot)) - #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ pte_index((address))) -- cgit v1.2.3 From 346309cff6127a38731cf102de3413a562700b84 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:06 -0800 Subject: x86: unify pte_index Impact: cleanup Unify and demacro pte_index. 
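A quick check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and PTRS_PER_PTE == 512, so the index is address bits 12..20:

/*
 * pte_index(0x00abc123) == (0x00abc123 >> 12) & 0x1ff
 *                       == 0xabc & 0x1ff
 *                       == 0xbc        (entry 188 of the pte page)
 */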
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 11 +++++++++++ arch/x86/include/asm/pgtable_32.h | 8 -------- arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 49b5cff78c2..10a8c2e5178 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -498,6 +498,17 @@ static inline unsigned pmd_index(unsigned long address) */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +/* + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] + * + * this function returns the index of the entry in the pte page which would + * control the given virtual address + */ +static inline unsigned pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 6335be843c1..00d298e14a8 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -97,14 +97,6 @@ extern unsigned long pg0[]; static inline int pud_large(pud_t pud) { return 0; } -/* - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] - * - * this macro returns the index of the entry in the pte page which would - * control the given virtual address - */ -#define pte_index(address) \ - (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) \ ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index adce05628a0..e7321bc8aa8 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -198,7 +198,6 @@ static inline int pud_large(pud_t pte) /* PTE - Level 1 access. */ -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ pte_index((address))) -- cgit v1.2.3 From 3fbc2444f465710cdf0c832461a6a14338437453 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:07 -0800 Subject: x86: unify pte_offset_kernel Impact: cleanup Unify and demacro pte_offset_kernel. 
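For orientation, a rough sketch (not from the patch; the function name is invented) of how the unified helpers compose into a kernel-address lookup. Locking and the pgd/pud sanity checks a real caller would add are omitted:

static pte_t *example_lookup_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* pgd_offset_k() assumed from the existing headers */
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}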
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 3 --- arch/x86/include/asm/pgtable_64.h | 3 --- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 10a8c2e5178..c61b37af1f2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -509,6 +509,11 @@ static inline unsigned pte_index(unsigned long address) return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) +{ + return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 00d298e14a8..133fc4e4529 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -97,9 +97,6 @@ extern unsigned long pg0[]; static inline int pud_large(pud_t pud) { return 0; } -#define pte_offset_kernel(dir, address) \ - ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e7321bc8aa8..a8bfb75c76a 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -198,9 +198,6 @@ static inline int pud_large(pud_t pte) /* PTE - Level 1 access. */ -#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ - pte_index((address))) - /* x86-64 always has all page tables mapped. */ #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) -- cgit v1.2.3 From 3f6cbef1d7f474d16f3a824c6d2910d930778fbd Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:08 -0800 Subject: x86: unify pud_large Impact: cleanup Unify and demacro pud_large. 
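A side note, not part of the patch: both _PAGE_PSE and _PAGE_PRESENT set at the pud level means the entry maps a 1 GiB page directly on x86-64 (the gbpages case), so a walker uses pud_large() to stop early. Extending the lookup sketch shown a few patches above, the check would slot in right after pud_offset():

	if (pud_large(*pud))
		return NULL;	/* 1 GiB mapping: no pmd/pte levels below */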
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 6 ++++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 6 ------ 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index c61b37af1f2..0c734e2a90c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -540,6 +540,12 @@ static inline unsigned long pmd_pfn(pmd_t pmd) { return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; } + +static inline int pud_large(pud_t pud) +{ + return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == + (_PAGE_PSE | _PAGE_PRESENT); +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 133fc4e4529..ad7830bdc9a 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -95,8 +95,6 @@ extern unsigned long pg0[]; # include #endif -static inline int pud_large(pud_t pud) { return 0; } - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index a8bfb75c76a..a85ac14df35 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -184,12 +184,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* PUD - Level3 access */ -static inline int pud_large(pud_t pte) -{ - return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == - (_PAGE_PSE | _PAGE_PRESENT); -} - /* PMD - Level 2 access */ #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ -- cgit v1.2.3 From 30f103167fcf2b08de64f5f37ece6bfff7392290 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:09 -0800 Subject: x86: unify pgd_bad Impact: cleanup Unify and demacro pgd_bad. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 0c734e2a90c..ebcb60e6a96 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -574,6 +574,11 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) { return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); } + +static inline int pgd_bad(pgd_t pgd) +{ + return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index a85ac14df35..1dfc44932f6 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -154,11 +154,6 @@ static inline void native_pgd_clear(pgd_t *pgd) #ifndef __ASSEMBLY__ -static inline int pgd_bad(pgd_t pgd) -{ - return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; -} - static inline int pud_bad(pud_t pud) { return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; -- cgit v1.2.3 From a61bb29af47b0e4052566d25f3391894306a23fd Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:10 -0800 Subject: x86: unify pgd_bad Impact: cleanup Unify and demacro pgd_bad. 
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 5 ----- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 5 ----- 3 files changed, 5 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 7ad9d05710b..b92524eec20 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -23,11 +23,6 @@ static inline int pud_none(pud_t pud) return pud_val(pud) == 0; } -static inline int pud_bad(pud_t pud) -{ - return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; -} - /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index ebcb60e6a96..38882f6cc82 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -546,6 +546,11 @@ static inline int pud_large(pud_t pud) return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == (_PAGE_PSE | _PAGE_PRESENT); } + +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 1dfc44932f6..fe8be33df3d 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -154,11 +154,6 @@ static inline void native_pgd_clear(pgd_t *pgd) #ifndef __ASSEMBLY__ -static inline int pud_bad(pud_t pud) -{ - return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; -} - static inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; -- cgit v1.2.3 From 99510238bb428091e7caba020bc5e18b5f30b619 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:11 -0800 Subject: x86: unify pmd_bad Impact: cleanup Unify and demacro pmd_bad. 
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 5 ----- 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 38882f6cc82..72bf53ef60b 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -514,6 +514,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); } +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index ad7830bdc9a..5362632df75 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -85,8 +85,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; -#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) - #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) #ifdef CONFIG_X86_PAE diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index fe8be33df3d..d230e28a5f3 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -154,11 +154,6 @@ static inline void native_pgd_clear(pgd_t *pgd) #ifndef __ASSEMBLY__ -static inline int pmd_bad(pmd_t pmd) -{ - return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; -} - #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ /* -- cgit v1.2.3 From cc290ca38cc4c78b0d6175633232f05b8d8732ab Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:12 -0800 Subject: x86: unify pages_to_mb Impact: cleanup Unify and demacro pages_to_mb. 
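A quick check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 * pages_to_mb(n) == n >> (20 - 12) == n >> 8
 *
 * e.g. 262144 pages of 4 KiB (1 GiB in total): 262144 >> 8 == 1024 MB
 */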
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_32.h | 2 -- arch/x86/include/asm/pgtable_64.h | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 72bf53ef60b..d4cbc8188c8 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -519,6 +519,11 @@ static inline int pmd_bad(pmd_t pmd) return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } +static inline unsigned long pages_to_mb(unsigned long npg) +{ + return npg >> (20 - PAGE_SHIFT); +} + #if PAGETABLE_LEVELS > 2 static inline int pud_present(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 5362632df75..10c71abae07 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -85,8 +85,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; -#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) - #ifdef CONFIG_X86_PAE # include #else diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index d230e28a5f3..6c6c3c34bc5 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -154,8 +154,6 @@ static inline void native_pgd_clear(pgd_t *pgd) #ifndef __ASSEMBLY__ -#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ - /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. -- cgit v1.2.3 From deb79cfb365c96ff960570d1bcf2c205424b6195 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:13 -0800 Subject: x86: unify pud_none Impact: cleanup Unify and demacro pud_none. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-3level.h | 5 ----- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 1 - 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index b92524eec20..3f13cdf6115 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -18,11 +18,6 @@ printk("%s:%d: bad pgd %p(%016Lx).\n", \ __FILE__, __LINE__, &(e), pgd_val(e)) -static inline int pud_none(pud_t pud) -{ - return pud_val(pud) == 0; -} - /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. 
In places where this is diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index d4cbc8188c8..0ef49f3ebc8 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -525,6 +525,11 @@ static inline unsigned long pages_to_mb(unsigned long npg) } #if PAGETABLE_LEVELS > 2 +static inline int pud_none(pud_t pud) +{ + return pud_val(pud) == 0; +} + static inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_PRESENT; diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 6c6c3c34bc5..d58c2ee15c3 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -68,7 +68,6 @@ extern void paging_init(void); __FILE__, __LINE__, &(e), pgd_val(e)) #define pgd_none(x) (!pgd_val(x)) -#define pud_none(x) (!pud_val(x)) struct mm_struct; -- cgit v1.2.3 From 7325cc2e333cdaaabd2103552458876ea85adb33 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:14 -0800 Subject: x86: unify pgd_none Impact: cleanup Unify and demacro pgd_none. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/include/asm/pgtable_64.h | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 0ef49f3ebc8..18afcd31e76 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -599,6 +599,11 @@ static inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } + +static inline int pgd_none(pgd_t pgd) +{ + return !pgd_val(pgd); +} #endif /* PAGETABLE_LEVELS > 3 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index d58c2ee15c3..3b92a4ca403 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -67,8 +67,6 @@ extern void paging_init(void); printk("%s:%d: bad pgd %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pgd_val(e)) -#define pgd_none(x) (!pgd_val(x)) - struct mm_struct; void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); -- cgit v1.2.3 From 6cf7150084500962b8e225e2409ec01ed06a2c71 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:15 -0800 Subject: x86: unify io_remap_pfn_range Impact: cleanup Unify io_remap_pfn_range. Don't demacro yet. 
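For context, the call site this macro serves is typically a driver mmap handler exposing an MMIO region. The sketch below is not from the patch and the mydrv_* names are invented:

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = mydrv_mmio_phys >> PAGE_SHIFT;	/* hypothetical MMIO base address */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}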
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 3 +++ arch/x86/include/asm/pgtable_32.h | 3 --- arch/x86/include/asm/pgtable_64.h | 3 --- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 18afcd31e76..9754d06ffe6 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -524,6 +524,9 @@ static inline unsigned long pages_to_mb(unsigned long npg) return npg >> (20 - PAGE_SHIFT); } +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #if PAGETABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 10c71abae07..1952bb762aa 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -133,7 +133,4 @@ do { \ #define kern_addr_valid(kaddr) (0) #endif -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - #endif /* _ASM_X86_PGTABLE_32_H */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 3b92a4ca403..100ac483a0b 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -205,9 +205,6 @@ extern int direct_gbpages; extern int kern_addr_valid(unsigned long addr); extern void cleanup_highmap(void); -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -- cgit v1.2.3 From 18a7a199f97a7509fb987722e543f1aac3d7ada5 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:16 -0800 Subject: x86: add and use pgd/pud/pmd_flags Add pgd/pud/pmd_flags which are analogous to pte_flags, and use them where-ever we only care about testing the flags portions of the respective entries. 
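For orientation (illustration, not part of the patch): PTE_FLAGS_MASK is the complement of PTE_PFN_MASK, so these helpers strip the frame number and leave only the attribute bits:

/*
 * pmd_flags(pmd) == native_pmd_val(pmd) & PTE_FLAGS_MASK
 *
 * which lets a test such as pmd_bad() compare the attribute bits
 * directly against _KERNPG_TABLE instead of masking PTE_PFN_MASK
 * out by hand at every call site, as the hunks below show.
 */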
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page.h | 15 +++++++++++++++ arch/x86/include/asm/pgtable.h | 16 ++++++++-------- 2 files changed, 23 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index e9873a2e869..0b16b64a8fe 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd) return pgd.pgd; } +static inline pgdval_t pgd_flags(pgd_t pgd) +{ + return native_pgd_val(pgd) & PTE_FLAGS_MASK; +} + #if PAGETABLE_LEVELS >= 3 #if PAGETABLE_LEVELS == 4 typedef struct { pudval_t pud; } pud_t; @@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud) } #endif /* PAGETABLE_LEVELS == 4 */ +static inline pudval_t pud_flags(pud_t pud) +{ + return native_pud_val(pud) & PTE_FLAGS_MASK; +} + typedef struct { pmdval_t pmd; } pmd_t; static inline pmd_t native_make_pmd(pmdval_t val) @@ -128,6 +138,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) { return pmd.pmd; } + +static inline pmdval_t pmd_flags(pmd_t pmd) +{ + return native_pmd_val(pmd) & PTE_FLAGS_MASK; +} #else /* PAGETABLE_LEVELS == 2 */ #include diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 9754d06ffe6..c811d76d6fd 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -236,7 +236,7 @@ static inline unsigned long pte_pfn(pte_t pte) static inline int pmd_large(pmd_t pte) { - return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == + return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == (_PAGE_PSE | _PAGE_PRESENT); } @@ -458,7 +458,7 @@ static inline int pte_present(pte_t a) static inline int pmd_present(pmd_t pmd) { - return pmd_val(pmd) & _PAGE_PRESENT; + return pmd_flags(pmd) & _PAGE_PRESENT; } static inline int pmd_none(pmd_t pmd) @@ -516,7 +516,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) static inline int pmd_bad(pmd_t pmd) { - return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; + return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; } static inline unsigned long pages_to_mb(unsigned long npg) @@ -535,7 +535,7 @@ static inline int pud_none(pud_t pud) static inline int pud_present(pud_t pud) { - return pud_val(pud) & _PAGE_PRESENT; + return pud_flags(pud) & _PAGE_PRESENT; } static inline unsigned long pud_page_vaddr(pud_t pud) @@ -561,20 +561,20 @@ static inline unsigned long pmd_pfn(pmd_t pmd) static inline int pud_large(pud_t pud) { - return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == + return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == (_PAGE_PSE | _PAGE_PRESENT); } static inline int pud_bad(pud_t pud) { - return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; + return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; } #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 static inline int pgd_present(pgd_t pgd) { - return pgd_val(pgd) & _PAGE_PRESENT; + return pgd_flags(pgd) & _PAGE_PRESENT; } static inline unsigned long pgd_page_vaddr(pgd_t pgd) @@ -600,7 +600,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; + return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) -- cgit v1.2.3 From 26c8e3179933c5c9071b16db76ab6de58a787d06 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 5 Feb 2009 11:31:17 -0800 
Subject: x86: make pgd/pud/pmd/pte_none consistent The _none test is done differently for every level of the pagetable. Standardize them by: 1: Use the native_X_val to extract the raw entry, with no need to go via paravirt_ops, diff -r 1d0646d0d319 arch/x86/include/asm/pgtable.h, and 2: Compare with 0 rather than using a boolean !, since they are actually values and not booleans. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index c811d76d6fd..a80a956ae65 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -465,7 +465,7 @@ static inline int pmd_none(pmd_t pmd) { /* Only check low word on 32-bit platforms, since it might be out of sync with upper half. */ - return !(unsigned long)native_pmd_val(pmd); + return (unsigned long)native_pmd_val(pmd) == 0; } static inline unsigned long pmd_page_vaddr(pmd_t pmd) @@ -530,7 +530,7 @@ static inline unsigned long pages_to_mb(unsigned long npg) #if PAGETABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { - return pud_val(pud) == 0; + return native_pud_val(pud) == 0; } static inline int pud_present(pud_t pud) @@ -605,7 +605,7 @@ static inline int pgd_bad(pgd_t pgd) static inline int pgd_none(pgd_t pgd) { - return !pgd_val(pgd); + return !native_pgd_val(pgd); } #endif /* PAGETABLE_LEVELS > 3 */ -- cgit v1.2.3 From 976e8f677e42757e5586ea04a9ac8bb8ddaa037e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 13:29:44 -0800 Subject: x86: asm/io.h: unify virt_to_phys/phys_to_virt Impact: unify identical code asm/io_32.h and _64.h has functionally identical definitions for virt_to_phys, phys_to_virt, page_to_phys, and the isa_* variants, so just unify them. The only slightly functional change is using phys_addr_t for the physical address argument and return val. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/io.h | 59 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/io_32.h | 57 ------------------------------------------ arch/x86/include/asm/io_64.h | 37 --------------------------- 3 files changed, 59 insertions(+), 94 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 1dbbdf4be9b..919e3b19f3c 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -5,6 +5,7 @@ #include #include +#include #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ @@ -80,6 +81,64 @@ static inline void writeq(__u64 val, volatile void __iomem *addr) #define readq readq #define writeq writeq +/** + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given. It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. + * + * This function does not give bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ + +static inline phys_addr_t virt_to_phys(volatile void *address) +{ + return __pa(address); +} + +/** + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. 
It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ + +static inline void *phys_to_virt(phys_addr_t address) +{ + return __va(address); +} + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +/* + * ISA I/O bus memory addresses are 1:1 with the physical address. + */ +#define isa_virt_to_bus virt_to_phys +#define isa_page_to_bus page_to_phys +#define isa_bus_to_virt phys_to_virt + +/* + * However PCI ones are not necessarily 1:1 and therefore these interfaces + * are forbidden in portable PCI drivers. + * + * Allow them on x86 for legacy drivers, though. + */ +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + + #ifdef CONFIG_X86_32 # include "io_32.h" #else diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h index d8e242e1b39..2b687cb8609 100644 --- a/arch/x86/include/asm/io_32.h +++ b/arch/x86/include/asm/io_32.h @@ -53,47 +53,6 @@ */ #define xlate_dev_kmem_ptr(p) p -/** - * virt_to_phys - map virtual addresses to physical - * @address: address to remap - * - * The returned physical address is the physical (CPU) mapping for - * the memory address given. It is only valid to use this function on - * addresses directly mapped or allocated via kmalloc. - * - * This function does not give bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ - -static inline unsigned long virt_to_phys(volatile void *address) -{ - return __pa(address); -} - -/** - * phys_to_virt - map physical address to virtual - * @address: address to remap - * - * The returned virtual address is a current CPU mapping for - * the memory address given. It is only valid to use this function on - * addresses that have a kernel mapping - * - * This function does not handle bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ - -static inline void *phys_to_virt(unsigned long address) -{ - return __va(address); -} - -/* - * Change "struct page" to physical address. - */ -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) - /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory @@ -123,22 +82,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) extern void iounmap(volatile void __iomem *addr); -/* - * ISA I/O bus memory addresses are 1:1 with the physical address. - */ -#define isa_virt_to_bus virt_to_phys -#define isa_page_to_bus page_to_phys -#define isa_bus_to_virt phys_to_virt - -/* - * However PCI ones are not necessarily 1:1 and therefore these interfaces - * are forbidden in portable PCI drivers. - * - * Allow them on x86 for legacy drivers, though. - */ -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h index 563c16270ba..e71b5550877 100644 --- a/arch/x86/include/asm/io_64.h +++ b/arch/x86/include/asm/io_64.h @@ -142,27 +142,6 @@ __OUTS(l) #include -#ifndef __i386__ -/* - * Change virtual addresses to physical addresses and vv. 
- * These are pretty trivial - */ -static inline unsigned long virt_to_phys(volatile void *address) -{ - return __pa(address); -} - -static inline void *phys_to_virt(unsigned long address) -{ - return __va(address); -} -#endif - -/* - * Change "struct page" to physical address. - */ -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) - #include /* @@ -187,22 +166,6 @@ extern void iounmap(volatile void __iomem *addr); extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); -/* - * ISA I/O bus memory addresses are 1:1 with the physical address. - */ -#define isa_virt_to_bus virt_to_phys -#define isa_page_to_bus page_to_phys -#define isa_bus_to_virt phys_to_virt - -/* - * However PCI ones are not necessarily 1:1 and therefore these interfaces - * are forbidden in portable PCI drivers. - * - * Allow them on x86 for legacy drivers, though. - */ -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - void __memcpy_fromio(void *, unsigned long, unsigned); void __memcpy_toio(unsigned long, const void *, unsigned); -- cgit v1.2.3 From 133822c5c038b265ddb6545cda3a4c88815c7d3d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 13:29:52 -0800 Subject: x86: asm/io.h: unify ioremap prototypes Impact: unify identical code asm/io_32.h and _64.h have identical prototypes for the ioremap family of functions. The 32-bit header had a more descriptive comment. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/io.h | 31 +++++++++++++++++++++++++++++++ arch/x86/include/asm/io_32.h | 29 ----------------------------- arch/x86/include/asm/io_64.h | 22 ---------------------- 3 files changed, 31 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 919e3b19f3c..f150b1ecf92 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -138,6 +138,37 @@ static inline void *phys_to_virt(phys_addr_t address) #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt +/** + * ioremap - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * If the area you are trying to map is a PCI BAR you should have a + * look at pci_iomap(). 
+ */ +extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, + unsigned long prot_val); + +/* + * The default ioremap() behavior is non-cached: + */ +static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) +{ + return ioremap_nocache(offset, size); +} + +extern void iounmap(volatile void __iomem *addr); + +extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); + #ifdef CONFIG_X86_32 # include "io_32.h" diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h index 2b687cb8609..2fbe7dd26bb 100644 --- a/arch/x86/include/asm/io_32.h +++ b/arch/x86/include/asm/io_32.h @@ -53,35 +53,6 @@ */ #define xlate_dev_kmem_ptr(p) p -/** - * ioremap - map bus memory into CPU space - * @offset: bus address of the memory - * @size: size of the resource to map - * - * ioremap performs a platform specific sequence of operations to - * make bus memory CPU accessible via the readb/readw/readl/writeb/ - * writew/writel functions and the other mmio helpers. The returned - * address is not guaranteed to be usable directly as a virtual - * address. - * - * If the area you are trying to map is a PCI BAR you should have a - * look at pci_iomap(). - */ -extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, - unsigned long prot_val); - -/* - * The default ioremap() behavior is non-cached: - */ -static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) -{ - return ioremap_nocache(offset, size); -} - -extern void iounmap(volatile void __iomem *addr); - static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h index e71b5550877..0424c07246f 100644 --- a/arch/x86/include/asm/io_64.h +++ b/arch/x86/include/asm/io_64.h @@ -144,28 +144,6 @@ __OUTS(l) #include -/* - * This one maps high address device memory and turns off caching for that area. 
- * it's useful if some control registers are in such an area and write combining - * or read caching is not desirable: - */ -extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, - unsigned long prot_val); - -/* - * The default ioremap() behavior is non-cached: - */ -static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) -{ - return ioremap_nocache(offset, size); -} - -extern void iounmap(volatile void __iomem *addr); - -extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); - void __memcpy_fromio(void *, unsigned long, unsigned); void __memcpy_toio(unsigned long, const void *, unsigned); -- cgit v1.2.3 From fb08b20fe7c8491a35a4369cce60fcb886d7609d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 14:05:28 -0800 Subject: x86: Fix compile error in arch/x86/kernel/early_printk.c Fix compile problem: CC arch/x86/kernel/early_printk.o In file included from /home/jeremy/hg/xen/paravirt/linux/arch/x86/kernel/early_printk.c:17: /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h: In function 'pmd_page': /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:516: error: implicit declaration of function '__pfn_to_section' /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:516: warning: initialization makes pointer from integer without a cast /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:516: error: implicit declaration of function '__section_mem_map_addr' /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:516: warning: return makes pointer from integer without a cast /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h: In function 'pud_page': /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:586: warning: initialization makes pointer from integer without a cast /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:586: warning: return makes pointer from integer without a cast /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h: In function 'pgd_page': /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:625: warning: initialization makes pointer from integer without a cast /home/jeremy/hg/xen/paravirt/linux/arch/x86/include/asm/pgtable.h:625: warning: return makes pointer from integer without a cast This is a cycling dependency between asm/pgtable.h and linux/mmzone.h when using CONFIG_SPARSEMEM. Rather than hacking up the headers some more, remove asm/pgtable.h, since early_printk.c doesn't actually need it. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/kernel/early_printk.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 504ad198e4a..6a36dd228b6 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 4e7f78f815412fd25b207b8c63a698b637c9621d Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 5 Feb 2009 17:48:19 +0100 Subject: pxa/h5000: Setup I2S pins for pxa2xx-i2s The iPAQ h5000 has an AK4535 codec connected as I2S slave, PXA I2S providing SYSCLK. 
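For context, a table such as h5000_pin_config is applied once during board init; a minimal, abbreviated sketch (the real init function in h5000.c does the equivalent):

static void __init h5000_init(void)
{
	pxa2xx_mfp_config(ARRAY_AND_SIZE(h5000_pin_config));
	/* ... remaining board setup ... */
}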
Signed-off-by: Philipp Zabel Acked-by: Eric Miao Signed-off-by: Mark Brown --- arch/arm/mach-pxa/h5000.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c index da6e4422c0f..295ec413d80 100644 --- a/arch/arm/mach-pxa/h5000.c +++ b/arch/arm/mach-pxa/h5000.c @@ -153,6 +153,13 @@ static unsigned long h5000_pin_config[] __initdata = { GPIO23_SSP1_SCLK, GPIO25_SSP1_TXD, GPIO26_SSP1_RXD, + + /* I2S */ + GPIO28_I2S_BITCLK_OUT, + GPIO29_I2S_SDATA_IN, + GPIO30_I2S_SDATA_OUT, + GPIO31_I2S_SYNC, + GPIO32_I2S_SYSCLK, }; /* -- cgit v1.2.3 From 772885c1dc42f1992ac4fc937f1ed4ae9d42a31e Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 5 Feb 2009 17:48:20 +0100 Subject: pxa/spitz: Setup I2S pins for pxa2xx-i2s The spitz has a WM8750 codec connected as I2S slave but doesn't use the PXA I2S system clock. Signed-off-by: Philipp Zabel Acked-by: Eric Miao Signed-off-by: Mark Brown --- arch/arm/mach-pxa/spitz.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 6d447c9ce8a..0d62d311d41 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -105,6 +105,12 @@ static unsigned long spitz_pin_config[] __initdata = { GPIO57_nIOIS16, GPIO104_PSKTSEL, + /* I2S */ + GPIO28_I2S_BITCLK_OUT, + GPIO29_I2S_SDATA_IN, + GPIO30_I2S_SDATA_OUT, + GPIO31_I2S_SYNC, + /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, -- cgit v1.2.3 From f1ee5548a6507d2b4293635c61ddbf12e2c00e94 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 8 Feb 2009 16:18:03 -0800 Subject: x86/irq: optimize nr_irqs Impact: make nr_irqs depend more on cards used in a system depend on nr_irq_gsi more, and have a ratio for MSI. v2: make nr_irqs less than NR_VECTORS * nr_cpu_ids aka if only one cpu, we only can support nr_irqs = NR_VECTORS Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 9578d33f20a..43f95d7b13a 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3479,9 +3479,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) sub_handle = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { irq = create_irq_nr(irq_want); - irq_want++; if (irq == 0) return -1; + irq_want = irq + 1; #ifdef CONFIG_INTR_REMAP if (!intr_remapping_enabled) goto no_ir; @@ -3825,11 +3825,17 @@ int __init arch_probe_nr_irqs(void) { int nr; - nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ? 
- (NR_VECTORS + (8 * nr_cpu_ids)) : - (NR_VECTORS + (32 * nr_ioapics))); + if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) + nr_irqs = NR_VECTORS * nr_cpu_ids; - if (nr < nr_irqs && nr > nr_irqs_gsi) + nr = nr_irqs_gsi + 8 * nr_cpu_ids; +#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) + /* + * for MSI and HT dyn irq + */ + nr += nr_irqs_gsi * 16; +#endif + if (nr < nr_irqs) nr_irqs = nr; return 0; -- cgit v1.2.3 From abcaa2b8319a7673e76c2391cb5de3045ab1b401 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 8 Feb 2009 16:18:03 -0800 Subject: x86: use NR_IRQS_LEGACY to replace 16 Impact: cleanup also could kill platform_legacy_irq Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hw_irq.h | 4 +--- arch/x86/kernel/io_apic.c | 8 ++++---- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 1a20e3d1200..1b82781b898 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -25,8 +25,6 @@ #include #include -#define platform_legacy_irq(irq) ((irq) < 16) - /* Interrupt handlers registered during init_IRQ */ extern void apic_timer_interrupt(void); extern void error_interrupt(void); @@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq); extern void init_8259A(int aeoi); /* IOAPIC */ -#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) +#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) extern unsigned long io_apic_irqs; extern void init_VISWS_APIC_irqs(void); diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 43f95d7b13a..e5be9f35ea5 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3165,6 +3165,7 @@ static int __init ioapic_init_sysfs(void) device_initcall(ioapic_init_sysfs); +static int nr_irqs_gsi = NR_IRQS_LEGACY; /* * Dynamic irq allocate and deallocation */ @@ -3179,11 +3180,11 @@ unsigned int create_irq_nr(unsigned int irq_want) struct irq_desc *desc_new = NULL; irq = 0; + if (irq_want < nr_irqs_gsi) + irq_want = nr_irqs_gsi; + spin_lock_irqsave(&vector_lock, flags); for (new = irq_want; new < nr_irqs; new++) { - if (platform_legacy_irq(new)) - continue; - desc_new = irq_to_desc_alloc_cpu(new, cpu); if (!desc_new) { printk(KERN_INFO "can not get irq_desc for %d\n", new); @@ -3208,7 +3209,6 @@ unsigned int create_irq_nr(unsigned int irq_want) return irq; } -static int nr_irqs_gsi = NR_IRQS_LEGACY; int create_irq(void) { unsigned int irq_want; -- cgit v1.2.3 From f72dccace737df74d04a96461785a3ad61724b9f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 8 Feb 2009 16:18:03 -0800 Subject: x86: check_timer cleanup Impact: make check-timer more robust potentially solve boot fragility For edge trigger io-apic routing, we already unmasked the pin via setup_IO_APIC_irq(), so don't unmask it again. Also call local_irq_disable() between timer_irq_works(), because it calls local_irq_enable() inside. Also remove not needed apic version reading for 64-bit Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index e5be9f35ea5..855209a1b17 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1657,7 +1657,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, * to the first CPU. 
*/ entry.dest_mode = apic->irq_dest_mode; - entry.mask = 1; /* mask IRQ now */ + entry.mask = 0; /* don't mask IRQ for edge */ entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); entry.delivery_mode = apic->irq_delivery_mode; entry.polarity = 0; @@ -2863,14 +2863,10 @@ static inline void __init check_timer(void) int cpu = boot_cpu_id; int apic1, pin1, apic2, pin2; unsigned long flags; - unsigned int ver; int no_pin1 = 0; local_irq_save(flags); - ver = apic_read(APIC_LVR); - ver = GET_APIC_VERSION(ver); - /* * get/set the timer IRQ vector: */ @@ -2889,7 +2885,13 @@ static inline void __init check_timer(void) apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); init_8259A(1); #ifdef CONFIG_X86_32 - timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); + { + unsigned int ver; + + ver = apic_read(APIC_LVR); + ver = GET_APIC_VERSION(ver); + timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); + } #endif pin1 = find_isa_irq_pin(0, mp_INT); @@ -2928,8 +2930,17 @@ static inline void __init check_timer(void) if (no_pin1) { add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); + } else { + /* for edge trigger, setup_IO_APIC_irq already + * leave it unmasked. + * so only need to unmask if it is level-trigger + * do we really have level trigger timer? + */ + int idx; + idx = find_irq_entry(apic1, pin1, mp_INT); + if (idx != -1 && irq_trigger(idx)) + unmask_IO_APIC_irq_desc(desc); } - unmask_IO_APIC_irq_desc(desc); if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); @@ -2943,6 +2954,7 @@ static inline void __init check_timer(void) if (intr_remapping_enabled) panic("timer doesn't work through Interrupt-remapped IO-APIC"); #endif + local_irq_disable(); clear_IO_APIC_pin(apic1, pin1); if (!no_pin1) apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " @@ -2957,7 +2969,6 @@ static inline void __init check_timer(void) */ replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); - unmask_IO_APIC_irq_desc(desc); enable_8259A_irq(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); @@ -2972,6 +2983,7 @@ static inline void __init check_timer(void) /* * Cleanup, just in case ... */ + local_irq_disable(); disable_8259A_irq(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); @@ -2997,6 +3009,7 @@ static inline void __init check_timer(void) apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } + local_irq_disable(); disable_8259A_irq(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); @@ -3014,6 +3027,7 @@ static inline void __init check_timer(void) apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } + local_irq_disable(); apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. Then try booting with the 'noapic' option.\n"); -- cgit v1.2.3 From cc6c50066ec1ac98bef97117e2f078bb89bbccc7 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 8 Feb 2009 16:18:03 -0800 Subject: x86: find nr_irqs_gsi with mp_ioapic_routing Impact: find right nr_irqs_gsi on some systems. 
One test-system has gap between gsi's: [ 0.000000] ACPI: IOAPIC (id[0x04] address[0xfec00000] gsi_base[0]) [ 0.000000] IOAPIC[0]: apic_id 4, version 0, address 0xfec00000, GSI 0-23 [ 0.000000] ACPI: IOAPIC (id[0x05] address[0xfeafd000] gsi_base[48]) [ 0.000000] IOAPIC[1]: apic_id 5, version 0, address 0xfeafd000, GSI 48-54 [ 0.000000] ACPI: IOAPIC (id[0x06] address[0xfeafc000] gsi_base[56]) [ 0.000000] IOAPIC[2]: apic_id 6, version 0, address 0xfeafc000, GSI 56-62 ... [ 0.000000] nr_irqs_gsi: 38 So nr_irqs_gsi is not right. some irq for MSI will overwrite with io_apic. need to get that with acpi_probe_gsi when acpi io_apic is used Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec.h | 6 ++++++ arch/x86/kernel/acpi/boot.c | 23 +++++++++++++++++++++++ arch/x86/kernel/io_apic.c | 20 +++++++++++++++----- 3 files changed, 44 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index d22f732eab8..8c5620147c4 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -73,6 +73,7 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs(void); extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); +extern int acpi_probe_gsi(void); #ifdef CONFIG_X86_IO_APIC extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, u32 gsi, int triggering, int polarity); @@ -84,6 +85,11 @@ mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, return 0; } #endif +#else /* !CONFIG_ACPI: */ +static inline int acpi_probe_gsi(void) +{ + return 0; +} #endif /* CONFIG_ACPI */ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3efa996b036..c334fe75dcd 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -961,6 +961,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) nr_ioapics++; } +int __init acpi_probe_gsi(void) +{ + int idx; + int gsi; + int max_gsi = 0; + + if (acpi_disabled) + return 0; + + if (!acpi_ioapic) + return 0; + + max_gsi = 0; + for (idx = 0; idx < nr_ioapics; idx++) { + gsi = mp_ioapic_routing[idx].gsi_end; + + if (gsi > max_gsi) + max_gsi = gsi; + } + + return max_gsi + 1; +} + static void assign_to_mp_irq(struct mpc_intsrc *m, struct mpc_intsrc *mp_irq) { diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 855209a1b17..56e51eb551a 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3824,14 +3824,24 @@ int __init io_apic_get_redir_entries (int ioapic) void __init probe_nr_irqs_gsi(void) { - int idx; int nr = 0; - for (idx = 0; idx < nr_ioapics; idx++) - nr += io_apic_get_redir_entries(idx) + 1; - - if (nr > nr_irqs_gsi) + nr = acpi_probe_gsi(); + if (nr > nr_irqs_gsi) { nr_irqs_gsi = nr; + } else { + /* for acpi=off or acpi is not compiled in */ + int idx; + + nr = 0; + for (idx = 0; idx < nr_ioapics; idx++) + nr += io_apic_get_redir_entries(idx) + 1; + + if (nr > nr_irqs_gsi) + nr_irqs_gsi = nr; + } + + printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); } #ifdef CONFIG_SPARSE_IRQ -- cgit v1.2.3 From 2c344e9d6e1938fdf15e93c56d6fe42f8410e9d3 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 7 Feb 2009 12:23:37 -0800 Subject: x86: don't pretend that non-framepointer stack traces are reliable Without frame pointers enabled, the x86 stack traces should not pretend to be reliable; instead 
they should just be what they are: unreliable. The effect of this is that they have a '?' printed in the stacktrace, to warn the reader that these entries are guesses rather than known based on more reliable information. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar --- arch/x86/kernel/dumpstack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 6b1f6f6f866..87d103ded1c 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo, frame = frame->next_frame; bp = (unsigned long) frame; } else { - ops->address(data, addr, bp == 0); + ops->address(data, addr, 0); } print_ftrace_graph_addr(addr, data, ops, tinfo, graph); } -- cgit v1.2.3 From d3770449d3cb058b94ca1d050d5ced4a66c75ce4 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Sun, 8 Feb 2009 09:58:38 -0500 Subject: percpu: make PER_CPU_BASE_SECTION overridable by arches Impact: bug fix IA-64 needs to put percpu data in the seperate section even on UP. Fixes regression caused by "percpu: refactor percpu.h" Signed-off-by: Brian Gerst Acked-by: Tony Luck Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 77f30b664b4..30cf46534dd 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -27,12 +27,12 @@ extern void *per_cpu_init(void); #else /* ! SMP */ -#define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu"))) - #define per_cpu_init() (__phys_per_cpu_start) #endif /* SMP */ +#define PER_CPU_BASE_SECTION ".data.percpu" + /* * Be extremely careful when taking the address of this variable! Due to virtual * remapping, it is different from the canonical address returned by __get_cpu_var(var)! -- cgit v1.2.3 From 2add8e235cbe0dcd672c33fc322754e15500238c Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Sun, 8 Feb 2009 09:58:39 -0500 Subject: x86: use linker to offset symbols by __per_cpu_load Impact: cleanup and bug fix Use the linker to create symbols for certain per-cpu variables that are offset by __per_cpu_load. This allows the removal of the runtime fixup of the GDT pointer, which fixes a bug with resume reported by Jiri Slaby. Reported-by: Jiri Slaby Signed-off-by: Brian Gerst Acked-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/include/asm/percpu.h | 22 ++++++++++++++++++++++ arch/x86/include/asm/processor.h | 2 ++ arch/x86/kernel/cpu/common.c | 6 +----- arch/x86/kernel/head_64.S | 21 ++------------------- arch/x86/kernel/vmlinux_64.lds.S | 8 ++++++++ 5 files changed, 35 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0b64af4f13a..aee103b26d0 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -34,6 +34,12 @@ #define PER_CPU_VAR(var) per_cpu__##var #endif /* SMP */ +#ifdef CONFIG_X86_64_SMP +#define INIT_PER_CPU_VAR(var) init_per_cpu__##var +#else +#define INIT_PER_CPU_VAR(var) per_cpu__##var +#endif + #else /* ...!ASSEMBLY */ #include @@ -45,6 +51,22 @@ #define __percpu_arg(x) "%" #x #endif +/* + * Initialized pointers to per-cpu variables needed for the boot + * processor need to use these macros to get the proper address + * offset from __per_cpu_load on SMP. 
+ * + * There also must be an entry in vmlinux_64.lds.S + */ +#define DECLARE_INIT_PER_CPU(var) \ + extern typeof(per_cpu_var(var)) init_per_cpu_var(var) + +#ifdef CONFIG_X86_64_SMP +#define init_per_cpu_var(var) init_per_cpu__##var +#else +#define init_per_cpu_var(var) per_cpu_var(var) +#endif + /* For arch-specific code, we can use direct single-insn ops (they * don't give an lvalue though). */ extern void __bad_percpu_size(void); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 656d02ea509..373d3f628d2 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -393,6 +393,8 @@ union irq_stack_union { }; DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); +DECLARE_INIT_PER_CPU(irq_stack_union); + DECLARE_PER_CPU(char *, irq_stack_ptr); #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0f73ea42308..41b0de6df87 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -902,12 +902,8 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; DEFINE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __aligned(PAGE_SIZE); -#ifdef CONFIG_SMP -DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */ -#else DEFINE_PER_CPU(char *, irq_stack_ptr) = - per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; -#endif + init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; DEFINE_PER_CPU(unsigned long, kernel_stack) = (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a0a2b5ca9b7..2e648e3a5ea 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -205,19 +205,6 @@ ENTRY(secondary_startup_64) pushq $0 popfq -#ifdef CONFIG_SMP - /* - * Fix up static pointers that need __per_cpu_load added. The assembler - * is unable to do this directly. This is only needed for the boot cpu. - * These values are set up with the correct base addresses by C code for - * secondary cpus. - */ - movq initial_gs(%rip), %rax - cmpl $0, per_cpu__cpu_number(%rax) - jne 1f - addq %rax, early_gdt_descr_base(%rip) -1: -#endif /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace @@ -275,11 +262,7 @@ ENTRY(secondary_startup_64) ENTRY(initial_code) .quad x86_64_start_kernel ENTRY(initial_gs) -#ifdef CONFIG_SMP - .quad __per_cpu_load -#else - .quad PER_CPU_VAR(irq_stack_union) -#endif + .quad INIT_PER_CPU_VAR(irq_stack_union) __FINITDATA ENTRY(stack_start) @@ -425,7 +408,7 @@ NEXT_PAGE(level2_spare_pgt) early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: - .quad per_cpu__gdt_page + .quad INIT_PER_CPU_VAR(gdt_page) ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 07f62d287ff..087a7f2c639 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -257,6 +257,14 @@ SECTIONS DWARF_DEBUG } + /* + * Per-cpu symbols which need to be offset from __per_cpu_load + * for the boot processor. 
+ */ +#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load +INIT_PER_CPU(gdt_page); +INIT_PER_CPU(irq_stack_union); + /* * Build-time check on the image size: */ -- cgit v1.2.3 From 44581a28e805a31661469c4b466b9cd14b36e7b6 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Sun, 8 Feb 2009 09:58:40 -0500 Subject: x86: fix abuse of per_cpu_offset Impact: bug fix Don't use per_cpu_offset() to determine if it valid to access a per-cpu variable for a given cpu number. It is not a valid assumption on x86-64 anymore. Use cpu_possible() instead. Signed-off-by: Brian Gerst Signed-off-by: Ingo Molnar --- arch/x86/mm/numa_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 08d140fbc31..deb1c1ab786 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -702,7 +702,7 @@ void __cpuinit numa_set_node(int cpu, int node) } #ifdef CONFIG_DEBUG_PER_CPU_MAPS - if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) { + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); dump_stack(); return; @@ -790,7 +790,7 @@ int early_cpu_to_node(int cpu) if (early_per_cpu_ptr(x86_cpu_to_node_map)) return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; - if (!per_cpu_offset(cpu)) { + if (!cpu_possible(cpu)) { printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu); dump_stack(); -- cgit v1.2.3 From 726c0d95b6bd06cb83efd36a76ccf03fa9a770f0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 9 Feb 2009 11:32:17 +0100 Subject: x86: early_printk.c - fix pgtable.h unification fallout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/x86/kernel/early_printk.c: In function ‘early_dbgp_init’: arch/x86/kernel/early_printk.c:827: error: ‘PAGE_KERNEL_NOCACHE’ undeclared (first use in this function) arch/x86/kernel/early_printk.c:827: error: (Each undeclared identifier is reported only once arch/x86/kernel/early_printk.c:827: error: for each function it appears in.) Signed-off-by: Ingo Molnar --- arch/x86/kernel/early_printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 6a36dd228b6..639ad98238a 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -14,6 +14,7 @@ #include #include #include +#include #include /* Simple VGA output */ -- cgit v1.2.3 From e5f7f202f31fd05e9de7e1ba5a7b30de7855f5aa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 9 Feb 2009 11:42:57 +0100 Subject: x86, pgtable.h: macro-ify *_page() methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The p?d_page() methods still rely on highlevel types and methods: In file included from arch/x86/kernel/early_printk.c:18: /home/mingo/tip/arch/x86/include/asm/pgtable.h: In function ‘pmd_page’: /home/mingo/tip/arch/x86/include/asm/pgtable.h:516: error: implicit declaration of function ‘__pfn_to_section’ /home/mingo/tip/arch/x86/include/asm/pgtable.h:516: error: initialization makes pointer from integer without a cast /home/mingo/tip/arch/x86/include/asm/pgtable.h:516: error: implicit declaration of function ‘__section_mem_map_addr’ /home/mingo/tip/arch/x86/include/asm/pgtable.h:516: error: return makes pointer from integer without a cast So convert them to macros and document the type dependency. 
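For illustration, the reason a macro works where the static inline broke down is purely include ordering: a macro is expanded at each call site, by which point linux/mmzone.h has already been included, while a static inline body is compiled at the point the header defines it. A minimal user-space sketch of that ordering effect (page_of() and helper_lookup() are invented names, not kernel interfaces):

  #include <stdio.h>

  /*
   * "Early" header: wants to expose page_of(), but the helper it needs
   * has not been declared yet at this point in the include order. A
   * static inline defined here would fail (implicit declaration of
   * helper_lookup); the macro just records text and defers the
   * reference to wherever page_of() is used.
   */
  #define page_of(pfn) helper_lookup(pfn)

  /* "Later" header: finally provides the helper the macro relies on. */
  static int helper_lookup(int pfn)
  {
          return pfn * 2;
  }

  int main(void)
  {
          /* Expansion happens here, after helper_lookup() is visible. */
          printf("page_of(21) = %d\n", page_of(21));
          return 0;
  }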
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a80a956ae65..76696e98f5b 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -473,10 +473,11 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); } -static inline struct page *pmd_page(pmd_t pmd) -{ - return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT); -} +/* + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] @@ -543,10 +544,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud) return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); } -static inline struct page *pud_page(pud_t pud) -{ - return pfn_to_page(pud_val(pud) >> PAGE_SHIFT); -} +/* + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) /* Find an entry in the second-level page table.. */ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) @@ -582,10 +584,11 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK); } -static inline struct page *pgd_page(pgd_t pgd) -{ - return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT); -} +/* + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) /* to find an entry in a page-table-directory. 
*/ static inline unsigned pud_index(unsigned long address) -- cgit v1.2.3 From c47c1b1f3a9d6973108020df1dcab7604f7774dd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 9 Feb 2009 11:57:45 +0100 Subject: x86, pgtable.h: fix 2-level 32-bit build - pmd_flags() needs to be available on 2-levels too - provide pud_large() wrapper as well - include page.h - it provides basic types relied on by pgtable.h Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page.h | 9 +++++---- arch/x86/include/asm/pgtable.h | 9 +++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 0b16b64a8fe..823cc931363 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -139,10 +139,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } -static inline pmdval_t pmd_flags(pmd_t pmd) -{ - return native_pmd_val(pmd) & PTE_FLAGS_MASK; -} #else /* PAGETABLE_LEVELS == 2 */ #include @@ -152,6 +148,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) } #endif /* PAGETABLE_LEVELS >= 3 */ +static inline pmdval_t pmd_flags(pmd_t pmd) +{ + return native_pmd_val(pmd) & PTE_FLAGS_MASK; +} + static inline pte_t native_make_pte(pteval_t val) { return (pte_t) { .pte = val }; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 76696e98f5b..178205305ac 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_PGTABLE_H #define _ASM_X86_PGTABLE_H +#include + #define FIRST_USER_ADDRESS 0 #define _PAGE_BIT_PRESENT 0 /* is present */ @@ -528,6 +530,13 @@ static inline unsigned long pages_to_mb(unsigned long npg) #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) +#if PAGETABLE_LEVELS == 2 +static inline int pud_large(pud_t pud) +{ + return 0; +} +#endif + #if PAGETABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { -- cgit v1.2.3 From 9b2b76a3344146c4d8d300874e73af8161204f87 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 14:09:40 -0800 Subject: x86: add handle_irq() to allow interrupt injection Xen uses a different interrupt path, so introduce handle_irq() to allow interrupts to be inserted into the normal interrupt path. This is handled slightly differently on 32 and 64-bit. 
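The essential shape of the change: the arch-specific lookup-and-dispatch step becomes a boolean helper that any entry path can call, so an alternate path (such as a Xen event-channel upcall) can inject an irq without duplicating the dispatch logic. A rough user-space sketch of that structure, with invented names, purely for illustration:

  #include <stdbool.h>
  #include <stdio.h>

  #define NR_IRQS 16
  typedef void (*irq_handler_t)(unsigned int irq);
  static irq_handler_t handlers[NR_IRQS];

  /* common dispatch: returns false when no handler is registered */
  static bool handle_irq(unsigned int irq)
  {
          if (irq >= NR_IRQS || !handlers[irq])
                  return false;
          handlers[irq](irq);
          return true;
  }

  /* "native" entry path */
  static void do_IRQ(unsigned int vector)
  {
          unsigned int irq = vector;      /* stand-in for the vector->irq lookup */

          if (!handle_irq(irq))
                  fprintf(stderr, "no handler for irq %u\n", irq);
  }

  /* alternate entry path injecting directly into the same dispatch */
  static void inject_irq(unsigned int irq)
  {
          if (!handle_irq(irq))
                  fprintf(stderr, "spurious injected irq %u\n", irq);
  }

  static void tick(unsigned int irq)
  {
          printf("irq %u handled\n", irq);
  }

  int main(void)
  {
          handlers[3] = tick;
          do_IRQ(3);              /* normal path */
          inject_irq(3);          /* injected path shares the dispatch */
          inject_irq(9);          /* unhandled: reported, not fatal */
          return 0;
  }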
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq.h | 1 + arch/x86/kernel/irq_32.c | 34 +++++++++++++++++++++------------- arch/x86/kernel/irq_64.c | 23 ++++++++++++++++------- 3 files changed, 38 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 592688ed04d..d0f6f7d1771 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -39,6 +39,7 @@ extern void fixup_irqs(void); extern unsigned int do_IRQ(struct pt_regs *regs); extern void init_IRQ(void); extern void native_init_IRQ(void); +extern bool handle_irq(unsigned irq, struct pt_regs *regs); /* Interrupt vector management */ extern DECLARE_BITMAP(used_vectors, NR_VECTORS); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index d802c844991..61f09fb969e 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -191,6 +191,26 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } #endif +bool handle_irq(unsigned irq, struct pt_regs *regs) +{ + struct irq_desc *desc; + int overflow; + + overflow = check_stack_overflow(); + + desc = irq_to_desc(irq); + if (unlikely(!desc)) + return false; + + if (!execute_on_irq_stack(overflow, desc, irq)) { + if (unlikely(overflow)) + print_stack_overflow(); + desc->handle_irq(irq, desc); + } + + return true; +} + /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific @@ -200,31 +220,19 @@ unsigned int do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs; /* high bit used in ret_from_ code */ - int overflow; unsigned vector = ~regs->orig_ax; - struct irq_desc *desc; unsigned irq; - old_regs = set_irq_regs(regs); irq_enter(); irq = __get_cpu_var(vector_irq)[vector]; - overflow = check_stack_overflow(); - - desc = irq_to_desc(irq); - if (unlikely(!desc)) { + if (!handle_irq(irq, regs)) { printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", __func__, irq, vector, smp_processor_id()); BUG(); } - if (!execute_on_irq_stack(overflow, desc, irq)) { - if (unlikely(overflow)) - print_stack_overflow(); - desc->handle_irq(irq, desc); - } - irq_exit(); set_irq_regs(old_regs); return 1; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 018963aa6ee..a93f3b0dc7f 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -48,6 +48,20 @@ static inline void stack_overflow_check(struct pt_regs *regs) #endif } +bool handle_irq(unsigned irq, struct pt_regs *regs) +{ + struct irq_desc *desc; + + stack_overflow_check(regs); + + desc = irq_to_desc(irq); + if (unlikely(!desc)) + return false; + + generic_handle_irq_desc(irq, desc); + return true; +} + /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific @@ -56,7 +70,6 @@ static inline void stack_overflow_check(struct pt_regs *regs) asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - struct irq_desc *desc; /* high bit used in ret_from_ code */ unsigned vector = ~regs->orig_ax; @@ -64,14 +77,10 @@ asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) exit_idle(); irq_enter(); - irq = __get_cpu_var(vector_irq)[vector]; - stack_overflow_check(regs); + irq = __get_cpu_var(vector_irq)[vector]; - desc = irq_to_desc(irq); - if (likely(desc)) - generic_handle_irq_desc(irq, desc); - else { + if (!handle_irq(irq, regs)) { if (!disable_apic) ack_APIC_irq(); -- 
cgit v1.2.3 From 7c1d7cdcef1b54f4a78892b6b99d19f12c4f398e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 14:09:41 -0800 Subject: x86: unify do_IRQ() With the differences in interrupt handling hoisted into handle_irq(), do_IRQ is more or less identical between 32 and 64 bit, so unify it. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq.h | 3 ++- arch/x86/kernel/irq.c | 38 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/irq_32.c | 27 --------------------------- arch/x86/kernel/irq_64.c | 33 --------------------------------- 4 files changed, 40 insertions(+), 61 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index d0f6f7d1771..107eb219669 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -36,11 +36,12 @@ static inline int irq_canonicalize(int irq) extern void fixup_irqs(void); #endif -extern unsigned int do_IRQ(struct pt_regs *regs); extern void init_IRQ(void); extern void native_init_IRQ(void); extern bool handle_irq(unsigned irq, struct pt_regs *regs); +extern unsigned int do_IRQ(struct pt_regs *regs); + /* Interrupt vector management */ extern DECLARE_BITMAP(used_vectors, NR_VECTORS); extern int vector_used_by_percpu_irq(unsigned int vector); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 8b30d0c2512..f13ca1650aa 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -6,10 +6,12 @@ #include #include #include +#include #include #include #include +#include atomic_t irq_err_count; @@ -188,4 +190,40 @@ u64 arch_irq_stat(void) return sum; } + +/* + * do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ +unsigned int __irq_entry do_IRQ(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + /* high bit used in ret_from_ code */ + unsigned vector = ~regs->orig_ax; + unsigned irq; + + exit_idle(); + irq_enter(); + + irq = __get_cpu_var(vector_irq)[vector]; + + if (!handle_irq(irq, regs)) { +#ifdef CONFIG_X86_64 + if (!disable_apic) + ack_APIC_irq(); +#endif + + if (printk_ratelimit()) + printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n", + __func__, smp_processor_id(), vector, irq); + } + + irq_exit(); + + set_irq_regs(old_regs); + return 1; +} + EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 61f09fb969e..4beb9a13873 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -211,33 +211,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) return true; } -/* - * do_IRQ handles all normal device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). 
- */ -unsigned int do_IRQ(struct pt_regs *regs) -{ - struct pt_regs *old_regs; - /* high bit used in ret_from_ code */ - unsigned vector = ~regs->orig_ax; - unsigned irq; - - old_regs = set_irq_regs(regs); - irq_enter(); - irq = __get_cpu_var(vector_irq)[vector]; - - if (!handle_irq(irq, regs)) { - printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", - __func__, irq, vector, smp_processor_id()); - BUG(); - } - - irq_exit(); - set_irq_regs(old_regs); - return 1; -} - #ifdef CONFIG_HOTPLUG_CPU #include diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index a93f3b0dc7f..977d8b43a0d 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -62,39 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) return true; } -/* - * do_IRQ handles all normal device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). - */ -asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - - /* high bit used in ret_from_ code */ - unsigned vector = ~regs->orig_ax; - unsigned irq; - - exit_idle(); - irq_enter(); - - irq = __get_cpu_var(vector_irq)[vector]; - - if (!handle_irq(irq, regs)) { - if (!disable_apic) - ack_APIC_irq(); - - if (printk_ratelimit()) - printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n", - __func__, smp_processor_id(), vector); - } - - irq_exit(); - - set_irq_regs(old_regs); - return 1; -} - #ifdef CONFIG_HOTPLUG_CPU /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) -- cgit v1.2.3 From 792dc4f6cdacf50d3f2b93756d282fc04ee34bd5 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 14:09:43 -0800 Subject: xen: use our own eventchannel->irq path Rather than overloading vectors for event channels, take full responsibility for mapping an event channel to irq directly. With this patch Xen has its own irq allocator. When the kernel gets an event channel upcall, it maps the event channel number to an irq and injects it into the normal interrupt path. 
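Conceptually the new path keeps its own event-channel-to-irq table, allocates an irq when a channel is bound, and has the upcall translate and inject. A simplified stand-alone sketch of that bookkeeping (all names and numbers invented, not the real Xen code):

  #include <stdio.h>

  #define NR_EVTCHN 64
  #define NO_IRQ    (-1)

  static int evtchn_to_irq[NR_EVTCHN];
  static int next_irq = 32;       /* pretend dynamic irq space starts here */

  /* bind an event channel to a freshly allocated irq (idempotent) */
  static int bind_evtchn_to_irq(unsigned int evtchn)
  {
          if (evtchn_to_irq[evtchn] == NO_IRQ)
                  evtchn_to_irq[evtchn] = next_irq++;
          return evtchn_to_irq[evtchn];
  }

  /* upcall: translate the pending channel, then hand it to the irq layer */
  static void evtchn_upcall(unsigned int evtchn)
  {
          int irq = evtchn_to_irq[evtchn];

          if (irq == NO_IRQ) {
                  fprintf(stderr, "spurious event channel %u\n", evtchn);
                  return;
          }
          printf("event channel %u -> irq %d, dispatching\n", evtchn, irq);
  }

  int main(void)
  {
          for (int i = 0; i < NR_EVTCHN; i++)
                  evtchn_to_irq[i] = NO_IRQ;

          printf("bound evtchn 5 to irq %d\n", bind_evtchn_to_irq(5));
          evtchn_upcall(5);
          evtchn_upcall(6);       /* unbound: reported as spurious */
          return 0;
  }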
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/xen/events.h | 6 ------ arch/x86/xen/irq.c | 17 +---------------- 2 files changed, 1 insertion(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h index 19144184983..1df35417c41 100644 --- a/arch/x86/include/asm/xen/events.h +++ b/arch/x86/include/asm/xen/events.h @@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->flags); } -static inline void xen_do_IRQ(int irq, struct pt_regs *regs) -{ - regs->orig_ax = ~irq; - do_IRQ(regs); -} - #endif /* _ASM_X86_XEN_EVENTS_H */ diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 5a070900ad3..cfd17799bd6 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -19,21 +19,6 @@ void xen_force_evtchn_callback(void) (void)HYPERVISOR_xen_version(0, NULL); } -static void __init __xen_init_IRQ(void) -{ - int i; - - /* Create identity vector->irq map */ - for(i = 0; i < NR_VECTORS; i++) { - int cpu; - - for_each_possible_cpu(cpu) - per_cpu(vector_irq, cpu)[i] = i; - } - - xen_init_IRQ(); -} - static unsigned long xen_save_fl(void) { struct vcpu_info *vcpu; @@ -127,7 +112,7 @@ static void xen_halt(void) } static const struct pv_irq_ops xen_irq_ops __initdata = { - .init_IRQ = __xen_init_IRQ, + .init_IRQ = xen_init_IRQ, .save_fl = PV_CALLEE_SAVE(xen_save_fl), .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), -- cgit v1.2.3 From 1c14fa4937eb73509e07ac12bf8db1fdf4c42a59 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 7 Feb 2009 15:39:38 -0800 Subject: x86: use early_ioremap in __acpi_map_table __acpi_map_table() effectively reimplements early_ioremap(). Rather than have that duplication, just implement it in terms of early_ioremap(). However, unlike early_ioremap(), __acpi_map_table() just maintains a single mapping which gets replaced each call, and has no corresponding unmap function. Implement this by just removing the previous mapping each time its called. Unfortunately, this will leave a stray mapping at the end. 
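The pattern is a one-slot cache: remember the previous mapping and tear it down before creating the next one, accepting that the very last mapping is left behind. A small user-space analogue of that logic (early_map()/early_unmap() are stand-ins, not the kernel API):

  #include <stdio.h>
  #include <stdlib.h>

  /* stand-ins for early_ioremap()/early_iounmap() */
  static void *early_map(unsigned long phys, unsigned long size)
  {
          printf("map   phys=%#lx size=%lu\n", phys, size);
          return malloc(size);
  }

  static void early_unmap(void *virt, unsigned long size)
  {
          printf("unmap virt=%p size=%lu\n", virt, size);
          free(virt);
  }

  /*
   * One-slot cache: each call drops the previous mapping before
   * installing the new one, so at most one table is mapped at a time.
   * The final mapping is never undone here (the "stray" one).
   */
  static void *map_table(unsigned long phys, unsigned long size)
  {
          static void *prev_map;
          static unsigned long prev_size;

          if (!phys || !size)
                  return NULL;

          if (prev_map)
                  early_unmap(prev_map, prev_size);

          prev_size = size;
          prev_map = early_map(phys, size);
          return prev_map;
  }

  int main(void)
  {
          map_table(0x1000, 64);
          map_table(0x2000, 128); /* replaces the first mapping */
          map_table(0x3000, 32);  /* replaces the second; this one stays */
          return 0;
  }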
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/acpi.h | 3 --- arch/x86/include/asm/fixmap_32.h | 4 ---- arch/x86/include/asm/fixmap_64.h | 4 ---- arch/x86/kernel/acpi/boot.c | 27 +++++++-------------------- 4 files changed, 7 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 9830681446a..4518dc50090 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -102,9 +102,6 @@ static inline void disable_acpi(void) acpi_noirq = 1; } -/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ -#define FIX_ACPI_PAGES 4 - extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); static inline void acpi_noirq_set(void) { acpi_noirq = 1; } diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h index c7115c1d721..047d9bab2b3 100644 --- a/arch/x86/include/asm/fixmap_32.h +++ b/arch/x86/include/asm/fixmap_32.h @@ -95,10 +95,6 @@ enum fixed_addresses { (__end_of_permanent_fixed_addresses & 255), FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, FIX_WP_TEST, -#ifdef CONFIG_ACPI - FIX_ACPI_BEGIN, - FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, -#endif #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT FIX_OHCI1394_BASE, #endif diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index 00a30ab9b1a..298d9ba3fae 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -50,10 +50,6 @@ enum fixed_addresses { FIX_PARAVIRT_BOOTMAP, #endif __end_of_permanent_fixed_addresses, -#ifdef CONFIG_ACPI - FIX_ACPI_BEGIN, - FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, -#endif #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT FIX_OHCI1394_BASE, #endif diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index d37593c2f43..c518599e426 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -121,8 +121,8 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; */ char *__init __acpi_map_table(unsigned long phys, unsigned long size) { - unsigned long base, offset, mapped_size; - int idx; + static char *prev_map; + static unsigned long prev_size; if (!phys || !size) return NULL; @@ -130,26 +130,13 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT)) return __va(phys); - offset = phys & (PAGE_SIZE - 1); - mapped_size = PAGE_SIZE - offset; - clear_fixmap(FIX_ACPI_END); - set_fixmap(FIX_ACPI_END, phys); - base = fix_to_virt(FIX_ACPI_END); + if (prev_map) + early_iounmap(prev_map, prev_size); - /* - * Most cases can be covered by the below. - */ - idx = FIX_ACPI_END; - while (mapped_size < size) { - if (--idx < FIX_ACPI_BEGIN) - return NULL; /* cannot handle this */ - phys += PAGE_SIZE; - clear_fixmap(idx); - set_fixmap(idx, phys); - mapped_size += PAGE_SIZE; - } + prev_size = size; + prev_map = early_ioremap(phys, size); - return ((unsigned char *)base + offset); + return prev_map; } #ifdef CONFIG_PCI_MMCONFIG -- cgit v1.2.3 From eecb9a697f0b790e5840dae8a8b866bea49a86ee Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 7 Feb 2009 15:39:38 -0800 Subject: x86: always explicitly map acpi memory Always map acpi tables, rather than assuming we can use the normal linear mapping to access the acpi tables. This is necessary in a virtual environment where the linear mappings are to pseudo-physical memory, but the acpi tables exist at a real physical address. 
It doesn't hurt to map in the normal non-virtual case, so just do it unconditionally. Signed-off-by: Jeremy Fitzhardinge Acked-by: Len Brown Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c518599e426..5424a18f2e4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -127,9 +127,6 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) if (!phys || !size) return NULL; - if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT)) - return __va(phys); - if (prev_map) early_iounmap(prev_map, prev_size); -- cgit v1.2.3 From 05876f88ed9a66b26af613e222795ae790616252 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 7 Feb 2009 15:39:40 -0800 Subject: acpi: remove final __acpi_map_table mapping before setting acpi_gbl_permanent_mmap On x86, __acpi_map_table uses early_ioremap() to create the mapping, replacing the previous mapping with a new one. Once enough of the kernel is up an running it switches to using normal ioremap(). At that point, we need to clean up the final mapping to avoid a warning from the early_ioremap subsystem. This can be removed after all the instances in the ACPI code are fixed that rely on early-ioremap's implicit overmapping of previously mapped tables. Signed-off-by: Jeremy Fitzhardinge Acked-by: Len Brown Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 5424a18f2e4..7217834f6b1 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -124,12 +124,14 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) static char *prev_map; static unsigned long prev_size; + if (prev_map) { + early_iounmap(prev_map, prev_size); + prev_map = NULL; + } + if (!phys || !size) return NULL; - if (prev_map) - early_iounmap(prev_map, prev_size); - prev_size = size; prev_map = early_ioremap(phys, size); -- cgit v1.2.3 From 7d97277b754d3ee098a5ec69b6aaafb00c94e2f2 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 7 Feb 2009 15:39:41 -0800 Subject: acpi/x86: introduce __apci_map_table, v4 to prevent wrongly overwriting fixmap that still want to use. ACPI used to rely on low mappings being all linearly mapped and grew a habit: it never really unmapped certain kinds of tables after use. This can cause problems - for example the hypothetical case when some spurious access still references it. 
v2: remove prev_map and prev_size in __apci_map_table v3: let acpi_os_unmap_memory() call early_iounmap too, so remove extral calling to early_acpi_os_unmap_memory v4: fix typo in one acpi_get_table_with_size calling Signed-off-by: Yinghai Lu Acked-by: Len Brown Signed-off-by: Ingo Molnar --- arch/ia64/kernel/acpi.c | 4 ++++ arch/x86/kernel/acpi/boot.c | 17 +++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index d541671caf4..2363ed17319 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) return __va(phys_addr); } +char *__init __acpi_unmap_table(unsigned long virt_addr, unsigned long size) +{ +} + /* -------------------------------------------------------------------------- Boot-time Table Parsing -------------------------------------------------------------------------- */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7217834f6b1..4c2aaea4293 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -121,21 +121,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; */ char *__init __acpi_map_table(unsigned long phys, unsigned long size) { - static char *prev_map; - static unsigned long prev_size; - - if (prev_map) { - early_iounmap(prev_map, prev_size); - prev_map = NULL; - } if (!phys || !size) return NULL; - prev_size = size; - prev_map = early_ioremap(phys, size); + return early_ioremap(phys, size); +} +void __init __acpi_unmap_table(char *map, unsigned long size) +{ + if (!map || !size) + return; - return prev_map; + early_iounmap(map, size); } #ifdef CONFIG_PCI_MMCONFIG -- cgit v1.2.3 From b825e6cc7b1401862531df497a4a4daff8102ed5 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 7 Feb 2009 15:39:41 -0800 Subject: x86, es7000: fix ACPI table mappings Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 53699c931ad..71d7be624d4 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -292,24 +292,31 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; int i = 0; + acpi_size tbl_size; - while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) { + while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { struct oem_table *t = (struct oem_table *)header; oem_addrX = t->OEMTableAddr; oem_size = t->OEMTableSize; + early_acpi_os_unmap_memory(header, tbl_size); *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); return 0; } + early_acpi_os_unmap_memory(header, tbl_size); } return -1; } void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) { + if (!oem_addr) + return; + + __acpi_unmap_table((char *)oem_addr, oem_size); } #endif -- cgit v1.2.3 From 3c552ac8a747d6c26d13302c54d71dae9f56f4ac Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 9 Feb 2009 12:05:47 -0800 Subject: x86: make apic_* operations inline functions Mainly to get proper type-checking and consistency. 
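In other words, each operation becomes an ordinary function with one declared prototype instead of a bare textual alias for a struct member, so call sites read uniformly and diagnostics point at a real symbol. A toy comparison of the two styles (the dummy_* backend is invented for the demo):

  #include <stdio.h>

  typedef unsigned int u32;

  struct apic_ops {
          u32  (*read)(u32 reg);
          void (*write)(u32 reg, u32 val);
  };

  static u32 dummy_read(u32 reg)
  {
          return reg ^ 0xffu;
  }

  static void dummy_write(u32 reg, u32 val)
  {
          printf("write %#x = %#x\n", reg, val);
  }

  static struct apic_ops dummy_ops = { .read = dummy_read, .write = dummy_write };
  static struct apic_ops *apic_ops = &dummy_ops;

  /* old style: the name is just text that expands to a struct member */
  #define apic_read_macro (apic_ops->read)

  /* new style: a real function with a fixed, compiler-checked prototype */
  static inline u32 apic_read(u32 reg)
  {
          return apic_ops->read(reg);
  }

  int main(void)
  {
          printf("%#x\n", apic_read_macro(0x20)); /* works only by expansion */
          printf("%#x\n", apic_read(0x20));       /* same indirect call, normal function */
          return 0;
  }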
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/apic.h | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index b03711d7990..f4835a1be36 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -139,12 +139,35 @@ struct apic_ops { extern struct apic_ops *apic_ops; -#define apic_read (apic_ops->read) -#define apic_write (apic_ops->write) -#define apic_icr_read (apic_ops->icr_read) -#define apic_icr_write (apic_ops->icr_write) -#define apic_wait_icr_idle (apic_ops->wait_icr_idle) -#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle) +static inline u32 apic_read(u32 reg) +{ + return apic_ops->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic_ops->write(reg, val); +} + +static inline u64 apic_icr_read(void) +{ + return apic_ops->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic_ops->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + apic_ops->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic_ops->safe_wait_icr_idle(); +} extern int get_physical_broadcast(void); -- cgit v1.2.3 From 4924e228ae039029a9503ad571d91086e4042c90 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 9 Feb 2009 12:05:47 -0800 Subject: x86: unstatic mp_find_ioapic so it can be used elsewhere Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/mpspec.h | 1 + arch/x86/kernel/acpi/boot.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 8c5620147c4..b59371a312f 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -77,6 +77,7 @@ extern int acpi_probe_gsi(void); #ifdef CONFIG_X86_IO_APIC extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, u32 gsi, int triggering, int polarity); +extern int mp_find_ioapic(int gsi); #else static inline int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c334fe75dcd..068b900f4b0 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -872,7 +872,7 @@ static struct { DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); } mp_ioapic_routing[MAX_IO_APICS]; -static int mp_find_ioapic(int gsi) +int mp_find_ioapic(int gsi) { int i = 0; -- cgit v1.2.3 From c3e137d1e882c4fab9adcce7ae2be9bf3eb64c4c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 9 Feb 2009 12:05:47 -0800 Subject: x86: add mp_find_ioapic_pin Add mp_find_ioapic_pin() to find an IO APIC's specific pin from a GSI, and use this function within acpi/boot. Make it non-static so other code can use it too. 
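For context, a GSI is located by first finding which IO-APIC's GSI window contains it, then taking the offset into that window as the pin. A stand-alone sketch of just that arithmetic (the routing table contents are invented):

  #include <stdio.h>

  struct ioapic_routing {
          unsigned int gsi_base, gsi_end;
  };

  /* two fictional IO-APICs covering GSIs 0-23 and 24-47 */
  static struct ioapic_routing routing[] = {
          { .gsi_base = 0,  .gsi_end = 23 },
          { .gsi_base = 24, .gsi_end = 47 },
  };
  #define NR_IOAPICS ((int)(sizeof(routing) / sizeof(routing[0])))

  static int find_ioapic(unsigned int gsi)
  {
          for (int i = 0; i < NR_IOAPICS; i++)
                  if (gsi >= routing[i].gsi_base && gsi <= routing[i].gsi_end)
                          return i;
          return -1;
  }

  /* pin = offset of the GSI within that chip's GSI window */
  static int find_ioapic_pin(int ioapic, unsigned int gsi)
  {
          if (ioapic < 0 || gsi > routing[ioapic].gsi_end)
                  return -1;
          return gsi - routing[ioapic].gsi_base;
  }

  int main(void)
  {
          unsigned int gsi = 30;
          int ioapic = find_ioapic(gsi);

          printf("GSI %u -> IO-APIC %d pin %d\n",
                 gsi, ioapic, find_ioapic_pin(ioapic, gsi));
          return 0;
  }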
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/mpspec.h | 1 + arch/x86/kernel/acpi/boot.c | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index b59371a312f..5916c8df09d 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -78,6 +78,7 @@ extern int acpi_probe_gsi(void); extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, u32 gsi, int triggering, int polarity); extern int mp_find_ioapic(int gsi); +extern int mp_find_ioapic_pin(int ioapic, int gsi); #else static inline int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 068b900f4b0..bba162c81d5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -887,6 +887,16 @@ int mp_find_ioapic(int gsi) return -1; } +int mp_find_ioapic_pin(int ioapic, int gsi) +{ + if (WARN_ON(ioapic == -1)) + return -1; + if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end)) + return -1; + + return gsi - mp_ioapic_routing[ioapic].gsi_base; +} + static u8 __init uniq_ioapic_id(u8 id) { #ifdef CONFIG_X86_32 @@ -1022,7 +1032,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return; - pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + pin = mp_find_ioapic_pin(ioapic, gsi); /* * TBD: This check is for faulty timer entries, where the override @@ -1142,7 +1152,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) return gsi; } - ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); #ifdef CONFIG_X86_32 if (ioapic_renumber_irq) @@ -1231,7 +1241,7 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id; - mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base; + mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); save_mp_irq(&mp_irq); #endif -- cgit v1.2.3 From ca97ab90164c7b978abf9d82dc82d6dc2cbac4a0 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 9 Feb 2009 12:05:47 -0800 Subject: x86: unstatic ioapic entry funcs Unstatic ioapic_write_entry and setup_ioapic_entry functions so that the Xen code can do its own ioapic routing setup. 
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/io_apic.h | 6 ++++++ arch/x86/kernel/io_apic.c | 10 +++++----- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 309d0e23193..59cb4a1317b 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -169,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int); extern void probe_nr_irqs_gsi(void); +extern int setup_ioapic_entry(int apic, int irq, + struct IO_APIC_route_entry *entry, + unsigned int destination, int trigger, + int polarity, int vector); +extern void ioapic_write_entry(int apic, int pin, + struct IO_APIC_route_entry e); #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 static const int timer_through_8259 = 0; diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 56e51eb551a..7248ca11bdc 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -486,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) io_apic_write(apic, 0x10 + 2*pin, eu.w1); } -static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); @@ -1478,10 +1478,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t handle_edge_irq, "edge"); } -static int setup_ioapic_entry(int apic_id, int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int trigger, - int polarity, int vector) +int setup_ioapic_entry(int apic_id, int irq, + struct IO_APIC_route_entry *entry, + unsigned int destination, int trigger, + int polarity, int vector) { /* * add it to the IO-APIC irq-routing table: -- cgit v1.2.3 From bf56957d176c279175464f385f3eb03d65819328 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Mon, 9 Feb 2009 12:05:48 -0800 Subject: xen: expose enable_IO_APIC for 32-bit enable_IO_APIC() is defined for both 32- and 64-bit x86, so it should be declared for both. Signed-off-by: Ian Campbell Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/hw_irq.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 1b82781b898..370e1c83bb4 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -65,9 +65,7 @@ extern void disable_IO_APIC(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); extern void setup_ioapic_dest(void); -#ifdef CONFIG_X86_64 extern void enable_IO_APIC(void); -#endif /* Statistics */ extern atomic_t irq_err_count; -- cgit v1.2.3 From 76397f72fb9f4c9a96dfe05462887811c81b0e17 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:39 +0900 Subject: x86: stackprotector.h misc update Impact: misc udpate * wrap content with CONFIG_CC_STACK_PROTECTOR so that other arch files can include it directly * add missing includes This will help future changes. 
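The reason for wrapping the whole body in the config option is that other files can then include the header unconditionally. One common variant of that header pattern, sketched below with placeholder names, also supplies an empty fallback so callers need no guards of their own (this particular patch only does the wrapping):

  #include <stdio.h>

  /* --- would live in a header; shown inline for the demo --- */
  #ifdef CONFIG_FEATURE
  static inline void boot_init_canary(void)
  {
          puts("canary initialized");
  }
  #else
  /* feature off: the header still parses and the call compiles away */
  static inline void boot_init_canary(void)
  {
  }
  #endif
  /* --------------------------------------------------------- */

  int main(void)
  {
          boot_init_canary();     /* builds the same way either way */
          return 0;
  }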
Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/stackprotector.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 36a700acaf2..ee275e9f48a 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -1,8 +1,12 @@ #ifndef _ASM_STACKPROTECTOR_H #define _ASM_STACKPROTECTOR_H 1 +#ifdef CONFIG_CC_STACKPROTECTOR + #include #include +#include +#include /* * Initialize the stackprotector canary value. @@ -35,4 +39,5 @@ static __always_inline void boot_init_stack_canary(void) percpu_write(irq_stack_union.stack_canary, canary); } -#endif +#endif /* CC_STACKPROTECTOR */ +#endif /* _ASM_STACKPROTECTOR_H */ -- cgit v1.2.3 From 5d707e9c8ef2a3596ed5c975c6ff05cec890c2b4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:39 +0900 Subject: stackprotector: update make rules Impact: no default -fno-stack-protector if stackp is enabled, cleanup Stackprotector make rules had the following problems. * cc support test and warning are scattered across makefile and kernel/panic.c. * -fno-stack-protector was always added regardless of configuration. Update such that cc support test and warning are contained in makefile and -fno-stack-protector is added iff stackp is turned off. While at it, prepare for 32bit support. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index cacee981d16..ab48ab497e5 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -70,14 +70,17 @@ else # this works around some issues with generating unwind tables in older gccs # newer gccs do it by default KBUILD_CFLAGS += -maccumulate-outgoing-args +endif - stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh - stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \ - "$(CC)" "-fstack-protector -DGCC_HAS_SP" ) - stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \ - "$(CC)" -fstack-protector-all ) - - KBUILD_CFLAGS += $(stackp-y) +ifdef CONFIG_CC_STACKPROTECTOR + cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh + ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y) + stackp-y := -fstack-protector + stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all + KBUILD_CFLAGS += $(stackp-y) + else + $(warning stack protector enabled but no compiler support) + endif endif # Stackpointer is addressed different for 32 bit and 64 bit x86 -- cgit v1.2.3 From d627ded5ab2f7818154d57369e3a8cdb40db2569 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:40 +0900 Subject: x86: no stack protector for vdso Impact: avoid crash on vsyscall Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 4d6ef0a336d..16a9020c8f1 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -38,7 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ - $(filter -g%,$(KBUILD_CFLAGS)) + $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) $(vobjs): KBUILD_CFLAGS += $(CFL) -- cgit v1.2.3 From f0d96110f9fd98a1a22e03b8adba69508843d910 Mon Sep 17 
00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:40 +0900 Subject: x86: use asm .macro instead of cpp #define in entry_32.S Impact: cleanup Use .macro instead of cpp #define where approriate. This cleans up code and will ease future changes. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 293 +++++++++++++++++++++++---------------------- 1 file changed, 151 insertions(+), 142 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index a0b91aac72a..c461925d3b6 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -101,121 +101,127 @@ #define resume_userspace_sig resume_userspace #endif -#define SAVE_ALL \ - cld; \ - pushl %fs; \ - CFI_ADJUST_CFA_OFFSET 4;\ - /*CFI_REL_OFFSET fs, 0;*/\ - pushl %es; \ - CFI_ADJUST_CFA_OFFSET 4;\ - /*CFI_REL_OFFSET es, 0;*/\ - pushl %ds; \ - CFI_ADJUST_CFA_OFFSET 4;\ - /*CFI_REL_OFFSET ds, 0;*/\ - pushl %eax; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET eax, 0;\ - pushl %ebp; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET ebp, 0;\ - pushl %edi; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET edi, 0;\ - pushl %esi; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET esi, 0;\ - pushl %edx; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET edx, 0;\ - pushl %ecx; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET ecx, 0;\ - pushl %ebx; \ - CFI_ADJUST_CFA_OFFSET 4;\ - CFI_REL_OFFSET ebx, 0;\ - movl $(__USER_DS), %edx; \ - movl %edx, %ds; \ - movl %edx, %es; \ - movl $(__KERNEL_PERCPU), %edx; \ +.macro SAVE_ALL + cld + pushl %fs + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET fs, 0;*/ + pushl %es + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET es, 0;*/ + pushl %ds + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET ds, 0;*/ + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET eax, 0 + pushl %ebp + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebp, 0 + pushl %edi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edi, 0 + pushl %esi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET esi, 0 + pushl %edx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edx, 0 + pushl %ecx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ecx, 0 + pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebx, 0 + movl $(__USER_DS), %edx + movl %edx, %ds + movl %edx, %es + movl $(__KERNEL_PERCPU), %edx movl %edx, %fs +.endm -#define RESTORE_INT_REGS \ - popl %ebx; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE ebx;\ - popl %ecx; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE ecx;\ - popl %edx; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE edx;\ - popl %esi; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE esi;\ - popl %edi; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE edi;\ - popl %ebp; \ - CFI_ADJUST_CFA_OFFSET -4;\ - CFI_RESTORE ebp;\ - popl %eax; \ - CFI_ADJUST_CFA_OFFSET -4;\ +.macro RESTORE_INT_REGS + popl %ebx + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE ebx + popl %ecx + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE ecx + popl %edx + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE edx + popl %esi + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE esi + popl %edi + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE edi + popl %ebp + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE ebp + popl %eax + CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE eax +.endm -#define RESTORE_REGS \ - RESTORE_INT_REGS; \ -1: popl %ds; \ - CFI_ADJUST_CFA_OFFSET -4;\ - /*CFI_RESTORE ds;*/\ -2: popl %es; \ - CFI_ADJUST_CFA_OFFSET -4;\ - /*CFI_RESTORE es;*/\ -3: popl %fs; \ - CFI_ADJUST_CFA_OFFSET -4;\ - /*CFI_RESTORE fs;*/\ -.pushsection .fixup,"ax"; \ -4: movl $0,(%esp); \ - jmp 1b; \ -5: movl $0,(%esp); \ - jmp 2b; \ 
-6: movl $0,(%esp); \ - jmp 3b; \ -.section __ex_table,"a";\ - .align 4; \ - .long 1b,4b; \ - .long 2b,5b; \ - .long 3b,6b; \ +.macro RESTORE_REGS + RESTORE_INT_REGS +1: popl %ds + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_RESTORE ds;*/ +2: popl %es + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_RESTORE es;*/ +3: popl %fs + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_RESTORE fs;*/ +.pushsection .fixup, "ax" +4: movl $0, (%esp) + jmp 1b +5: movl $0, (%esp) + jmp 2b +6: movl $0, (%esp) + jmp 3b +.section __ex_table, "a" + .align 4 + .long 1b, 4b + .long 2b, 5b + .long 3b, 6b .popsection +.endm -#define RING0_INT_FRAME \ - CFI_STARTPROC simple;\ - CFI_SIGNAL_FRAME;\ - CFI_DEF_CFA esp, 3*4;\ - /*CFI_OFFSET cs, -2*4;*/\ +.macro RING0_INT_FRAME + CFI_STARTPROC simple + CFI_SIGNAL_FRAME + CFI_DEF_CFA esp, 3*4 + /*CFI_OFFSET cs, -2*4;*/ CFI_OFFSET eip, -3*4 +.endm -#define RING0_EC_FRAME \ - CFI_STARTPROC simple;\ - CFI_SIGNAL_FRAME;\ - CFI_DEF_CFA esp, 4*4;\ - /*CFI_OFFSET cs, -2*4;*/\ +.macro RING0_EC_FRAME + CFI_STARTPROC simple + CFI_SIGNAL_FRAME + CFI_DEF_CFA esp, 4*4 + /*CFI_OFFSET cs, -2*4;*/ CFI_OFFSET eip, -3*4 +.endm -#define RING0_PTREGS_FRAME \ - CFI_STARTPROC simple;\ - CFI_SIGNAL_FRAME;\ - CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\ - /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\ - CFI_OFFSET eip, PT_EIP-PT_OLDESP;\ - /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\ - /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\ - CFI_OFFSET eax, PT_EAX-PT_OLDESP;\ - CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\ - CFI_OFFSET edi, PT_EDI-PT_OLDESP;\ - CFI_OFFSET esi, PT_ESI-PT_OLDESP;\ - CFI_OFFSET edx, PT_EDX-PT_OLDESP;\ - CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\ +.macro RING0_PTREGS_FRAME + CFI_STARTPROC simple + CFI_SIGNAL_FRAME + CFI_DEF_CFA esp, PT_OLDESP-PT_EBX + /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/ + CFI_OFFSET eip, PT_EIP-PT_OLDESP + /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/ + /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/ + CFI_OFFSET eax, PT_EAX-PT_OLDESP + CFI_OFFSET ebp, PT_EBP-PT_OLDESP + CFI_OFFSET edi, PT_EDI-PT_OLDESP + CFI_OFFSET esi, PT_ESI-PT_OLDESP + CFI_OFFSET edx, PT_EDX-PT_OLDESP + CFI_OFFSET ecx, PT_ECX-PT_OLDESP CFI_OFFSET ebx, PT_EBX-PT_OLDESP +.endm ENTRY(ret_from_fork) CFI_STARTPROC @@ -595,28 +601,30 @@ syscall_badsys: END(syscall_badsys) CFI_ENDPROC -#define FIXUP_ESPFIX_STACK \ - /* since we are on a wrong stack, we cant make it a C code :( */ \ - PER_CPU(gdt_page, %ebx); \ - GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \ - addl %esp, %eax; \ - pushl $__KERNEL_DS; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl %eax; \ - CFI_ADJUST_CFA_OFFSET 4; \ - lss (%esp), %esp; \ - CFI_ADJUST_CFA_OFFSET -8; -#define UNWIND_ESPFIX_STACK \ - movl %ss, %eax; \ - /* see if on espfix stack */ \ - cmpw $__ESPFIX_SS, %ax; \ - jne 27f; \ - movl $__KERNEL_DS, %eax; \ - movl %eax, %ds; \ - movl %eax, %es; \ - /* switch to normal stack */ \ - FIXUP_ESPFIX_STACK; \ -27:; +.macro FIXUP_ESPFIX_STACK + /* since we are on a wrong stack, we cant make it a C code :( */ + PER_CPU(gdt_page, %ebx) + GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) + addl %esp, %eax + pushl $__KERNEL_DS + CFI_ADJUST_CFA_OFFSET 4 + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + lss (%esp), %esp + CFI_ADJUST_CFA_OFFSET -8 +.endm +.macro UNWIND_ESPFIX_STACK + movl %ss, %eax + /* see if on espfix stack */ + cmpw $__ESPFIX_SS, %ax + jne 27f + movl $__KERNEL_DS, %eax + movl %eax, %ds + movl %eax, %es + /* switch to normal stack */ + FIXUP_ESPFIX_STACK +27: +.endm /* * Build the entry stubs and pointer table with some assembler magic. 
@@ -1136,26 +1144,27 @@ END(page_fault) * by hand onto the new stack - while updating the return eip past * the instruction that would have done it for sysenter. */ -#define FIX_STACK(offset, ok, label) \ - cmpw $__KERNEL_CS,4(%esp); \ - jne ok; \ -label: \ - movl TSS_sysenter_sp0+offset(%esp),%esp; \ - CFI_DEF_CFA esp, 0; \ - CFI_UNDEFINED eip; \ - pushfl; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl $__KERNEL_CS; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl $sysenter_past_esp; \ - CFI_ADJUST_CFA_OFFSET 4; \ +.macro FIX_STACK offset ok label + cmpw $__KERNEL_CS, 4(%esp) + jne \ok +\label: + movl TSS_sysenter_sp0 + \offset(%esp), %esp + CFI_DEF_CFA esp, 0 + CFI_UNDEFINED eip + pushfl + CFI_ADJUST_CFA_OFFSET 4 + pushl $__KERNEL_CS + CFI_ADJUST_CFA_OFFSET 4 + pushl $sysenter_past_esp + CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 +.endm ENTRY(debug) RING0_INT_FRAME cmpl $ia32_sysenter_target,(%esp) jne debug_stack_correct - FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) + FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn debug_stack_correct: pushl $-1 # mark this as an int CFI_ADJUST_CFA_OFFSET 4 @@ -1213,7 +1222,7 @@ nmi_stack_correct: nmi_stack_fixup: RING0_INT_FRAME - FIX_STACK(12,nmi_stack_correct, 1) + FIX_STACK 12, nmi_stack_correct, 1 jmp nmi_stack_correct nmi_debug_stack_check: @@ -1224,7 +1233,7 @@ nmi_debug_stack_check: jb nmi_stack_correct cmpl $debug_esp_fix_insn,(%esp) ja nmi_stack_correct - FIX_STACK(24,nmi_stack_correct, 1) + FIX_STACK 24, nmi_stack_correct, 1 jmp nmi_stack_correct nmi_espfix_stack: -- cgit v1.2.3 From d9a89a26e02ef9ed03f74a755a8b4d8f3a066622 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:40 +0900 Subject: x86: add %gs accessors for x86_32 Impact: cleanup On x86_32, %gs is handled lazily. It's not saved and restored on kernel entry/exit but only when necessary which usually is during task switch but there are few other places. Currently, it's done by calling savesegment() and loadsegment() explicitly. Define get_user_gs(), set_user_gs() and task_user_gs() and use them instead. While at it, clean up register access macros in signal.c. This cleans up code a bit and will help future changes. 
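The value of the accessors is that they hide where the user %gs value actually lives (read live from the segment register for the current task, or from the saved thread state for another task), so later patches can change the storage strategy without touching call sites. A rough user-space sketch of that kind of accessor layer (structures and names invented):

  #include <stdio.h>

  typedef unsigned short u16;

  struct pt_regs { unsigned long gs; };
  struct thread  { unsigned long gs; };
  struct task    { struct thread thread; struct pt_regs regs; };

  /*
   * Accessor layer: callers ask for "this task's user gs" and never care
   * where the value is kept. Flipping STORE_IN_REGS moves the storage
   * without touching a single call site.
   */
  #ifdef STORE_IN_REGS
  #define get_user_gs(t)          ((u16)(t)->regs.gs)
  #define set_user_gs(t, v)       ((t)->regs.gs = (v))
  #else
  #define get_user_gs(t)          ((u16)(t)->thread.gs)
  #define set_user_gs(t, v)       ((t)->thread.gs = (v))
  #endif

  int main(void)
  {
          struct task tsk = { { 0 }, { 0 } };

          set_user_gs(&tsk, 0x63);        /* e.g. a TLS segment selector */
          printf("user gs = %#x\n", (unsigned int)get_user_gs(&tsk));
          return 0;
  }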
Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/a.out-core.h | 2 +- arch/x86/include/asm/elf.h | 2 +- arch/x86/include/asm/mmu_context.h | 2 +- arch/x86/include/asm/system.h | 9 +++++++++ arch/x86/kernel/process_32.c | 6 +++--- arch/x86/kernel/ptrace.c | 14 ++++++------- arch/x86/kernel/signal.c | 41 +++++++++++++++----------------------- arch/x86/kernel/vm86_32.c | 4 ++-- arch/x86/math-emu/get_address.c | 6 ++---- 9 files changed, 41 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index 3c601f8224b..bb70e397aa8 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->regs.ds = (u16)regs->ds; dump->regs.es = (u16)regs->es; dump->regs.fs = (u16)regs->fs; - savesegment(gs, dump->regs.gs); + dump->regs.gs = get_user_gs(regs); dump->regs.orig_ax = regs->orig_ax; dump->regs.ip = regs->ip; dump->regs.cs = (u16)regs->cs; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index f51a3ddde01..39b0aac1675 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -124,7 +124,7 @@ do { \ pr_reg[7] = regs->ds & 0xffff; \ pr_reg[8] = regs->es & 0xffff; \ pr_reg[9] = regs->fs & 0xffff; \ - savesegment(gs, pr_reg[10]); \ + pr_reg[10] = get_user_gs(regs); \ pr_reg[11] = regs->orig_ax; \ pr_reg[12] = regs->ip; \ pr_reg[13] = regs->cs & 0xffff; \ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 52948df9cd1..4955165682c 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -79,7 +79,7 @@ do { \ #ifdef CONFIG_X86_32 #define deactivate_mm(tsk, mm) \ do { \ - loadsegment(gs, 0); \ + set_user_gs(task_pt_regs(tsk), 0); \ } while (0) #else #define deactivate_mm(tsk, mm) \ diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 2fcc70bc85f..70c74b8db87 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -182,6 +182,15 @@ extern void native_load_gs_index(unsigned); #define savesegment(seg, value) \ asm("mov %%" #seg ",%0":"=r" (value) : : "memory") +/* + * x86_32 user gs accessors. 
+ */ +#ifdef CONFIG_X86_32 +#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) +#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) +#define task_user_gs(tsk) ((tsk)->thread.gs) +#endif + static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 1a1ae8edc40..d58a340e1be 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -131,7 +131,7 @@ void __show_regs(struct pt_regs *regs, int all) if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; - savesegment(gs, gs); + gs = get_user_gs(regs); } else { sp = (unsigned long) (®s->sp); savesegment(ss, ss); @@ -304,7 +304,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, p->thread.ip = (unsigned long) ret_from_fork; - savesegment(gs, p->thread.gs); + task_user_gs(p) = get_user_gs(regs); tsk = current; if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { @@ -342,7 +342,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { - __asm__("movl %0, %%gs" : : "r"(0)); + set_user_gs(regs, 0); regs->fs = 0; set_fs(USER_DS); regs->ds = __USER_DS; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0a5df5f82fb..508b6b57d0c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -90,9 +90,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset) if (offset != offsetof(struct user_regs_struct, gs)) retval = *pt_regs_access(task_pt_regs(task), offset); else { - retval = task->thread.gs; if (task == current) - savesegment(gs, retval); + retval = get_user_gs(task_pt_regs(task)); + else + retval = task_user_gs(task); } return retval; } @@ -126,13 +127,10 @@ static int set_segment_reg(struct task_struct *task, break; case offsetof(struct user_regs_struct, gs): - task->thread.gs = value; if (task == current) - /* - * The user-mode %gs is not affected by - * kernel entry, so we must update the CPU. 
- */ - loadsegment(gs, value); + set_user_gs(task_pt_regs(task), value); + else + task_user_gs(task) = value; } return 0; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 7fc78b01981..8562387c75a 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -50,27 +50,23 @@ # define FIX_EFLAGS __FIX_EFLAGS #endif -#define COPY(x) { \ - get_user_ex(regs->x, &sc->x); \ -} +#define COPY(x) do { \ + get_user_ex(regs->x, &sc->x); \ +} while (0) -#define COPY_SEG(seg) { \ - unsigned short tmp; \ - get_user_ex(tmp, &sc->seg); \ - regs->seg = tmp; \ -} +#define GET_SEG(seg) ({ \ + unsigned short tmp; \ + get_user_ex(tmp, &sc->seg); \ + tmp; \ +}) -#define COPY_SEG_CPL3(seg) { \ - unsigned short tmp; \ - get_user_ex(tmp, &sc->seg); \ - regs->seg = tmp | 3; \ -} +#define COPY_SEG(seg) do { \ + regs->seg = GET_SEG(seg); \ +} while (0) -#define GET_SEG(seg) { \ - unsigned short tmp; \ - get_user_ex(tmp, &sc->seg); \ - loadsegment(seg, tmp); \ -} +#define COPY_SEG_CPL3(seg) do { \ + regs->seg = GET_SEG(seg) | 3; \ +} while (0) static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, @@ -86,7 +82,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, get_user_try { #ifdef CONFIG_X86_32 - GET_SEG(gs); + set_user_gs(regs, GET_SEG(gs)); COPY_SEG(fs); COPY_SEG(es); COPY_SEG(ds); @@ -138,12 +134,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, put_user_try { #ifdef CONFIG_X86_32 - { - unsigned int tmp; - - savesegment(gs, tmp); - put_user_ex(tmp, (unsigned int __user *)&sc->gs); - } + put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs); put_user_ex(regs->fs, (unsigned int __user *)&sc->fs); put_user_ex(regs->es, (unsigned int __user *)&sc->es); put_user_ex(regs->ds, (unsigned int __user *)&sc->ds); diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 4eeb5cf9720..55ea30d2a3d 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) ret = KVM86->regs32; ret->fs = current->thread.saved_fs; - loadsegment(gs, current->thread.saved_gs); + set_user_gs(ret, current->thread.saved_gs); return ret; } @@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk info->regs32->ax = 0; tsk->thread.saved_sp0 = tsk->thread.sp0; tsk->thread.saved_fs = info->regs32->fs; - savesegment(gs, tsk->thread.saved_gs); + tsk->thread.saved_gs = get_user_gs(info->regs32); tss = &per_cpu(init_tss, get_cpu()); tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c index 420b3b6e391..6ef5e99380f 100644 --- a/arch/x86/math-emu/get_address.c +++ b/arch/x86/math-emu/get_address.c @@ -150,11 +150,9 @@ static long pm_address(u_char FPU_modrm, u_char segment, #endif /* PARANOID */ switch (segment) { - /* gs isn't used by the kernel, so it still has its - user-space value. */ case PREFIX_GS_ - 1: - /* N.B. 
- movl %seg, mem is a 2 byte write regardless of prefix */ - savesegment(gs, addr->selector); + /* user gs handling can be lazy, use special accessors */ + addr->selector = get_user_gs(FPU_info->regs); break; default: addr->selector = PM_REG_(segment); -- cgit v1.2.3 From ccbeed3a05908d201b47b6c3dd1a373138bba566 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:40 +0900 Subject: x86: make lazy %gs optional on x86_32 Impact: pt_regs changed, lazy gs handling made optional, add slight overhead to SAVE_ALL, simplifies error_code path a bit On x86_32, %gs hasn't been used by kernel and handled lazily. pt_regs doesn't have place for it and gs is saved/loaded only when necessary. In preparation for stack protector support, this patch makes lazy %gs handling optional by doing the followings. * Add CONFIG_X86_32_LAZY_GS and place for gs in pt_regs. * Save and restore %gs along with other registers in entry_32.S unless LAZY_GS. Note that this unfortunately adds "pushl $0" on SAVE_ALL even when LAZY_GS. However, it adds no overhead to common exit path and simplifies entry path with error code. * Define different user_gs accessors depending on LAZY_GS and add lazy_save_gs() and lazy_load_gs() which are noop if !LAZY_GS. The lazy_*_gs() ops are used to save, load and clear %gs lazily. * Define ELF_CORE_COPY_KERNEL_REGS() which always read %gs directly. xen and lguest changes need to be verified. Signed-off-by: Tejun Heo Cc: Jeremy Fitzhardinge Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ++ arch/x86/include/asm/elf.h | 15 ++++- arch/x86/include/asm/mmu_context.h | 2 +- arch/x86/include/asm/ptrace.h | 4 +- arch/x86/include/asm/system.h | 12 +++- arch/x86/kernel/asm-offsets_32.c | 1 + arch/x86/kernel/entry_32.S | 132 +++++++++++++++++++++++++++++++------ arch/x86/kernel/process_32.c | 4 +- arch/x86/kernel/ptrace.c | 5 +- arch/x86/lguest/boot.c | 2 +- arch/x86/xen/enlighten.c | 17 ++--- 11 files changed, 158 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5c8e353c112..5bcdede71ba 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -207,6 +207,10 @@ config X86_TRAMPOLINE depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) default y +config X86_32_LAZY_GS + def_bool y + depends on X86_32 + config KTIME_SCALAR def_bool X86_32 source "init/Kconfig" diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 39b0aac1675..83c1bc8d2e8 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -112,7 +112,7 @@ extern unsigned int vdso_enabled; * now struct_user_regs, they are different) */ -#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \ do { \ pr_reg[0] = regs->bx; \ pr_reg[1] = regs->cx; \ @@ -124,7 +124,6 @@ do { \ pr_reg[7] = regs->ds & 0xffff; \ pr_reg[8] = regs->es & 0xffff; \ pr_reg[9] = regs->fs & 0xffff; \ - pr_reg[10] = get_user_gs(regs); \ pr_reg[11] = regs->orig_ax; \ pr_reg[12] = regs->ip; \ pr_reg[13] = regs->cs & 0xffff; \ @@ -133,6 +132,18 @@ do { \ pr_reg[16] = regs->ss & 0xffff; \ } while (0); +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +do { \ + ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\ + pr_reg[10] = get_user_gs(regs); \ +} while (0); + +#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \ +do { \ + ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\ + savesegment(gs, pr_reg[10]); \ +} while (0); + #define ELF_PLATFORM (utsname()->machine) #define set_personality_64bit() do { } while (0) diff --git 
a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4955165682c..f923203dc39 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -79,7 +79,7 @@ do { \ #ifdef CONFIG_X86_32 #define deactivate_mm(tsk, mm) \ do { \ - set_user_gs(task_pt_regs(tsk), 0); \ + lazy_load_gs(0); \ } while (0) #else #define deactivate_mm(tsk, mm) \ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 6d34d954c22..e304b66abee 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -28,7 +28,7 @@ struct pt_regs { int xds; int xes; int xfs; - /* int gs; */ + int xgs; long orig_eax; long eip; int xcs; @@ -50,7 +50,7 @@ struct pt_regs { unsigned long ds; unsigned long es; unsigned long fs; - /* int gs; */ + unsigned long gs; unsigned long orig_ax; unsigned long ip; unsigned long cs; diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 70c74b8db87..79b98e5b96f 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -186,10 +186,20 @@ extern void native_load_gs_index(unsigned); * x86_32 user gs accessors. */ #ifdef CONFIG_X86_32 +#ifdef CONFIG_X86_32_LAZY_GS #define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) #define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) #define task_user_gs(tsk) ((tsk)->thread.gs) -#endif +#define lazy_save_gs(v) savesegment(gs, (v)) +#define lazy_load_gs(v) loadsegment(gs, (v)) +#else /* X86_32_LAZY_GS */ +#define get_user_gs(regs) (u16)((regs)->gs) +#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) +#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) +#define lazy_save_gs(v) do { } while (0) +#define lazy_load_gs(v) do { } while (0) +#endif /* X86_32_LAZY_GS */ +#endif /* X86_32 */ static inline unsigned long get_limit(unsigned long segment) { diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index ee4df08feee..fbf2f33e308 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -75,6 +75,7 @@ void foo(void) OFFSET(PT_DS, pt_regs, ds); OFFSET(PT_ES, pt_regs, es); OFFSET(PT_FS, pt_regs, fs); + OFFSET(PT_GS, pt_regs, gs); OFFSET(PT_ORIG_EAX, pt_regs, orig_ax); OFFSET(PT_EIP, pt_regs, ip); OFFSET(PT_CS, pt_regs, cs); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c461925d3b6..82e6868bee4 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -30,12 +30,13 @@ * 1C(%esp) - %ds * 20(%esp) - %es * 24(%esp) - %fs - * 28(%esp) - orig_eax - * 2C(%esp) - %eip - * 30(%esp) - %cs - * 34(%esp) - %eflags - * 38(%esp) - %oldesp - * 3C(%esp) - %oldss + * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS + * 2C(%esp) - orig_eax + * 30(%esp) - %eip + * 34(%esp) - %cs + * 38(%esp) - %eflags + * 3C(%esp) - %oldesp + * 40(%esp) - %oldss * * "current" is in register %ebx during any slow entries. */ @@ -101,8 +102,99 @@ #define resume_userspace_sig resume_userspace #endif +/* + * User gs save/restore + * + * %gs is used for userland TLS and kernel only uses it for stack + * canary which is required to be at %gs:20 by gcc. Read the comment + * at the top of stackprotector.h for more info. + * + * Local labels 98 and 99 are used. 
+ */ +#ifdef CONFIG_X86_32_LAZY_GS + + /* unfortunately push/pop can't be no-op */ +.macro PUSH_GS + pushl $0 + CFI_ADJUST_CFA_OFFSET 4 +.endm +.macro POP_GS pop=0 + addl $(4 + \pop), %esp + CFI_ADJUST_CFA_OFFSET -(4 + \pop) +.endm +.macro POP_GS_EX +.endm + + /* all the rest are no-op */ +.macro PTGS_TO_GS +.endm +.macro PTGS_TO_GS_EX +.endm +.macro GS_TO_REG reg +.endm +.macro REG_TO_PTGS reg +.endm +.macro SET_KERNEL_GS reg +.endm + +#else /* CONFIG_X86_32_LAZY_GS */ + +.macro PUSH_GS + pushl %gs + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET gs, 0*/ +.endm + +.macro POP_GS pop=0 +98: popl %gs + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_RESTORE gs*/ + .if \pop <> 0 + add $\pop, %esp + CFI_ADJUST_CFA_OFFSET -\pop + .endif +.endm +.macro POP_GS_EX +.pushsection .fixup, "ax" +99: movl $0, (%esp) + jmp 98b +.section __ex_table, "a" + .align 4 + .long 98b, 99b +.popsection +.endm + +.macro PTGS_TO_GS +98: mov PT_GS(%esp), %gs +.endm +.macro PTGS_TO_GS_EX +.pushsection .fixup, "ax" +99: movl $0, PT_GS(%esp) + jmp 98b +.section __ex_table, "a" + .align 4 + .long 98b, 99b +.popsection +.endm + +.macro GS_TO_REG reg + movl %gs, \reg + /*CFI_REGISTER gs, \reg*/ +.endm +.macro REG_TO_PTGS reg + movl \reg, PT_GS(%esp) + /*CFI_REL_OFFSET gs, PT_GS*/ +.endm +.macro SET_KERNEL_GS reg + xorl \reg, \reg + movl \reg, %gs +.endm + +#endif /* CONFIG_X86_32_LAZY_GS */ + .macro SAVE_ALL cld + PUSH_GS pushl %fs CFI_ADJUST_CFA_OFFSET 4 /*CFI_REL_OFFSET fs, 0;*/ @@ -138,6 +230,7 @@ movl %edx, %es movl $(__KERNEL_PERCPU), %edx movl %edx, %fs + SET_KERNEL_GS %edx .endm .macro RESTORE_INT_REGS @@ -164,7 +257,7 @@ CFI_RESTORE eax .endm -.macro RESTORE_REGS +.macro RESTORE_REGS pop=0 RESTORE_INT_REGS 1: popl %ds CFI_ADJUST_CFA_OFFSET -4 @@ -175,6 +268,7 @@ 3: popl %fs CFI_ADJUST_CFA_OFFSET -4 /*CFI_RESTORE fs;*/ + POP_GS \pop .pushsection .fixup, "ax" 4: movl $0, (%esp) jmp 1b @@ -188,6 +282,7 @@ .long 2b, 5b .long 3b, 6b .popsection + POP_GS_EX .endm .macro RING0_INT_FRAME @@ -368,6 +463,7 @@ sysenter_exit: xorl %ebp,%ebp TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs + PTGS_TO_GS ENABLE_INTERRUPTS_SYSEXIT #ifdef CONFIG_AUDITSYSCALL @@ -416,6 +512,7 @@ sysexit_audit: .align 4 .long 1b,2b .popsection + PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) # system call handler stub @@ -458,8 +555,7 @@ restore_all: restore_nocheck: TRACE_IRQS_IRET restore_nocheck_notrace: - RESTORE_REGS - addl $4, %esp # skip orig_eax/error_code + RESTORE_REGS 4 # skip orig_eax/error_code CFI_ADJUST_CFA_OFFSET -4 irq_return: INTERRUPT_RETURN @@ -1078,7 +1174,10 @@ ENTRY(page_fault) CFI_ADJUST_CFA_OFFSET 4 ALIGN error_code: - /* the function address is in %fs's slot on the stack */ + /* the function address is in %gs's slot on the stack */ + pushl %fs + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET fs, 0*/ pushl %es CFI_ADJUST_CFA_OFFSET 4 /*CFI_REL_OFFSET es, 0*/ @@ -1107,20 +1206,15 @@ error_code: CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebx, 0 cld - pushl %fs - CFI_ADJUST_CFA_OFFSET 4 - /*CFI_REL_OFFSET fs, 0*/ movl $(__KERNEL_PERCPU), %ecx movl %ecx, %fs UNWIND_ESPFIX_STACK - popl %ecx - CFI_ADJUST_CFA_OFFSET -4 - /*CFI_REGISTER es, ecx*/ - movl PT_FS(%esp), %edi # get the function address + GS_TO_REG %ecx + movl PT_GS(%esp), %edi # get the function address movl PT_ORIG_EAX(%esp), %edx # get the error code movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart - mov %ecx, PT_FS(%esp) - /*CFI_REL_OFFSET fs, ES*/ + REG_TO_PTGS %ecx + SET_KERNEL_GS %ecx movl $(__USER_DS), %ecx movl %ecx, %ds movl %ecx, %es diff --git a/arch/x86/kernel/process_32.c 
b/arch/x86/kernel/process_32.c index d58a340e1be..86122fa2a1b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -539,7 +539,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ - savesegment(gs, prev->gs); + lazy_save_gs(prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. @@ -585,7 +585,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) - loadsegment(gs, next->gs); + lazy_load_gs(next->gs); percpu_write(current_task, next_p); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 508b6b57d0c..7ec39ab37a2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -75,10 +75,7 @@ static inline bool invalid_selector(u16 value) static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); - regno >>= 2; - if (regno > FS) - --regno; - return ®s->bx + regno; + return ®s->bx + (regno >> 2); } static u16 get_segment_reg(struct task_struct *task, unsigned long offset) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 19e33b6cd59..da2e314f61b 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -283,7 +283,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) /* There's one problem which normal hardware doesn't have: the Host * can't handle us removing entries we're currently using. So we clear * the GS register here: if it's needed it'll be reloaded anyway. */ - loadsegment(gs, 0); + lazy_load_gs(0); lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0); } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 37230342c2c..95ff6a0e942 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -323,13 +323,14 @@ static void load_TLS_descriptor(struct thread_struct *t, static void xen_load_tls(struct thread_struct *t, unsigned int cpu) { /* - * XXX sleazy hack: If we're being called in a lazy-cpu zone, - * it means we're in a context switch, and %gs has just been - * saved. This means we can zero it out to prevent faults on - * exit from the hypervisor if the next process has no %gs. - * Either way, it has been saved, and the new value will get - * loaded properly. This will go away as soon as Xen has been - * modified to not save/restore %gs for normal hypercalls. + * XXX sleazy hack: If we're being called in a lazy-cpu zone + * and lazy gs handling is enabled, it means we're in a + * context switch, and %gs has just been saved. This means we + * can zero it out to prevent faults on exit from the + * hypervisor if the next process has no %gs. Either way, it + * has been saved, and the new value will get loaded properly. + * This will go away as soon as Xen has been modified to not + * save/restore %gs for normal hypercalls. 
* * On x86_64, this hack is not used for %gs, because gs points * to KERNEL_GS_BASE (and uses it for PDA references), so we @@ -341,7 +342,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu) */ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { #ifdef CONFIG_X86_32 - loadsegment(gs, 0); + lazy_load_gs(0); #else loadsegment(fs, 0); #endif -- cgit v1.2.3 From 60a5317ff0f42dd313094b88f809f63041568b08 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Feb 2009 22:17:40 +0900 Subject: x86: implement x86_32 stack protector Impact: stack protector for x86_32 Implement stack protector for x86_32. GDT entry 28 is used for it. It's set to point to stack_canary-20 and have the length of 24 bytes. CONFIG_CC_STACKPROTECTOR turns off CONFIG_X86_32_LAZY_GS and sets %gs to the stack canary segment on entry. As %gs is otherwise unused by the kernel, the canary can be anywhere. It's defined as a percpu variable. x86_32 exception handlers take register frame on stack directly as struct pt_regs. With -fstack-protector turned on, gcc copies the whole structure after the stack canary and (of course) doesn't copy back on return thus losing all changed. For now, -fno-stack-protector is added to all files which contain those functions. We definitely need something better. Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +- arch/x86/include/asm/processor.h | 4 ++ arch/x86/include/asm/segment.h | 9 +++- arch/x86/include/asm/stackprotector.h | 91 +++++++++++++++++++++++++++++++++-- arch/x86/include/asm/system.h | 21 ++++++++ arch/x86/kernel/Makefile | 18 +++++++ arch/x86/kernel/cpu/common.c | 17 ++++--- arch/x86/kernel/entry_32.S | 2 +- arch/x86/kernel/head_32.S | 20 +++++++- arch/x86/kernel/process_32.c | 1 + arch/x86/kernel/setup_percpu.c | 2 + 11 files changed, 172 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5bcdede71ba..f760a22f95d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -209,7 +209,7 @@ config X86_TRAMPOLINE config X86_32_LAZY_GS def_bool y - depends on X86_32 + depends on X86_32 && !CC_STACKPROTECTOR config KTIME_SCALAR def_bool X86_32 @@ -1356,7 +1356,6 @@ config CC_STACKPROTECTOR_ALL config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - depends on X86_64 select CC_STACKPROTECTOR_ALL help This option turns on the -fstack-protector GCC feature. 
This diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 9763eb70013..5a947210425 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -396,7 +396,11 @@ DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); DECLARE_INIT_PER_CPU(irq_stack_union); DECLARE_PER_CPU(char *, irq_stack_ptr); +#else /* X86_64 */ +#ifdef CONFIG_CC_STACKPROTECTOR +DECLARE_PER_CPU(unsigned long, stack_canary); #endif +#endif /* X86_64 */ extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int xstate_size; diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 1dc1b51ac62..14e0ed86a6f 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -61,7 +61,7 @@ * * 26 - ESPFIX small SS * 27 - per-cpu [ offset to per-cpu data area ] - * 28 - unused + * 28 - stack_canary-20 [ for stack protector ] * 29 - unused * 30 - unused * 31 - TSS for double fault handler @@ -95,6 +95,13 @@ #define __KERNEL_PERCPU 0 #endif +#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16) +#ifdef CONFIG_CC_STACKPROTECTOR +#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8) +#else +#define __KERNEL_STACK_CANARY 0 +#endif + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index ee275e9f48a..fa7e5bd6fbe 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -1,3 +1,35 @@ +/* + * GCC stack protector support. + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and unfortunately gcc requires it to be at a fixed offset from %gs. + * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64 + * and x86_32 use segment registers differently and thus handles this + * requirement differently. + * + * On x86_64, %gs is shared by percpu area and stack canary. All + * percpu symbols are zero based and %gs points to the base of percpu + * area. The first occupant of the percpu area is always + * irq_stack_union which contains stack_canary at offset 40. Userland + * %gs is always saved and restored on kernel entry and exit using + * swapgs, so stack protector doesn't add any complexity there. + * + * On x86_32, it's slightly more complicated. As in x86_64, %gs is + * used for userland TLS. Unfortunately, some processors are much + * slower at loading segment registers with different value when + * entering and leaving the kernel, so the kernel uses %fs for percpu + * area and manages %gs lazily so that %gs is switched only when + * necessary, usually during task switch. + * + * As gcc requires the stack canary at %gs:20, %gs can't be managed + * lazily if stack protector is enabled, so the kernel saves and + * restores userland %gs on kernel entry and exit. This behavior is + * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in + * system.h to hide the details. + */ + #ifndef _ASM_STACKPROTECTOR_H #define _ASM_STACKPROTECTOR_H 1 @@ -6,8 +38,18 @@ #include #include #include +#include +#include #include +/* + * 24 byte read-only segment initializer for stack canary. Linker + * can't handle the address bit shifting. Address will be set in + * head_32 for boot CPU and setup_per_cpu_areas() for others. 
+ */ +#define GDT_STACK_CANARY_INIT \ + [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } }, + /* * Initialize the stackprotector canary value. * @@ -19,12 +61,9 @@ static __always_inline void boot_init_stack_canary(void) u64 canary; u64 tsc; - /* - * Build time only check to make sure the stack_canary is at - * offset 40 in the pda; this is a gcc ABI requirement - */ +#ifdef CONFIG_X86_64 BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40); - +#endif /* * We both use the random pool and the current TSC as a source * of randomness. The TSC only matters for very early init, @@ -36,7 +75,49 @@ static __always_inline void boot_init_stack_canary(void) canary += tsc + (tsc << 32UL); current->stack_canary = canary; +#ifdef CONFIG_X86_64 percpu_write(irq_stack_union.stack_canary, canary); +#else + percpu_write(stack_canary, canary); +#endif +} + +static inline void setup_stack_canary_segment(int cpu) +{ +#ifdef CONFIG_X86_32 + unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); + struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); + struct desc_struct desc; + + desc = gdt_table[GDT_ENTRY_STACK_CANARY]; + desc.base0 = canary & 0xffff; + desc.base1 = (canary >> 16) & 0xff; + desc.base2 = (canary >> 24) & 0xff; + write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); +#endif +} + +static inline void load_stack_canary_segment(void) +{ +#ifdef CONFIG_X86_32 + asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory"); +#endif +} + +#else /* CC_STACKPROTECTOR */ + +#define GDT_STACK_CANARY_INIT + +/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */ + +static inline void setup_stack_canary_segment(int cpu) +{ } + +static inline void load_stack_canary_segment(void) +{ +#ifdef CONFIG_X86_32 + asm volatile ("mov %0, %%gs" : : "r" (0)); +#endif } #endif /* CC_STACKPROTECTOR */ diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 79b98e5b96f..2692ee8ef03 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -23,6 +23,22 @@ struct task_struct *__switch_to(struct task_struct *prev, #ifdef CONFIG_X86_32 +#ifdef CONFIG_CC_STACKPROTECTOR +#define __switch_canary \ + "movl "__percpu_arg([current_task])",%%ebx\n\t" \ + "movl %P[task_canary](%%ebx),%%ebx\n\t" \ + "movl %%ebx,"__percpu_arg([stack_canary])"\n\t" +#define __switch_canary_oparam \ + , [stack_canary] "=m" (per_cpu_var(stack_canary)) +#define __switch_canary_iparam \ + , [current_task] "m" (per_cpu_var(current_task)) \ + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) +#else /* CC_STACKPROTECTOR */ +#define __switch_canary +#define __switch_canary_oparam +#define __switch_canary_iparam +#endif /* CC_STACKPROTECTOR */ + /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. 
@@ -46,6 +62,7 @@ do { \ "pushl %[next_ip]\n\t" /* restore EIP */ \ "jmp __switch_to\n" /* regparm call */ \ "1:\t" \ + __switch_canary \ "popl %%ebp\n\t" /* restore EBP */ \ "popfl\n" /* restore flags */ \ \ @@ -58,6 +75,8 @@ do { \ "=b" (ebx), "=c" (ecx), "=d" (edx), \ "=S" (esi), "=D" (edi) \ \ + __switch_canary_oparam \ + \ /* input parameters: */ \ : [next_sp] "m" (next->thread.sp), \ [next_ip] "m" (next->thread.ip), \ @@ -66,6 +85,8 @@ do { \ [prev] "a" (prev), \ [next] "d" (next) \ \ + __switch_canary_iparam \ + \ : /* reloaded segment registers */ \ "memory"); \ } while (0) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 37fa30bada1..b1f8be33300 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -24,6 +24,24 @@ CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) CFLAGS_hpet.o := $(nostackp) CFLAGS_tsc.o := $(nostackp) CFLAGS_paravirt.o := $(nostackp) +# +# On x86_32, register frame is passed verbatim on stack as struct +# pt_regs. gcc considers the parameter to belong to the callee and +# with -fstack-protector it copies pt_regs to the callee's stack frame +# to put the structure after the stack canary causing changes made by +# the exception handlers to be lost. Turn off stack protector for all +# files containing functions which take struct pt_regs from register +# frame. +# +# The proper way to fix this is to teach gcc that the argument belongs +# to the caller for these functions, oh well... +# +ifdef CONFIG_X86_32 +CFLAGS_process_32.o := $(nostackp) +CFLAGS_vm86_32.o := $(nostackp) +CFLAGS_signal.o := $(nostackp) +CFLAGS_traps.o := $(nostackp) +endif obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 41b0de6df87..260fe4cb2c8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "cpu.h" @@ -122,6 +123,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, + GDT_STACK_CANARY_INIT #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); @@ -261,6 +263,7 @@ void load_percpu_segment(int cpu) loadsegment(gs, 0); wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); #endif + load_stack_canary_segment(); } /* Current gdt points %fs at the "master" per-cpu area: after this, @@ -946,16 +949,21 @@ unsigned long kernel_eflags; */ DEFINE_PER_CPU(struct orig_ist, orig_ist); -#else +#else /* x86_64 */ + +#ifdef CONFIG_CC_STACKPROTECTOR +DEFINE_PER_CPU(unsigned long, stack_canary); +#endif -/* Make sure %fs is initialized properly in idle threads */ +/* Make sure %fs and %gs are initialized properly in idle threads */ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) { memset(regs, 0, sizeof(struct pt_regs)); regs->fs = __KERNEL_PERCPU; + regs->gs = __KERNEL_STACK_CANARY; return regs; } -#endif +#endif /* x86_64 */ /* * cpu_init() initializes state that is per-CPU. Some data is already @@ -1120,9 +1128,6 @@ void __cpuinit cpu_init(void) __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); #endif - /* Clear %gs. 
*/ - asm volatile ("mov %0, %%gs" : : "r" (0)); - /* Clear all 6 debug registers: */ set_debugreg(0, 0); set_debugreg(0, 1); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 82e6868bee4..5f5bd22adcd 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -186,7 +186,7 @@ /*CFI_REL_OFFSET gs, PT_GS*/ .endm .macro SET_KERNEL_GS reg - xorl \reg, \reg + movl $(__KERNEL_STACK_CANARY), \reg movl \reg, %gs .endm diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 24c0e5cd71e..924e31615fb 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -19,6 +19,7 @@ #include #include #include +#include /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) @@ -437,8 +438,25 @@ is386: movl $2,%ecx # set MP movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu - xorl %eax,%eax # Clear GS and LDT +#ifdef CONFIG_CC_STACKPROTECTOR + /* + * The linker can't handle this by relocation. Manually set + * base address in stack canary segment descriptor. + */ + cmpb $0,ready + jne 1f + movl $per_cpu__gdt_page,%eax + movl $per_cpu__stack_canary,%ecx + movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) + shrl $16, %ecx + movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) + movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) +1: +#endif + movl $(__KERNEL_STACK_CANARY),%eax movl %eax,%gs + + xorl %eax,%eax # Clear LDT lldt %ax cld # gcc2 wants the direction flag cleared at all times diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 86122fa2a1b..9a62383e7c3 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,6 +212,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) regs.ds = __USER_DS; regs.es = __USER_DS; regs.fs = __KERNEL_PERCPU; + regs.gs = __KERNEL_STACK_CANARY; regs.orig_ax = -1; regs.ip = (unsigned long) kernel_thread_helper; regs.cs = __KERNEL_CS | get_kernel_rpl(); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ef91747bbed..d992e6cff73 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef CONFIG_DEBUG_PER_CPU_MAPS # define DBG(x...) printk(KERN_DEBUG x) @@ -95,6 +96,7 @@ void __init setup_per_cpu_areas(void) per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; setup_percpu_segment(cpu); + setup_stack_canary_segment(cpu); /* * Copy data used in early init routines from the * initial arrays to the per cpu data areas. These -- cgit v1.2.3 From 0e81cb59c7120191c0243c686b591797bc79e5c6 Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Tue, 10 Feb 2009 16:45:37 -0800 Subject: x86, apic: fix initialization of wakeup_cpu With refactoring of wake_cpu macros the 32bit code in tip doesn't execute generic_apic_probe if CONFIG_X86_32_NON_STANDARD is not set. Even on a x86 STANDARD cpu we need to execute the generic_apic_probe function, as we rely on this function to execute the update_genapic quirk which initilizes apic->wakeup_cpu. Failing to do so results in we making a call to a null function in do_boot_cpu. The stack trace without the patch goes like this. 
Booting processor 1 APIC 0x1 ip 0x6000 BUG: unable to handle kernel NULL pointer dereference at (null) IP: [<(null)>] (null) *pdpt = 0000000000839001 *pde = 0000000000c97067 *pte = 0000000000000163 Oops: 0000 [#1] SMP last sysfs file: Modules linked in: Pid: 1, comm: swapper Not tainted (2.6.29-rc4-tip #18) VMware Virtual Platform EIP: 0062:[<00000000>] EFLAGS: 00010293 CPU: 0 EIP is at 0x0 EAX: 00000001 EBX: 00006000 ECX: c077ed00 EDX: 00006000 ESI: 00000001 EDI: 00000001 EBP: ef04cf40 ESP: ef04cf1c DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 006a Process swapper (pid: 1, ti=ef04c000 task=ef050000 task.ti=ef04c000) Stack: c0644e52 00000000 ef04cf24 ef04cf24 c064468d c0886dc0 00000000 c0702aea ef055480 00000001 00000101 dead4ead ffffffff ffffffff c08af530 00000000 c0709715 ef04cf60 ef04cf60 00000001 00000000 00000000 dead4ead ffffffff Call Trace: [] ? native_cpu_up+0x2de/0x45b [] ? do_fork_idle+0x0/0x19 [] ? _cpu_up+0x88/0xe8 [] ? cpu_up+0x42/0x4e [] ? kernel_init+0x99/0x14b [] ? kernel_init+0x0/0x14b [] ? kernel_thread_helper+0x7/0x10 Code: Bad EIP value. EIP: [<00000000>] 0x0 SS:ESP 006a:ef04cf1c I think we should call generic_apic_probe unconditionally for 32 bit now. Signed-off-by: Alok N Kataria Acked-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 8d8fa992c9a..150e6d0a3b4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -936,7 +936,7 @@ void __init setup_arch(char **cmdline_p) map_vsyscall(); #endif -#if defined(CONFIG_X86_32_NON_STANDARD) || defined(CONFIG_X86_BIGSMP) +#ifdef CONFIG_X86_32 generic_apic_probe(); #endif -- cgit v1.2.3 From 160d8dac12932ad6eb4a359b66521e2e3282ea7d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 11 Feb 2009 11:27:39 +0100 Subject: x86, apic: make generic_apic_probe() generally available Impact: build fix Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 6 ++++++ arch/x86/kernel/setup.c | 2 -- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index f4835a1be36..fba49f66228 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -33,7 +33,13 @@ } while (0) +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) extern void generic_apic_probe(void); +#else +static inline void generic_apic_probe(void) +{ +} +#endif #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 150e6d0a3b4..8fce6c71451 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -936,9 +936,7 @@ void __init setup_arch(char **cmdline_p) map_vsyscall(); #endif -#ifdef CONFIG_X86_32 generic_apic_probe(); -#endif early_quirks(); -- cgit v1.2.3 From 5c79d2a517a9905599d192db8ce77ab5f1a2faca Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 11 Feb 2009 16:31:00 +0900 Subject: x86: fix x86_32 stack protector bugs Impact: fix x86_32 stack protector Brian Gerst found out that %gs was being initialized to stack_canary instead of stack_canary - 20, which basically gave the same canary value for all threads. Fixing this also exposed the following bugs. * cpu_idle() didn't call boot_init_stack_canary() * stack canary switching in switch_to() was being done too late making the initial run of a new thread use the old stack canary value. Fix all of them and while at it update comment in cpu_idle() about calling boot_init_stack_canary(). 
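The "- 20" in this fix is load-bearing: as the stackprotector.h comment added earlier in this series notes, gcc's i386 stack protector fetches the canary from %gs:20, so the canary segment's base must sit 20 bytes below the per-cpu stack_canary variable for that fetch to land on it; basing the segment at &stack_canary itself made the load read past the variable, which is the bug reported above. A minimal C sketch of the corrected descriptor setup follows, reusing the helpers the patch itself uses (per_cpu(), get_cpu_gdt_table(), write_gdt_entry(), DESCTYPE_S) and assuming the usual kernel includes; it is illustrative only, not a drop-in replacement for setup_stack_canary_segment().

	/*
	 * Sketch: base the canary segment 20 bytes below the per-cpu
	 * variable so the compiler-generated "%gs:20" access hits it.
	 * Helpers and field names mirror the patch above; assumes
	 * <asm/desc.h>, <asm/segment.h> and <asm/percpu.h> context.
	 */
	static void canary_segment_sketch(int cpu)
	{
		unsigned long base = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);
		struct desc_struct desc = gdt[GDT_ENTRY_STACK_CANARY];

		desc.base0 = base & 0xffff;		/* base bits  0..15 */
		desc.base1 = (base >> 16) & 0xff;	/* base bits 16..23 */
		desc.base2 = (base >> 24) & 0xff;	/* base bits 24..31 */
		write_gdt_entry(gdt, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
	}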
Reported-by: Brian Gerst Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/stackprotector.h | 2 +- arch/x86/include/asm/system.h | 8 +++----- arch/x86/kernel/head_32.S | 1 + arch/x86/kernel/process_32.c | 10 ++++++++++ arch/x86/kernel/process_64.c | 11 +++++------ 5 files changed, 20 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index fa7e5bd6fbe..c2d742c6e15 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -85,7 +85,7 @@ static __always_inline void boot_init_stack_canary(void) static inline void setup_stack_canary_segment(int cpu) { #ifdef CONFIG_X86_32 - unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); + unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20; struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); struct desc_struct desc; diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 2692ee8ef03..7a80f72bec4 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -25,13 +25,11 @@ struct task_struct *__switch_to(struct task_struct *prev, #ifdef CONFIG_CC_STACKPROTECTOR #define __switch_canary \ - "movl "__percpu_arg([current_task])",%%ebx\n\t" \ - "movl %P[task_canary](%%ebx),%%ebx\n\t" \ - "movl %%ebx,"__percpu_arg([stack_canary])"\n\t" + "movl %P[task_canary](%[next]), %%ebx\n\t" \ + "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" #define __switch_canary_oparam \ , [stack_canary] "=m" (per_cpu_var(stack_canary)) #define __switch_canary_iparam \ - , [current_task] "m" (per_cpu_var(current_task)) \ , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) #else /* CC_STACKPROTECTOR */ #define __switch_canary @@ -60,9 +58,9 @@ do { \ "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ "pushl %[next_ip]\n\t" /* restore EIP */ \ + __switch_canary \ "jmp __switch_to\n" /* regparm call */ \ "1:\t" \ - __switch_canary \ "popl %%ebp\n\t" /* restore EBP */ \ "popfl\n" /* restore flags */ \ \ diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 924e31615fb..cf21fd0cf6a 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -447,6 +447,7 @@ is386: movl $2,%ecx # set MP jne 1f movl $per_cpu__gdt_page,%eax movl $per_cpu__stack_canary,%ecx + subl $20, %ecx movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 9a62383e7c3..b50604bb1e4 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -11,6 +11,7 @@ #include +#include #include #include #include @@ -91,6 +92,15 @@ void cpu_idle(void) { int cpu = smp_processor_id(); + /* + * If we're the non-boot CPU, nothing set the stack canary up + * for us. CPU0 already has it initialized but no harm in + * doing it again. This is a good place for updating it, as + * we wont ever return from this function (so the invalid + * canaries already on the stack wont ever trigger). 
+ */ + boot_init_stack_canary(); + current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 8eb169e4558..836ef6575f0 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -120,12 +120,11 @@ void cpu_idle(void) current_thread_info()->status |= TS_POLLING; /* - * If we're the non-boot CPU, nothing set the PDA stack - * canary up for us - and if we are the boot CPU we have - * a 0 stack canary. This is a good place for updating - * it, as we wont ever return from this function (so the - * invalid canaries already on the stack wont ever - * trigger): + * If we're the non-boot CPU, nothing set the stack canary up + * for us. CPU0 already has it initialized but no harm in + * doing it again. This is a good place for updating it, as + * we wont ever return from this function (so the invalid + * canaries already on the stack wont ever trigger). */ boot_init_stack_canary(); -- cgit v1.2.3 From aa78bcfa01dec3cdbde3cda098ce32abbd9c3bf6 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 10 Feb 2009 09:51:45 -0500 Subject: x86: use pt_regs pointer in do_device_not_available() The generic exception handler (error_code) passes in the pt_regs pointer and the error code (unused in this case). The commit "x86: fix math_emu register frame access" changed this to pass by value, which doesn't work correctly with stack protector enabled. Change it back to use the pt_regs pointer. Signed-off-by: Brian Gerst Acked-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/traps.h | 2 +- arch/x86/kernel/traps.c | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index cf3bb053da0..0d5342515b8 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long); dotraplinkage void do_overflow(struct pt_regs *, long); dotraplinkage void do_bounds(struct pt_regs *, long); dotraplinkage void do_invalid_op(struct pt_regs *, long); -dotraplinkage void do_device_not_available(struct pt_regs); +dotraplinkage void do_device_not_available(struct pt_regs *, long); dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); dotraplinkage void do_invalid_TSS(struct pt_regs *, long); dotraplinkage void do_segment_not_present(struct pt_regs *, long); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 3b7b2e19020..71a8f871331 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -905,19 +905,20 @@ void math_emulate(struct math_emu_info *info) } #endif /* CONFIG_MATH_EMULATION */ -dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs) +dotraplinkage void __kprobes +do_device_not_available(struct pt_regs *regs, long error_code) { #ifdef CONFIG_X86_32 if (read_cr0() & X86_CR0_EM) { struct math_emu_info info = { }; - conditional_sti(®s); + conditional_sti(regs); - info.regs = ®s; + info.regs = regs; math_emulate(&info); } else { math_state_restore(); /* interrupts still off */ - conditional_sti(®s); + conditional_sti(regs); } #else math_state_restore(); -- cgit v1.2.3 From 253f29a4ae9cc6cdc7b94f96517f27a93885a6ce Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 10 Feb 2009 09:51:46 -0500 Subject: x86: pass in pt_regs pointer for syscalls that need it Some syscalls need to access the pt_regs structure, either to copy user register state 
or to modifiy it. This patch adds stubs to load the address of the pt_regs struct into the %eax register, and changes the syscalls to regparm(1) to receive the pt_regs pointer as the first argument. Signed-off-by: Brian Gerst Acked-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/linkage.h | 7 +++++++ arch/x86/include/asm/syscalls.h | 25 +++++++++++++++---------- arch/x86/kernel/entry_32.S | 20 ++++++++++++++++++++ arch/x86/kernel/ioport.c | 4 +--- arch/x86/kernel/process_32.c | 35 ++++++++++++++--------------------- arch/x86/kernel/signal.c | 35 +++++++---------------------------- arch/x86/kernel/syscall_table_32.S | 20 ++++++++++---------- arch/x86/kernel/vm86_32.c | 15 +++++++-------- 8 files changed, 81 insertions(+), 80 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 5d98d0b68ff..2fd5926fb97 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -17,6 +17,13 @@ */ #define asmregparm __attribute__((regparm(3))) +/* + * For syscalls that need a pointer to the pt_regs struct (ie. fork). + * The regs pointer is passed in %eax as the first argument. The + * remaining function arguments remain on the stack. + */ +#define ptregscall __attribute__((regparm(1))) + /* * Make sure the compiler doesn't do anything stupid with the * arguments on the stack - they are owned by the *caller*, not diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index c0b0bda754e..617295255a1 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -29,21 +29,26 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *); /* X86_32 only */ #ifdef CONFIG_X86_32 /* kernel/process_32.c */ -asmlinkage int sys_fork(struct pt_regs); -asmlinkage int sys_clone(struct pt_regs); -asmlinkage int sys_vfork(struct pt_regs); -asmlinkage int sys_execve(struct pt_regs); +ptregscall int sys_fork(struct pt_regs *); +ptregscall int sys_clone(struct pt_regs *, unsigned long, + unsigned long, int __user *, + unsigned long, int __user *); +ptregscall int sys_vfork(struct pt_regs *); +ptregscall int sys_execve(struct pt_regs *, char __user *, + char __user * __user *, + char __user * __user *); /* kernel/signal_32.c */ asmlinkage int sys_sigsuspend(int, int, old_sigset_t); asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); -asmlinkage int sys_sigaltstack(unsigned long); -asmlinkage unsigned long sys_sigreturn(unsigned long); -asmlinkage int sys_rt_sigreturn(unsigned long); +ptregscall int sys_sigaltstack(struct pt_regs *, const stack_t __user *, + stack_t __user *); +ptregscall unsigned long sys_sigreturn(struct pt_regs *); +ptregscall int sys_rt_sigreturn(struct pt_regs *); /* kernel/ioport.c */ -asmlinkage long sys_iopl(unsigned long); +ptregscall long sys_iopl(struct pt_regs *, unsigned int); /* kernel/sys_i386_32.c */ asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, @@ -59,8 +64,8 @@ struct oldold_utsname; asmlinkage int sys_olduname(struct oldold_utsname __user *); /* kernel/vm86_32.c */ -asmlinkage int sys_vm86old(struct pt_regs); -asmlinkage int sys_vm86(struct pt_regs); +ptregscall int sys_vm86old(struct pt_regs *, struct vm86_struct __user *); +ptregscall int sys_vm86(struct pt_regs *, unsigned long, unsigned long); #else /* CONFIG_X86_32 */ diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5f5bd22adcd..3de7b5710dc 100644 --- a/arch/x86/kernel/entry_32.S +++ 
b/arch/x86/kernel/entry_32.S @@ -697,6 +697,26 @@ syscall_badsys: END(syscall_badsys) CFI_ENDPROC +/* + * System calls that need a pt_regs pointer. + */ +#define PTREGSCALL(name) \ + ALIGN; \ +ptregs_##name: \ + leal 4(%esp),%eax; \ + jmp sys_##name; + +PTREGSCALL(iopl) +PTREGSCALL(fork) +PTREGSCALL(clone) +PTREGSCALL(vfork) +PTREGSCALL(execve) +PTREGSCALL(sigaltstack) +PTREGSCALL(sigreturn) +PTREGSCALL(rt_sigreturn) +PTREGSCALL(vm86) +PTREGSCALL(vm86old) + .macro FIXUP_ESPFIX_STACK /* since we are on a wrong stack, we cant make it a C code :( */ PER_CPU(gdt_page, %ebx) diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index b12208f4dfe..7ec14864631 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -131,10 +131,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs) } #ifdef CONFIG_X86_32 -asmlinkage long sys_iopl(unsigned long regsp) +ptregscall long sys_iopl(struct pt_regs *regs, unsigned int level) { - struct pt_regs *regs = (struct pt_regs *)®sp; - unsigned int level = regs->bx; struct thread_struct *t = ¤t->thread; int rc; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index b50604bb1e4..5a9dcfb01f7 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -603,24 +603,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } -asmlinkage int sys_fork(struct pt_regs regs) +ptregscall int sys_fork(struct pt_regs *regs) { - return do_fork(SIGCHLD, regs.sp, ®s, 0, NULL, NULL); + return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); } -asmlinkage int sys_clone(struct pt_regs regs) +ptregscall int sys_clone(struct pt_regs *regs, unsigned long clone_flags, + unsigned long newsp, int __user *parent_tidptr, + unsigned long unused, int __user *child_tidptr) { - unsigned long clone_flags; - unsigned long newsp; - int __user *parent_tidptr, *child_tidptr; - - clone_flags = regs.bx; - newsp = regs.cx; - parent_tidptr = (int __user *)regs.dx; - child_tidptr = (int __user *)regs.di; if (!newsp) - newsp = regs.sp; - return do_fork(clone_flags, newsp, ®s, 0, parent_tidptr, child_tidptr); + newsp = regs->sp; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } /* @@ -633,27 +627,26 @@ asmlinkage int sys_clone(struct pt_regs regs) * do not have enough call-clobbered registers to hold all * the information you need. */ -asmlinkage int sys_vfork(struct pt_regs regs) +ptregscall int sys_vfork(struct pt_regs *regs) { - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, ®s, 0, NULL, NULL); + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); } /* * sys_execve() executes a new program. */ -asmlinkage int sys_execve(struct pt_regs regs) +ptregscall int sys_execve(struct pt_regs *regs, char __user *u_filename, + char __user * __user *argv, + char __user * __user *envp) { int error; char *filename; - filename = getname((char __user *) regs.bx); + filename = getname(u_filename); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; - error = do_execve(filename, - (char __user * __user *) regs.cx, - (char __user * __user *) regs.dx, - ®s); + error = do_execve(filename, argv, envp, regs); if (error == 0) { /* Make sure we don't return using sysenter.. 
*/ set_thread_flag(TIF_IRET); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 8562387c75a..d7a158367e3 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -549,39 +549,28 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, #endif /* CONFIG_X86_32 */ #ifdef CONFIG_X86_32 -asmlinkage int sys_sigaltstack(unsigned long bx) -{ - /* - * This is needed to make gcc realize it doesn't own the - * "struct pt_regs" - */ - struct pt_regs *regs = (struct pt_regs *)&bx; - const stack_t __user *uss = (const stack_t __user *)bx; - stack_t __user *uoss = (stack_t __user *)regs->cx; - - return do_sigaltstack(uss, uoss, regs->sp); -} +ptregscall int +sys_sigaltstack(struct pt_regs *regs, const stack_t __user *uss, + stack_t __user *uoss) #else /* !CONFIG_X86_32 */ asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) +#endif /* CONFIG_X86_32 */ { return do_sigaltstack(uss, uoss, regs->sp); } -#endif /* CONFIG_X86_32 */ /* * Do a signal return; undo the signal stack. */ #ifdef CONFIG_X86_32 -asmlinkage unsigned long sys_sigreturn(unsigned long __unused) +ptregscall unsigned long sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; - struct pt_regs *regs; unsigned long ax; sigset_t set; - regs = (struct pt_regs *) &__unused; frame = (struct sigframe __user *)(regs->sp - 8); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -640,23 +629,13 @@ badframe: } #ifdef CONFIG_X86_32 -/* - * Note: do not pass in pt_regs directly as with tail-call optimization - * GCC will incorrectly stomp on the caller's frame and corrupt user-space - * register state: - */ -asmlinkage int sys_rt_sigreturn(unsigned long __unused) -{ - struct pt_regs *regs = (struct pt_regs *)&__unused; - - return do_rt_sigreturn(regs); -} +ptregscall int sys_rt_sigreturn(struct pt_regs *regs) #else /* !CONFIG_X86_32 */ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +#endif /* CONFIG_X86_32 */ { return do_rt_sigreturn(regs); } -#endif /* CONFIG_X86_32 */ /* * OK, we're invoking a handler: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index e2e86a08f31..3bdb64829b8 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -1,7 +1,7 @@ ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit - .long sys_fork + .long ptregs_fork .long sys_read .long sys_write .long sys_open /* 5 */ @@ -10,7 +10,7 @@ ENTRY(sys_call_table) .long sys_creat .long sys_link .long sys_unlink /* 10 */ - .long sys_execve + .long ptregs_execve .long sys_chdir .long sys_time .long sys_mknod @@ -109,17 +109,17 @@ ENTRY(sys_call_table) .long sys_newlstat .long sys_newfstat .long sys_uname - .long sys_iopl /* 110 */ + .long ptregs_iopl /* 110 */ .long sys_vhangup .long sys_ni_syscall /* old "idle" system call */ - .long sys_vm86old + .long ptregs_vm86old .long sys_wait4 .long sys_swapoff /* 115 */ .long sys_sysinfo .long sys_ipc .long sys_fsync - .long sys_sigreturn - .long sys_clone /* 120 */ + .long ptregs_sigreturn + .long ptregs_clone /* 120 */ .long sys_setdomainname .long sys_newuname .long sys_modify_ldt @@ -165,14 +165,14 @@ ENTRY(sys_call_table) .long sys_mremap .long sys_setresuid16 .long sys_getresuid16 /* 165 */ - .long sys_vm86 + .long ptregs_vm86 .long sys_ni_syscall /* Old sys_query_module */ .long sys_poll .long sys_nfsservctl .long sys_setresgid16 /* 170 */ .long sys_getresgid16 .long sys_prctl - .long 
sys_rt_sigreturn + .long ptregs_rt_sigreturn .long sys_rt_sigaction .long sys_rt_sigprocmask /* 175 */ .long sys_rt_sigpending @@ -185,11 +185,11 @@ ENTRY(sys_call_table) .long sys_getcwd .long sys_capget .long sys_capset /* 185 */ - .long sys_sigaltstack + .long ptregs_sigaltstack .long sys_sendfile .long sys_ni_syscall /* reserved for streams1 */ .long sys_ni_syscall /* reserved for streams2 */ - .long sys_vfork /* 190 */ + .long ptregs_vfork /* 190 */ .long sys_getrlimit .long sys_mmap2 .long sys_truncate64 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 55ea30d2a3d..8fa6ba7c923 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -197,9 +197,8 @@ out: static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); -asmlinkage int sys_vm86old(struct pt_regs regs) +ptregscall int sys_vm86old(struct pt_regs *regs, struct vm86_struct __user *v86) { - struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx; struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we @@ -218,7 +217,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs) if (tmp) goto out; memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); - info.regs32 = ®s; + info.regs32 = regs; tsk->thread.vm86_info = v86; do_sys_vm86(&info, tsk); ret = 0; /* we never return here */ @@ -227,7 +226,7 @@ out: } -asmlinkage int sys_vm86(struct pt_regs regs) +ptregscall int sys_vm86(struct pt_regs *regs, unsigned long cmd, unsigned long arg) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. @@ -239,12 +238,12 @@ asmlinkage int sys_vm86(struct pt_regs regs) struct vm86plus_struct __user *v86; tsk = current; - switch (regs.bx) { + switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: - ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); + ret = do_vm86_irq_handling(cmd, (int)arg); goto out; case VM86_PLUS_INSTALL_CHECK: /* @@ -261,14 +260,14 @@ asmlinkage int sys_vm86(struct pt_regs regs) ret = -EPERM; if (tsk->thread.saved_sp0) goto out; - v86 = (struct vm86plus_struct __user *)regs.cx; + v86 = (struct vm86plus_struct __user *)arg; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); ret = -EFAULT; if (tmp) goto out; - info.regs32 = ®s; + info.regs32 = regs; info.vm86plus.is_vm86pus = 1; tsk->thread.vm86_info = (struct vm86_struct __user *)v86; do_sys_vm86(&info, tsk); -- cgit v1.2.3 From 9c8bb6b534d1c89a20bf9bb45e1471cf8f4747c0 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Tue, 10 Feb 2009 09:51:47 -0500 Subject: x86: drop -fno-stack-protector annotations after pt_regs fixes Now that no functions rely on struct pt_regs being passed by value, various "no stack protector" annotations can be dropped. 
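For illustration, the by-value pattern that originally required those per-file -fno-stack-protector flags looked roughly like the first function below: per the Makefile comment being removed here, gcc treats a by-value struct pt_regs argument as belonging to the callee and, with -fstack-protector, may copy it into the callee's own canary-protected frame, so the handler ends up modifying the copy rather than the register frame on the entry stack. Passing a pointer, as the preceding patches do, avoids the copy. The types below are simplified and hypothetical, not kernel code.

	/*
	 * Hypothetical, simplified stand-in for struct pt_regs living on
	 * the entry stack; shows the by-value vs. by-pointer difference.
	 */
	struct regs_frame {
		long ax;
		long ip;
	};

	/*
	 * By value: with -fstack-protector the compiler may copy 'regs'
	 * into this function's frame (past the canary), so this write
	 * changes the copy and is lost when the entry code later uses the
	 * original frame to return.
	 */
	int handle_byval(struct regs_frame regs)
	{
		regs.ax = -1;
		return 0;
	}

	/* By pointer: no copy is made; the write reaches the real frame. */
	int handle_byptr(struct regs_frame *regs)
	{
		regs->ax = -1;
		return 0;
	}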
Signed-off-by: Brian Gerst Acked-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index b1f8be33300..37fa30bada1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -24,24 +24,6 @@ CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) CFLAGS_hpet.o := $(nostackp) CFLAGS_tsc.o := $(nostackp) CFLAGS_paravirt.o := $(nostackp) -# -# On x86_32, register frame is passed verbatim on stack as struct -# pt_regs. gcc considers the parameter to belong to the callee and -# with -fstack-protector it copies pt_regs to the callee's stack frame -# to put the structure after the stack canary causing changes made by -# the exception handlers to be lost. Turn off stack protector for all -# files containing functions which take struct pt_regs from register -# frame. -# -# The proper way to fix this is to teach gcc that the argument belongs -# to the caller for these functions, oh well... -# -ifdef CONFIG_X86_32 -CFLAGS_process_32.o := $(nostackp) -CFLAGS_vm86_32.o := $(nostackp) -CFLAGS_signal.o := $(nostackp) -CFLAGS_traps.o := $(nostackp) -endif obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o -- cgit v1.2.3 From 1c0040047d5499599cc231ca3f105be3ceff8562 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Mon, 9 Feb 2009 10:25:20 -0600 Subject: SGI IA64 UV: fix ia64 build error in the linux-next tree Fix the ia64 build error that occurs in the linux-next tree by introducing an ia64 version of uv.h. Additionally, clean up the usage of is_uv_system(). Signed-off-by: Dean Nelson Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/uv/uv.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 arch/ia64/include/asm/uv/uv.h (limited to 'arch') diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h new file mode 100644 index 00000000000..61b5bdfd980 --- /dev/null +++ b/arch/ia64/include/asm/uv/uv.h @@ -0,0 +1,13 @@ +#ifndef _ASM_IA64_UV_UV_H +#define _ASM_IA64_UV_UV_H + +#include +#include + +static inline int is_uv_system(void) +{ + /* temporary support for running on hardware simulator */ + return IS_MEDUSA() || ia64_platform_is("uv"); +} + +#endif /* _ASM_IA64_UV_UV_H */ -- cgit v1.2.3 From c5c606d9dce6e0852a401c57a8b0542acdbb6796 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Mon, 9 Feb 2009 18:18:14 -0800 Subject: x86: cleanup, rename CONFIG_X86_NON_STANDARD to CONFIG_X86_EXTENDED_PLATFORM Patch to rename the CONFIG_X86_NON_STANDARD to CONFIG_X86_EXTENDED_PLATFORM. The new name represents the subarches better. Also, default this to 'y' so that many of the sub architectures that were not easily visible now become visible. Also re-organize the extended architecture platform and non standard platform list alphabetically as suggested by Ingo. 
Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 83 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 46 insertions(+), 37 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 270ecf90bdb..4a27aa41f6d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -268,65 +268,48 @@ config X86_BIGSMP ---help--- This option is needed for the systems that have more than 8 CPUs -config X86_NON_STANDARD - bool "Support for non-standard x86 platforms" +config X86_EXTENDED_PLATFORM + bool "Support for extended (non-PC) x86 platforms" + default y ---help--- If you disable this option then the kernel will only support standard PC platforms. (which covers the vast majority of systems out there.) If you enable this option then you'll be able to select a number - of less common non-PC x86 platforms: VisWS, RDC321, SGI/UV. + of non-PC x86 platforms. If you have one of these systems, or if you want to build a generic distribution kernel, say Y here - otherwise say N. -config X86_VISWS - bool "SGI 320/540 (Visual Workstation)" - depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT - depends on X86_NON_STANDARD - ---help--- - The SGI Visual Workstation series is an IA32-based workstation - based on SGI systems chips with some legacy PC hardware attached. +# This is an alphabetically sorted list of 64 bit extended platforms +# Please maintain the alphabetic order if and when there are additions - Say Y here to create a kernel to run on the SGI 320 or 540. - - A kernel compiled for the Visual Workstation will run on general - PCs as well. See for details. - -config X86_RDC321X - bool "RDC R-321x SoC" - depends on X86_32 - depends on X86_NON_STANDARD - select M486 - select X86_REBOOTFIXUPS +config X86_VSMP + bool "ScaleMP vSMP" + select PARAVIRT + depends on X86_64 && PCI + depends on X86_EXTENDED_PLATFORM ---help--- - This option is needed for RDC R-321x system-on-chip, also known - as R-8610-(G). - If you don't have one of these chips, you should say N here. + Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is + supposed to run on these EM64T-based machines. Only choose this option + if you have one of these machines. config X86_UV bool "SGI Ultraviolet" depends on X86_64 - depends on X86_NON_STANDARD + depends on X86_EXTENDED_PLATFORM ---help--- This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. -config X86_VSMP - bool "Support for ScaleMP vSMP" - select PARAVIRT - depends on X86_64 && PCI - depends on X86_NON_STANDARD - ---help--- - Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is - supposed to run on these EM64T-based machines. Only choose this option - if you have one of these machines. +# Following is an alphabetically sorted list of 32 bit extended platforms +# Please maintain the alphabetic order if and when there are additions config X86_ELAN bool "AMD Elan" depends on X86_32 - depends on X86_NON_STANDARD + depends on X86_EXTENDED_PLATFORM ---help--- Select this for an AMD Elan processor. @@ -334,16 +317,29 @@ config X86_ELAN If unsure, choose "PC-compatible" instead. +config X86_RDC321X + bool "RDC R-321x SoC" + depends on X86_32 + depends on X86_EXTENDED_PLATFORM + select M486 + select X86_REBOOTFIXUPS + ---help--- + This option is needed for RDC R-321x system-on-chip, also known + as R-8610-(G). + If you don't have one of these chips, you should say N here. 
+ config X86_32_NON_STANDARD bool "Support non-standard 32-bit SMP architectures" depends on X86_32 && SMP - depends on X86_NON_STANDARD + depends on X86_EXTENDED_PLATFORM ---help--- This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default subarchitectures. It is intended for a generic binary kernel. if you select them all, kernel will probe it one by one. and will fallback to default. +# Alphabetically sorted list of Non standard 32 bit platforms + config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" depends on X86_32_NON_STANDARD @@ -356,6 +352,19 @@ config X86_NUMAQ of Flat Logical. You will need a new lynxer.elf file to flash your firmware with - send email to . +config X86_VISWS + bool "SGI 320/540 (Visual Workstation)" + depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT + depends on X86_32_NON_STANDARD + ---help--- + The SGI Visual Workstation series is an IA32-based workstation + based on SGI systems chips with some legacy PC hardware attached. + + Say Y here to create a kernel to run on the SGI 320 or 540. + + A kernel compiled for the Visual Workstation will run on general + PCs as well. See for details. + config X86_SUMMIT bool "Summit/EXA (IBM x440)" depends on X86_32_NON_STANDARD @@ -364,7 +373,7 @@ config X86_SUMMIT In particular, it is needed for the x440. config X86_ES7000 - bool "Support for Unisys ES7000 IA32 series" + bool "Unisys ES7000 IA32 series" depends on X86_32_NON_STANDARD && X86_BIGSMP ---help--- Support for Unisys ES7000 systems. Say 'Y' here if this kernel is -- cgit v1.2.3 From 17993b49b1f540aace8e9b4242530d0b3376eb2a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 11 Feb 2009 17:20:51 +0100 Subject: x86: make hibernation always-possible This commit: aced3ce: x86/Voyager: remove HIBERNATION Kconfig quirk Made hibernation only available on UP - instead of making it available on all of x86. Fix it. 
Reported-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4a27aa41f6d..148c112c9ca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -140,7 +140,6 @@ config HAVE_CPUMASK_OF_CPU_MAP config ARCH_HIBERNATION_POSSIBLE def_bool y - depends on !SMP config ARCH_SUSPEND_POSSIBLE def_bool y -- cgit v1.2.3 From 7651194fb715b2d57658c05a710408f6b8448951 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 11 Feb 2009 22:26:52 +0530 Subject: x86: mm/init_32.c fix compilation warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/x86/mm/init_32.c: In function ‘find_low_pfn_range’: arch/x86/mm/init_32.c:696: warning: format ‘%u’ expects type ‘unsigned int’, but Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 2cef0507441..d48f2560364 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -692,7 +692,7 @@ void __init find_low_pfn_range(void) max_pfn = MAXMEM_PFN + highmem_pages; if (highmem_pages + MAXMEM_PFN > max_pfn) { printk(KERN_WARNING "only %luMB highmem pages " - "available, ignoring highmem size of %uMB.\n", + "available, ignoring highmem size of %luMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); highmem_pages = 0; -- cgit v1.2.3 From ba1511bf7fbda452138e4096bf10d5a382710f4f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 11 Feb 2009 23:38:25 +0530 Subject: x86: kernel/mpparse.c fix compilation warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/x86/kernel/mpparse.c: In function ‘smp_scan_config’: arch/x86/kernel/mpparse.c:696: warning: format ‘%08lx’ expects type ‘long unsigned int’, but argument 3 has type ‘phys_addr_t’ arch/x86/kernel/mpparse.c: In function ‘update_mp_table’: arch/x86/kernel/mpparse.c:1014: warning: format ‘%lx’ expects type ‘long unsigned int’, but argument 2 has type ‘phys_addr_t’ Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 66ebb823f39..20076445319 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -692,8 +692,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, #endif mpf_found = mpf; - printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", - mpf, virt_to_phys(mpf)); + printk(KERN_INFO "found SMP MP-table at [%p] %llx\n", + mpf, (u64)virt_to_phys(mpf)); if (!reserve) return 1; @@ -1011,7 +1011,7 @@ static int __init update_mp_table(void) if (!smp_check_mpc(mpc, oem, str)) return 0; - printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); + printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf)); printk(KERN_INFO "physptr: %x\n", mpf->physptr); if (mpc_new_phys && mpc->length > mpc_new_length) { -- cgit v1.2.3 From b12bdaf11f935d7be030207e3c77faeaeab8ded3 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Wed, 11 Feb 2009 16:43:58 -0500 Subject: x86: use regparm(3) for passed-in pt_regs pointer Some syscalls need to access the pt_regs structure, either to copy user register state or to modifiy it. 
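In the style this patch converts to, such a syscall takes nothing but the saved-register frame and decodes its own arguments from it. The sketch below is a userspace-only illustration, not code from the patch: struct pt_regs_sketch is an abbreviated stand-in for the kernel's 32-bit struct pt_regs and sys_example() is a made-up syscall; the bx/cx argument mapping and the "fall back to regs->sp" fixup mirror the sys_clone() conversion in the diff further down.

        #include <stdio.h>

        /*
         * Abbreviated stand-in for the kernel's 32-bit struct pt_regs;
         * only the fields the sketch touches are listed.
         */
        struct pt_regs_sketch {
                unsigned long bx, cx, dx, si, di;   /* syscall arguments */
                unsigned long sp;                   /* saved user stack pointer */
        };

        /* hypothetical syscall, written in the pt_regs-only style */
        static int sys_example(struct pt_regs_sketch *regs)
        {
                unsigned long flags = regs->bx;     /* formerly the 1st C parameter */
                unsigned long newsp = regs->cx;     /* formerly the 2nd C parameter */

                if (!newsp)                         /* same fixup sys_clone() keeps */
                        newsp = regs->sp;

                printf("flags=%#lx newsp=%#lx\n", flags, newsp);
                return 0;
        }

        int main(void)
        {
                /* fake frame; in the kernel the entry stubs pass the real one */
                struct pt_regs_sketch frame = { .bx = 0x11, .cx = 0, .sp = 0xbf000000 };

                return sys_example(&frame);
        }
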
This patch adds stubs to load the address of the pt_regs struct into the %eax register, and changes the syscalls to take the pointer as an argument instead of relying on the assumption that the pt_regs structure overlaps the function arguments. Drop the use of regparm(1) due to concern about gcc bugs, and to move in the direction of the eventual removal of regparm(0) for asmlinkage. Signed-off-by: Brian Gerst Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/linkage.h | 7 ------- arch/x86/include/asm/syscalls.h | 25 ++++++++++--------------- arch/x86/kernel/ioport.c | 3 ++- arch/x86/kernel/process_32.c | 27 +++++++++++++++++---------- arch/x86/kernel/signal.c | 21 ++++++++++++++------- arch/x86/kernel/vm86_32.c | 11 ++++++----- 6 files changed, 49 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 2fd5926fb97..5d98d0b68ff 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -17,13 +17,6 @@ */ #define asmregparm __attribute__((regparm(3))) -/* - * For syscalls that need a pointer to the pt_regs struct (ie. fork). - * The regs pointer is passed in %eax as the first argument. The - * remaining function arguments remain on the stack. - */ -#define ptregscall __attribute__((regparm(1))) - /* * Make sure the compiler doesn't do anything stupid with the * arguments on the stack - they are owned by the *caller*, not diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 617295255a1..77bb31a88ba 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -29,26 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *); /* X86_32 only */ #ifdef CONFIG_X86_32 /* kernel/process_32.c */ -ptregscall int sys_fork(struct pt_regs *); -ptregscall int sys_clone(struct pt_regs *, unsigned long, - unsigned long, int __user *, - unsigned long, int __user *); -ptregscall int sys_vfork(struct pt_regs *); -ptregscall int sys_execve(struct pt_regs *, char __user *, - char __user * __user *, - char __user * __user *); +int sys_fork(struct pt_regs *); +int sys_clone(struct pt_regs *); +int sys_vfork(struct pt_regs *); +int sys_execve(struct pt_regs *); /* kernel/signal_32.c */ asmlinkage int sys_sigsuspend(int, int, old_sigset_t); asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); -ptregscall int sys_sigaltstack(struct pt_regs *, const stack_t __user *, - stack_t __user *); -ptregscall unsigned long sys_sigreturn(struct pt_regs *); -ptregscall int sys_rt_sigreturn(struct pt_regs *); +int sys_sigaltstack(struct pt_regs *); +unsigned long sys_sigreturn(struct pt_regs *); +int sys_rt_sigreturn(struct pt_regs *); /* kernel/ioport.c */ -ptregscall long sys_iopl(struct pt_regs *, unsigned int); +long sys_iopl(struct pt_regs *); /* kernel/sys_i386_32.c */ asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, @@ -64,8 +59,8 @@ struct oldold_utsname; asmlinkage int sys_olduname(struct oldold_utsname __user *); /* kernel/vm86_32.c */ -ptregscall int sys_vm86old(struct pt_regs *, struct vm86_struct __user *); -ptregscall int sys_vm86(struct pt_regs *, unsigned long, unsigned long); +int sys_vm86old(struct pt_regs *); +int sys_vm86(struct pt_regs *); #else /* CONFIG_X86_32 */ diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 7ec14864631..e41980a373a 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -131,8 +131,9 @@ static int 
do_iopl(unsigned int level, struct pt_regs *regs) } #ifdef CONFIG_X86_32 -ptregscall long sys_iopl(struct pt_regs *regs, unsigned int level) +long sys_iopl(struct pt_regs *regs) { + unsigned int level = regs->bx; struct thread_struct *t = ¤t->thread; int rc; diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5a9dcfb01f7..fec79ad85dc 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -603,15 +603,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } -ptregscall int sys_fork(struct pt_regs *regs) +int sys_fork(struct pt_regs *regs) { return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); } -ptregscall int sys_clone(struct pt_regs *regs, unsigned long clone_flags, - unsigned long newsp, int __user *parent_tidptr, - unsigned long unused, int __user *child_tidptr) +int sys_clone(struct pt_regs *regs) { + unsigned long clone_flags; + unsigned long newsp; + int __user *parent_tidptr, *child_tidptr; + + clone_flags = regs->bx; + newsp = regs->cx; + parent_tidptr = (int __user *)regs->dx; + child_tidptr = (int __user *)regs->di; if (!newsp) newsp = regs->sp; return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); @@ -627,7 +633,7 @@ ptregscall int sys_clone(struct pt_regs *regs, unsigned long clone_flags, * do not have enough call-clobbered registers to hold all * the information you need. */ -ptregscall int sys_vfork(struct pt_regs *regs) +int sys_vfork(struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); } @@ -635,18 +641,19 @@ ptregscall int sys_vfork(struct pt_regs *regs) /* * sys_execve() executes a new program. */ -ptregscall int sys_execve(struct pt_regs *regs, char __user *u_filename, - char __user * __user *argv, - char __user * __user *envp) +int sys_execve(struct pt_regs *regs) { int error; char *filename; - filename = getname(u_filename); + filename = getname((char __user *) regs->bx); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; - error = do_execve(filename, argv, envp, regs); + error = do_execve(filename, + (char __user * __user *) regs->cx, + (char __user * __user *) regs->dx, + regs); if (error == 0) { /* Make sure we don't return using sysenter.. */ set_thread_flag(TIF_IRET); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index d7a158367e3..ccfb27412f0 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -549,23 +549,27 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, #endif /* CONFIG_X86_32 */ #ifdef CONFIG_X86_32 -ptregscall int -sys_sigaltstack(struct pt_regs *regs, const stack_t __user *uss, - stack_t __user *uoss) +int sys_sigaltstack(struct pt_regs *regs) +{ + const stack_t __user *uss = (const stack_t __user *)regs->bx; + stack_t __user *uoss = (stack_t __user *)regs->cx; + + return do_sigaltstack(uss, uoss, regs->sp); +} #else /* !CONFIG_X86_32 */ asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) -#endif /* CONFIG_X86_32 */ { return do_sigaltstack(uss, uoss, regs->sp); } +#endif /* CONFIG_X86_32 */ /* * Do a signal return; undo the signal stack. 
*/ #ifdef CONFIG_X86_32 -ptregscall unsigned long sys_sigreturn(struct pt_regs *regs) +unsigned long sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; unsigned long ax; @@ -629,13 +633,16 @@ badframe: } #ifdef CONFIG_X86_32 -ptregscall int sys_rt_sigreturn(struct pt_regs *regs) +int sys_rt_sigreturn(struct pt_regs *regs) +{ + return do_rt_sigreturn(regs); +} #else /* !CONFIG_X86_32 */ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) -#endif /* CONFIG_X86_32 */ { return do_rt_sigreturn(regs); } +#endif /* CONFIG_X86_32 */ /* * OK, we're invoking a handler: diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 8fa6ba7c923..d7ac84e7fc1 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -197,8 +197,9 @@ out: static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); -ptregscall int sys_vm86old(struct pt_regs *regs, struct vm86_struct __user *v86) +int sys_vm86old(struct pt_regs *regs) { + struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx; struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we @@ -226,7 +227,7 @@ out: } -ptregscall int sys_vm86(struct pt_regs *regs, unsigned long cmd, unsigned long arg) +int sys_vm86(struct pt_regs *regs) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. @@ -238,12 +239,12 @@ ptregscall int sys_vm86(struct pt_regs *regs, unsigned long cmd, unsigned long a struct vm86plus_struct __user *v86; tsk = current; - switch (cmd) { + switch (regs->bx) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: - ret = do_vm86_irq_handling(cmd, (int)arg); + ret = do_vm86_irq_handling(regs->bx, (int)regs->cx); goto out; case VM86_PLUS_INSTALL_CHECK: /* @@ -260,7 +261,7 @@ ptregscall int sys_vm86(struct pt_regs *regs, unsigned long cmd, unsigned long a ret = -EPERM; if (tsk->thread.saved_sp0) goto out; - v86 = (struct vm86plus_struct __user *)arg; + v86 = (struct vm86plus_struct __user *)regs->cx; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); -- cgit v1.2.3 From b924a28138572f03bc8647c2be8f876d27e2666a Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 11 Feb 2009 12:08:01 -0800 Subject: x86: rename *-defs.h to *-_types.h for consistency The kernel tends to call definition-only headers *_types.h, so rename the x86 page/pgtable headers accordingly. 
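A toy sketch of what that convention looks like (plain userspace C, not kernel code; the constants are the 2-level values moved verbatim in the hunks below, and everything else about the real headers is omitted): the _types half carries bare definitions, and the full header builds derived macros and declarations on top of it.

        #include <stdio.h>

        /* roughly what pgtable-2level_types.h provides: definitions only */
        #define PGDIR_SHIFT     22
        #define PTRS_PER_PGD    1024
        #define PTRS_PER_PTE    1024

        /* the non-_types header layers derived macros on top of those */
        #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)

        int main(void)
        {
                printf("2-level paging: %d pgd entries, each mapping %lu MB\n",
                       PTRS_PER_PGD, PGDIR_SIZE >> 20);
                return 0;
        }
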
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable-2level-defs.h | 20 -------------------- arch/x86/include/asm/pgtable-2level_types.h | 20 ++++++++++++++++++++ arch/x86/include/asm/pgtable-3level-defs.h | 28 ---------------------------- arch/x86/include/asm/pgtable-3level_types.h | 28 ++++++++++++++++++++++++++++ arch/x86/include/asm/pgtable_32.h | 4 ++-- 5 files changed, 50 insertions(+), 50 deletions(-) delete mode 100644 arch/x86/include/asm/pgtable-2level-defs.h create mode 100644 arch/x86/include/asm/pgtable-2level_types.h delete mode 100644 arch/x86/include/asm/pgtable-3level-defs.h create mode 100644 arch/x86/include/asm/pgtable-3level_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-2level-defs.h b/arch/x86/include/asm/pgtable-2level-defs.h deleted file mode 100644 index d77db8990ea..00000000000 --- a/arch/x86/include/asm/pgtable-2level-defs.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H -#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H - -#define SHARED_KERNEL_PMD 0 - -/* - * traditional i386 two-level paging structure: - */ - -#define PGDIR_SHIFT 22 -#define PTRS_PER_PGD 1024 - -/* - * the i386 is two-level, so we don't really have any - * PMD directory physically. - */ - -#define PTRS_PER_PTE 1024 - -#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h new file mode 100644 index 00000000000..d77db8990ea --- /dev/null +++ b/arch/x86/include/asm/pgtable-2level_types.h @@ -0,0 +1,20 @@ +#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H +#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H + +#define SHARED_KERNEL_PMD 0 + +/* + * traditional i386 two-level paging structure: + */ + +#define PGDIR_SHIFT 22 +#define PTRS_PER_PGD 1024 + +/* + * the i386 is two-level, so we don't really have any + * PMD directory physically. 
+ */ + +#define PTRS_PER_PTE 1024 + +#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable-3level-defs.h b/arch/x86/include/asm/pgtable-3level-defs.h deleted file mode 100644 index 62561367653..00000000000 --- a/arch/x86/include/asm/pgtable-3level-defs.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H -#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H - -#ifdef CONFIG_PARAVIRT -#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) -#else -#define SHARED_KERNEL_PMD 1 -#endif - -/* - * PGDIR_SHIFT determines what a top-level page table entry can map - */ -#define PGDIR_SHIFT 30 -#define PTRS_PER_PGD 4 - -/* - * PMD_SHIFT determines the size of the area a middle-level - * page table can map - */ -#define PMD_SHIFT 21 -#define PTRS_PER_PMD 512 - -/* - * entries per page directory level - */ -#define PTRS_PER_PTE 512 - -#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h new file mode 100644 index 00000000000..62561367653 --- /dev/null +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -0,0 +1,28 @@ +#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H +#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H + +#ifdef CONFIG_PARAVIRT +#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) +#else +#define SHARED_KERNEL_PMD 1 +#endif + +/* + * PGDIR_SHIFT determines what a top-level page table entry can map + */ +#define PGDIR_SHIFT 30 +#define PTRS_PER_PGD 4 + +/* + * PMD_SHIFT determines the size of the area a middle-level + * page table can map + */ +#define PMD_SHIFT 21 +#define PTRS_PER_PMD 512 + +/* + * entries per page directory level + */ +#define PTRS_PER_PTE 512 + +#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 1952bb762aa..72d20e2a40f 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -39,11 +39,11 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); * newer 3-level PAE-mode page tables. 
*/ #ifdef CONFIG_X86_PAE -# include +# include # define PMD_SIZE (1UL << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE - 1)) #else -# include +# include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) -- cgit v1.2.3 From 8d19c99faf6165ef095138dd595d46b9bbb34055 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 18:46:18 -0800 Subject: Split pgtable.h into pgtable_types.h and pgtable.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable.h | 212 +-------------------------------- arch/x86/include/asm/pgtable_types.h | 220 +++++++++++++++++++++++++++++++++++ 2 files changed, 221 insertions(+), 211 deletions(-) create mode 100644 arch/x86/include/asm/pgtable_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 860f1b635c4..10404e7bf32 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -3,164 +3,7 @@ #include -#define FIRST_USER_ADDRESS 0 - -#define _PAGE_BIT_PRESENT 0 /* is present */ -#define _PAGE_BIT_RW 1 /* writeable */ -#define _PAGE_BIT_USER 2 /* userspace addressable */ -#define _PAGE_BIT_PWT 3 /* page write through */ -#define _PAGE_BIT_PCD 4 /* page cache disabled */ -#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ -#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ -#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ -#define _PAGE_BIT_PAT 7 /* on 4KB pages */ -#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ -#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ -#define _PAGE_BIT_UNUSED3 11 -#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ - -/* If _PAGE_BIT_PRESENT is clear, we use these: */ -/* - if the user mapped it with PROT_NONE; pte_present gives true */ -#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL -/* - set: nonlinear file mapping, saved PTE; unset:swap */ -#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY - -#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) -#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) -#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) -#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) -#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) -#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) -#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) -#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) -#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) -#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) -#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) -#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) -#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) -#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) -#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) -#define __HAVE_ARCH_PTE_SPECIAL - -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) -#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) -#else -#define _PAGE_NX (_AT(pteval_t, 0)) -#endif - -#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) -#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) - -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_DIRTY) -#define _KERNPG_TABLE (_PAGE_PRESENT | 
_PAGE_RW | _PAGE_ACCESSED | \ - _PAGE_DIRTY) - -/* Set of bits not changed in pte_modify */ -#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ - _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) - -#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) -#define _PAGE_CACHE_WB (0) -#define _PAGE_CACHE_WC (_PAGE_PWT) -#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) -#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) - -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_NX) - -#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ - _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_ACCESSED) -#define PAGE_COPY PAGE_COPY_NOEXEC -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_ACCESSED) - -#define __PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) -#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) - -#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) -#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) -#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) -#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) -#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) -#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) -#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) - -#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP) -#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP) -#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP) -#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP) - -#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) -#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) -#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) -#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC) -#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) -#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS) -#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE) -#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) -#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE) -#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) -#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) -#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE) - -#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) -#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) -#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS) -#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC) - -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE 
-#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - -/* - * early identity mapping pte attrib macros. - */ -#ifdef CONFIG_X86_64 -#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC -#else -/* - * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection - * bits are combined, this will alow user to access the high address mapped - * VDSO in the presence of CONFIG_COMPAT_VDSO - */ -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ -#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ -#endif +#include /* * Macro to mark a page protection value as UC- @@ -172,9 +15,6 @@ #ifndef __ASSEMBLY__ -#define pgprot_writecombine pgprot_writecombine -extern pgprot_t pgprot_writecombine(pgprot_t prot); - /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -316,8 +156,6 @@ static inline pte_t pte_mkspecial(pte_t pte) return pte_set_flags(pte, _PAGE_SPECIAL); } -extern pteval_t __supported_pte_mask; - static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) | @@ -376,32 +214,6 @@ static inline int is_new_memtype_allowed(unsigned long flags, return 1; } -#ifndef __ASSEMBLY__ -/* Indicate that x86 has its own track and untrack pfn vma functions */ -#define __HAVE_PFNMAP_TRACKING - -#define __HAVE_PHYS_MEM_ACCESS_PROT -struct file; -pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t *vma_prot); -#endif - -/* Install a pte for a particular vaddr in kernel space. */ -void set_pte_vaddr(unsigned long vaddr, pte_t pte); - -#ifdef CONFIG_X86_32 -extern void native_pagetable_setup_start(pgd_t *base); -extern void native_pagetable_setup_done(pgd_t *base); -#else -static inline void native_pagetable_setup_start(pgd_t *base) {} -static inline void native_pagetable_setup_done(pgd_t *base) {} -#endif - -struct seq_file; -extern void arch_report_meminfo(struct seq_file *m); - #ifdef CONFIG_PARAVIRT #include #else /* !CONFIG_PARAVIRT */ @@ -662,28 +474,6 @@ static inline int pgd_none(pgd_t pgd) #ifndef __ASSEMBLY__ -enum { - PG_LEVEL_NONE, - PG_LEVEL_4K, - PG_LEVEL_2M, - PG_LEVEL_1G, - PG_LEVEL_NUM -}; - -#ifdef CONFIG_PROC_FS -extern void update_page_count(int level, unsigned long pages); -#else -static inline void update_page_count(int level, unsigned long pages) { } -#endif - -/* - * Helper function that returns the kernel pagetable entry controlling - * the virtual address 'address'. NULL means no pagetable entry present. - * NOTE: the return type is pte_t but if the pmd is PSE then we return it - * as a pte too. 
- */ -extern pte_t *lookup_address(unsigned long address, unsigned int *level); - /* local pte updates need not use xchg for locking */ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) { diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h new file mode 100644 index 00000000000..f80f5a66bb8 --- /dev/null +++ b/arch/x86/include/asm/pgtable_types.h @@ -0,0 +1,220 @@ +#ifndef _ASM_X86_PGTABLE_DEFS_H +#define _ASM_X86_PGTABLE_DEFS_H + +#include + +#define FIRST_USER_ADDRESS 0 + +#define _PAGE_BIT_PRESENT 0 /* is present */ +#define _PAGE_BIT_RW 1 /* writeable */ +#define _PAGE_BIT_USER 2 /* userspace addressable */ +#define _PAGE_BIT_PWT 3 /* page write through */ +#define _PAGE_BIT_PCD 4 /* page cache disabled */ +#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ +#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ +#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ +#define _PAGE_BIT_PAT 7 /* on 4KB pages */ +#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ +#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ +#define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + +/* If _PAGE_BIT_PRESENT is clear, we use these: */ +/* - if the user mapped it with PROT_NONE; pte_present gives true */ +#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL +/* - set: nonlinear file mapping, saved PTE; unset:swap */ +#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY + +#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) +#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) +#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) +#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) +#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) +#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) +#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) +#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) +#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) +#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) +#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) +#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) +#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) +#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) +#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) +#define __HAVE_ARCH_PTE_SPECIAL + +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +#else +#define _PAGE_NX (_AT(pteval_t, 0)) +#endif + +#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) + +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ + _PAGE_DIRTY) + +/* Set of bits not changed in pte_modify */ +#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) +#define _PAGE_CACHE_WB (0) +#define _PAGE_CACHE_WC (_PAGE_PWT) +#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) +#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) + +#define PAGE_NONE 
__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) + +#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ + _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) +#define PAGE_COPY PAGE_COPY_NOEXEC +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) + +#define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) +#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) + +#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) +#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) +#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) +#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) +#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) + +#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP) +#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP) +#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP) +#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP) + +#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) +#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) +#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) +#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC) +#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) +#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS) +#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE) +#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) +#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE) +#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) +#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) +#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE) + +#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) +#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) +#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS) +#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC) + +/* xwr */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_EXEC +#define __P101 PAGE_READONLY_EXEC +#define __P110 PAGE_COPY_EXEC +#define __P111 PAGE_COPY_EXEC + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_EXEC +#define __S101 PAGE_READONLY_EXEC +#define __S110 PAGE_SHARED_EXEC +#define __S111 PAGE_SHARED_EXEC + +/* + * early identity mapping pte attrib macros. + */ +#ifdef CONFIG_X86_64 +#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC +#else +/* + * For PDE_IDENT_ATTR include USER bit. 
As the PDE and PTE protection + * bits are combined, this will alow user to access the high address mapped + * VDSO in the presence of CONFIG_COMPAT_VDSO + */ +#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ +#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ +#endif + +#ifndef __ASSEMBLY__ + +extern pteval_t __supported_pte_mask; + +#define pgprot_writecombine pgprot_writecombine +extern pgprot_t pgprot_writecombine(pgprot_t prot); + +/* Indicate that x86 has its own track and untrack pfn vma functions */ +#define __HAVE_PFNMAP_TRACKING + +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t *vma_prot); + +/* Install a pte for a particular vaddr in kernel space. */ +void set_pte_vaddr(unsigned long vaddr, pte_t pte); + +#ifdef CONFIG_X86_32 +extern void native_pagetable_setup_start(pgd_t *base); +extern void native_pagetable_setup_done(pgd_t *base); +#else +static inline void native_pagetable_setup_start(pgd_t *base) {} +static inline void native_pagetable_setup_done(pgd_t *base) {} +#endif + +struct seq_file; +extern void arch_report_meminfo(struct seq_file *m); + +enum { + PG_LEVEL_NONE, + PG_LEVEL_4K, + PG_LEVEL_2M, + PG_LEVEL_1G, + PG_LEVEL_NUM +}; + +#ifdef CONFIG_PROC_FS +extern void update_page_count(int level, unsigned long pages); +#else +static inline void update_page_count(int level, unsigned long pages) { } +#endif + +/* + * Helper function that returns the kernel pagetable entry controlling + * the virtual address 'address'. NULL means no pagetable entry present. + * NOTE: the return type is pte_t but if the pmd is PSE then we return it + * as a pte too. + */ +extern pte_t *lookup_address(unsigned long address, unsigned int *level); + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_X86_PGTABLE_DEFS_H */ -- cgit v1.2.3 From f402a65f93c7127b2bd93a4b2fe182cd859fb4c1 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 18:49:05 -0800 Subject: x86: Split pgtable_32.h into pgtable_32.h and pgtable_32_types.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable_32.h | 42 +----------------------------- arch/x86/include/asm/pgtable_32_types.h | 46 +++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 41 deletions(-) create mode 100644 arch/x86/include/asm/pgtable_32_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 72d20e2a40f..97612fc7632 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_PGTABLE_32_H #define _ASM_X86_PGTABLE_32_H +#include /* * The Linux memory management assumes a three-level page table setup. On @@ -33,47 +34,6 @@ void paging_init(void); extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); -/* - * The Linux x86 paging architecture is 'compile-time dual-mode', it - * implements both the traditional 2-level x86 page tables and the - * newer 3-level PAE-mode page tables. 
- */ -#ifdef CONFIG_X86_PAE -# include -# define PMD_SIZE (1UL << PMD_SHIFT) -# define PMD_MASK (~(PMD_SIZE - 1)) -#else -# include -#endif - -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE - 1)) - -/* Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8 * 1024 * 1024) -#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) -#ifdef CONFIG_X86_PAE -#define LAST_PKMAP 512 -#else -#define LAST_PKMAP 1024 -#endif - -#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ - & PMD_MASK) - -#ifdef CONFIG_HIGHMEM -# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) -#else -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) -#endif - -#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) /* * Define this if things work differently on an i386 and an i486: diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h new file mode 100644 index 00000000000..bd8df3b2fe0 --- /dev/null +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -0,0 +1,46 @@ +#ifndef _ASM_X86_PGTABLE_32_DEFS_H +#define _ASM_X86_PGTABLE_32_DEFS_H + +/* + * The Linux x86 paging architecture is 'compile-time dual-mode', it + * implements both the traditional 2-level x86 page tables and the + * newer 3-level PAE-mode page tables. + */ +#ifdef CONFIG_X86_PAE +# include +# define PMD_SIZE (1UL << PMD_SHIFT) +# define PMD_MASK (~(PMD_SIZE - 1)) +#else +# include +#endif + +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be a 8MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. 
;) + */ +#define VMALLOC_OFFSET (8 * 1024 * 1024) +#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) +#ifdef CONFIG_X86_PAE +#define LAST_PKMAP 512 +#else +#define LAST_PKMAP 1024 +#endif + +#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ + & PMD_MASK) + +#ifdef CONFIG_HIGHMEM +# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) +#else +# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) +#endif + +#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) + +#endif /* _ASM_X86_PGTABLE_32_DEFS_H */ -- cgit v1.2.3 From fb3551491b20442c0de0d4debd54f40c654bbe98 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 18:50:52 -0800 Subject: x86: Split pgtable_64.h into pgtable_64_types.h and pgtable_64.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable_64.h | 48 ++------------------------------- arch/x86/include/asm/pgtable_64_types.h | 46 +++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 46 deletions(-) create mode 100644 arch/x86/include/asm/pgtable_64_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 1c4e247c51f..6b87bc6d501 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -2,6 +2,8 @@ #define _ASM_X86_PGTABLE_64_H #include +#include + #ifndef __ASSEMBLY__ /* @@ -25,32 +27,6 @@ extern void paging_init(void); #endif /* !__ASSEMBLY__ */ -#define SHARED_KERNEL_PMD 0 - -/* - * PGDIR_SHIFT determines what a top-level page table entry can map - */ -#define PGDIR_SHIFT 39 -#define PTRS_PER_PGD 512 - -/* - * 3rd level page - */ -#define PUD_SHIFT 30 -#define PTRS_PER_PUD 512 - -/* - * PMD_SHIFT determines the size of the area a middle-level - * page table can map - */ -#define PMD_SHIFT 21 -#define PTRS_PER_PMD 512 - -/* - * entries per page directory level - */ -#define PTRS_PER_PTE 512 - #ifndef __ASSEMBLY__ #define pte_ERROR(e) \ @@ -130,26 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd) native_set_pgd(pgd, native_make_pgd(0)); } -#endif /* !__ASSEMBLY__ */ - -#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE - 1)) -#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) -#define PUD_MASK (~(PUD_SIZE - 1)) -#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE - 1)) - - -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) -#define VMALLOC_START _AC(0xffffc20000000000, UL) -#define VMALLOC_END _AC(0xffffe1ffffffffff, UL) -#define VMEMMAP_START _AC(0xffffe20000000000, UL) -#define MODULES_VADDR _AC(0xffffffffa0000000, UL) -#define MODULES_END _AC(0xffffffffff000000, UL) -#define MODULES_LEN (MODULES_END - MODULES_VADDR) - -#ifndef __ASSEMBLY__ - /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h new file mode 100644 index 00000000000..ffaf1934068 --- /dev/null +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -0,0 +1,46 @@ +#ifndef _ASM_X86_PGTABLE_64_DEFS_H +#define _ASM_X86_PGTABLE_64_DEFS_H + +#define SHARED_KERNEL_PMD 0 + +/* + * PGDIR_SHIFT determines what a top-level page table entry can map + */ +#define PGDIR_SHIFT 39 +#define PTRS_PER_PGD 512 + +/* + * 3rd level page + */ +#define PUD_SHIFT 30 +#define PTRS_PER_PUD 512 + +/* + * PMD_SHIFT determines the size of the area a middle-level + * page table can map + */ +#define PMD_SHIFT 21 +#define PTRS_PER_PMD 512 + +/* + * entries per page directory level + */ +#define PTRS_PER_PTE 512 + +#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) +#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE - 1)) +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + + +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +#define VMALLOC_START _AC(0xffffc20000000000, UL) +#define VMALLOC_END _AC(0xffffe1ffffffffff, UL) +#define VMEMMAP_START _AC(0xffffe20000000000, UL) +#define MODULES_VADDR _AC(0xffffffffa0000000, UL) +#define MODULES_END _AC(0xffffffffff000000, UL) +#define MODULES_LEN (MODULES_END - MODULES_VADDR) + +#endif /* _ASM_X86_PGTABLE_64_DEFS_H */ -- cgit v1.2.3 From 1484096ceb4d5f2b27c0fe53f125ee0903eac9af Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 19:02:47 -0800 Subject: x86: Include pgtable_32|64_types.h in pgtable_types.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/pgtable_types.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index f80f5a66bb8..e5d5a8d3577 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -217,4 +217,10 @@ extern pte_t *lookup_address(unsigned long address, unsigned int *level); #endif /* !__ASSEMBLY__ */ +#ifdef CONFIG_X86_32 +# include "pgtable_32_types.h" +#else +# include "pgtable_64_types.h" +#endif + #endif /* _ASM_X86_PGTABLE_DEFS_H */ -- cgit v1.2.3 From 51c78eb3f0eb033f9fb4f2316851df1d9b07b953 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 22:52:14 -0800 Subject: x86: create _types.h counterparts for page*.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page.h | 64 +------------------- arch/x86/include/asm/page_32.h | 89 +-------------------------- arch/x86/include/asm/page_32_types.h | 90 +++++++++++++++++++++++++++ arch/x86/include/asm/page_64.h | 101 +------------------------------ arch/x86/include/asm/page_64.h.rej | 114 +++++++++++++++++++++++++++++++++++ arch/x86/include/asm/page_64_types.h | 102 +++++++++++++++++++++++++++++++ arch/x86/include/asm/page_types.h | 77 +++++++++++++++++++++++ 7 files changed, 389 insertions(+), 248 deletions(-) create mode 100644 arch/x86/include/asm/page_32_types.h create mode 100644 arch/x86/include/asm/page_64.h.rej create mode 100644 arch/x86/include/asm/page_64_types.h create mode 100644 arch/x86/include/asm/page_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 40226999cbf..3b2d2af951c 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -1,42 +1,9 @@ #ifndef _ASM_X86_PAGE_H #define _ASM_X86_PAGE_H -#include - -/* PAGE_SHIFT determines the page size */ -#define 
PAGE_SHIFT 12 -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - #ifdef __KERNEL__ -#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) -#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) - -/* Cast PAGE_MASK to a signed type so that it is sign-extended if - virtual addresses are 32-bits but physical addresses are larger - (ie, 32-bit PAE). */ -#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) - -/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ -#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) - -/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ -#define PTE_FLAGS_MASK (~PTE_PFN_MASK) - -#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) -#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) - -#define HPAGE_SHIFT PMD_SHIFT -#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) - -#define HUGE_MAX_HSTATE 2 - -#ifndef __ASSEMBLY__ -#include -#endif +#include #ifdef CONFIG_X86_64 #include @@ -44,39 +11,18 @@ #include #endif /* CONFIG_X86_64 */ -#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) - -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #ifndef __ASSEMBLY__ -typedef struct { pgdval_t pgd; } pgd_t; -typedef struct { pgprotval_t pgprot; } pgprot_t; - -extern int page_is_ram(unsigned long pagenr); -extern int pagerange_is_ram(unsigned long start, unsigned long end); -extern int devmem_is_allowed(unsigned long pagenr); -extern void map_devmem(unsigned long pfn, unsigned long size, - pgprot_t vma_prot); -extern void unmap_devmem(unsigned long pfn, unsigned long size, - pgprot_t vma_prot); - -extern unsigned long max_low_pfn_mapped; -extern unsigned long max_pfn_mapped; - struct page; static inline void clear_user_page(void *page, unsigned long vaddr, - struct page *pg) + struct page *pg) { clear_page(page); } static inline void copy_user_page(void *to, void *from, unsigned long vaddr, - struct page *topage) + struct page *topage) { copy_page(to, from); } @@ -102,8 +48,6 @@ static inline pgdval_t pgd_flags(pgd_t pgd) #if PAGETABLE_LEVELS >= 3 #if PAGETABLE_LEVELS == 4 -typedef struct { pudval_t pud; } pud_t; - static inline pud_t native_make_pud(pmdval_t val) { return (pud_t) { val }; @@ -127,8 +71,6 @@ static inline pudval_t pud_flags(pud_t pud) return native_pud_val(pud) & PTE_FLAGS_MASK; } -typedef struct { pmdval_t pmd; } pmd_t; - static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { val }; diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index bcde0d7b432..b3f0bf79e84 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h @@ -1,82 +1,12 @@ #ifndef _ASM_X86_PAGE_32_H #define _ASM_X86_PAGE_32_H -/* - * This handles the memory map. - * - * A __PAGE_OFFSET of 0xC0000000 means that the kernel has - * a virtual address space of one gigabyte, which limits the - * amount of physical memory you can use to about 950MB. - * - * If you want more physical memory than this then see the CONFIG_HIGHMEM4G - * and CONFIG_HIGHMEM64G options in the kernel configuration. 
- */ -#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) - -#ifdef CONFIG_4KSTACKS -#define THREAD_ORDER 0 -#else -#define THREAD_ORDER 1 -#endif -#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) - -#define STACKFAULT_STACK 0 -#define DOUBLEFAULT_STACK 1 -#define NMI_STACK 0 -#define DEBUG_STACK 0 -#define MCE_STACK 0 -#define N_EXCEPTION_STACKS 1 - -#ifdef CONFIG_X86_PAE -/* 44=32+12, the limit we can fit into an unsigned long pfn */ -#define __PHYSICAL_MASK_SHIFT 44 -#define __VIRTUAL_MASK_SHIFT 32 -#define PAGETABLE_LEVELS 3 - -#ifndef __ASSEMBLY__ -typedef u64 pteval_t; -typedef u64 pmdval_t; -typedef u64 pudval_t; -typedef u64 pgdval_t; -typedef u64 pgprotval_t; - -typedef union { - struct { - unsigned long pte_low, pte_high; - }; - pteval_t pte; -} pte_t; -#endif /* __ASSEMBLY__ - */ -#else /* !CONFIG_X86_PAE */ -#define __PHYSICAL_MASK_SHIFT 32 -#define __VIRTUAL_MASK_SHIFT 32 -#define PAGETABLE_LEVELS 2 - -#ifndef __ASSEMBLY__ -typedef unsigned long pteval_t; -typedef unsigned long pmdval_t; -typedef unsigned long pudval_t; -typedef unsigned long pgdval_t; -typedef unsigned long pgprotval_t; - -typedef union { - pteval_t pte; - pteval_t pte_low; -} pte_t; - -#endif /* __ASSEMBLY__ */ -#endif /* CONFIG_X86_PAE */ - -#ifndef __ASSEMBLY__ -typedef struct page *pgtable_t; -#endif +#include #ifdef CONFIG_HUGETLB_PAGE #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif -#ifndef __ASSEMBLY__ #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) #ifdef CONFIG_DEBUG_VIRTUAL extern unsigned long __phys_addr(unsigned long); @@ -89,22 +19,7 @@ extern unsigned long __phys_addr(unsigned long); #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ -extern int nx_enabled; - -/* - * This much address space is reserved for vmalloc() and iomap() - * as well as fixmap mappings. - */ -extern unsigned int __VMALLOC_RESERVE; -extern int sysctl_legacy_va_layout; - -extern void find_low_pfn_range(void); -extern unsigned long init_memory_mapping(unsigned long start, - unsigned long end); -extern void initmem_init(unsigned long, unsigned long); -extern void free_initmem(void); -extern void setup_bootmem_allocator(void); - +#ifndef __ASSEMBLY__ #ifdef CONFIG_X86_USE_3DNOW #include diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h new file mode 100644 index 00000000000..b7b5402d7ab --- /dev/null +++ b/arch/x86/include/asm/page_32_types.h @@ -0,0 +1,90 @@ +#ifndef _ASM_X86_PAGE_32_DEFS_H +#define _ASM_X86_PAGE_32_DEFS_H + +#include + +/* + * This handles the memory map. + * + * A __PAGE_OFFSET of 0xC0000000 means that the kernel has + * a virtual address space of one gigabyte, which limits the + * amount of physical memory you can use to about 950MB. + * + * If you want more physical memory than this then see the CONFIG_HIGHMEM4G + * and CONFIG_HIGHMEM64G options in the kernel configuration. 
+ */ +#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) + +#ifdef CONFIG_4KSTACKS +#define THREAD_ORDER 0 +#else +#define THREAD_ORDER 1 +#endif +#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) + +#define STACKFAULT_STACK 0 +#define DOUBLEFAULT_STACK 1 +#define NMI_STACK 0 +#define DEBUG_STACK 0 +#define MCE_STACK 0 +#define N_EXCEPTION_STACKS 1 + +#ifdef CONFIG_X86_PAE +/* 44=32+12, the limit we can fit into an unsigned long pfn */ +#define __PHYSICAL_MASK_SHIFT 44 +#define __VIRTUAL_MASK_SHIFT 32 +#define PAGETABLE_LEVELS 3 + +#else /* !CONFIG_X86_PAE */ +#define __PHYSICAL_MASK_SHIFT 32 +#define __VIRTUAL_MASK_SHIFT 32 +#define PAGETABLE_LEVELS 2 +#endif /* CONFIG_X86_PAE */ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_X86_PAE +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pudval_t; +typedef u64 pgdval_t; +typedef u64 pgprotval_t; + +typedef union { + struct { + unsigned long pte_low, pte_high; + }; + pteval_t pte; +} pte_t; +#else /* !CONFIG_X86_PAE */ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef union { + pteval_t pte; + pteval_t pte_low; +} pte_t; +#endif /* CONFIG_X86_PAE */ + +extern int nx_enabled; + +/* + * This much address space is reserved for vmalloc() and iomap() + * as well as fixmap mappings. + */ +extern unsigned int __VMALLOC_RESERVE; +extern int sysctl_legacy_va_layout; + +extern void find_low_pfn_range(void); +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); +extern void initmem_init(unsigned long, unsigned long); +extern void free_initmem(void); +extern void setup_bootmem_allocator(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_X86_PAGE_32_DEFS_H */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index e27fdbe5f9e..072694ed81a 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -1,105 +1,6 @@ #ifndef _ASM_X86_PAGE_64_H #define _ASM_X86_PAGE_64_H -#define PAGETABLE_LEVELS 4 - -#define THREAD_ORDER 1 -#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) -#define CURRENT_MASK (~(THREAD_SIZE - 1)) - -#define EXCEPTION_STACK_ORDER 0 -#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) - -#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) -#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) - -#define IRQ_STACK_ORDER 2 -#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) - -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ - -#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) -#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) - -/* - * Set __PAGE_OFFSET to the most negative possible address + - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's - * what Xen requires. - */ -#define __PAGE_OFFSET _AC(0xffff880000000000, UL) - -#define __PHYSICAL_START CONFIG_PHYSICAL_START -#define __KERNEL_ALIGN 0x200000 - -/* - * Make sure kernel is aligned to 2MB address. Catching it at compile - * time is better. 
Change your config file and compile the kernel - * for a 2MB aligned address (CONFIG_PHYSICAL_START) - */ -#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 -#error "CONFIG_PHYSICAL_START must be a multiple of 2MB" -#endif - -#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) -#define __START_KERNEL_map _AC(0xffffffff80000000, UL) - -/* See Documentation/x86_64/mm.txt for a description of the memory map. */ -#define __PHYSICAL_MASK_SHIFT 46 -#define __VIRTUAL_MASK_SHIFT 48 - -/* - * Kernel image size is limited to 512 MB (see level2_kernel_pgt in - * arch/x86/kernel/head_64.S), and it is mapped here: - */ -#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) -#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) - -#ifndef __ASSEMBLY__ -void clear_page(void *page); -void copy_page(void *to, void *from); - -/* duplicated to the one in bootmem.h */ -extern unsigned long max_pfn; -extern unsigned long phys_base; - -extern unsigned long __phys_addr(unsigned long); -#define __phys_reloc_hide(x) (x) - -/* - * These are used to make use of C type-checking.. - */ -typedef unsigned long pteval_t; -typedef unsigned long pmdval_t; -typedef unsigned long pudval_t; -typedef unsigned long pgdval_t; -typedef unsigned long pgprotval_t; - -typedef struct page *pgtable_t; - -typedef struct { pteval_t pte; } pte_t; - -#define vmemmap ((struct page *)VMEMMAP_START) - -extern unsigned long init_memory_mapping(unsigned long start, - unsigned long end); - -extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); -extern void free_initmem(void); - -extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); -extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); - -#endif /* !__ASSEMBLY__ */ - -#ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) ((pfn) < max_pfn) -#endif - +#include #endif /* _ASM_X86_PAGE_64_H */ diff --git a/arch/x86/include/asm/page_64.h.rej b/arch/x86/include/asm/page_64.h.rej new file mode 100644 index 00000000000..9b1807f1859 --- /dev/null +++ b/arch/x86/include/asm/page_64.h.rej @@ -0,0 +1,114 @@ +*************** +*** 1,105 **** + #ifndef _ASM_X86_PAGE_64_H + #define _ASM_X86_PAGE_64_H + +- #define PAGETABLE_LEVELS 4 +- +- #define THREAD_ORDER 1 +- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) +- #define CURRENT_MASK (~(THREAD_SIZE - 1)) +- +- #define EXCEPTION_STACK_ORDER 0 +- #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) +- +- #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) +- #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) +- +- #define IRQSTACK_ORDER 2 +- #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) +- +- #define STACKFAULT_STACK 1 +- #define DOUBLEFAULT_STACK 2 +- #define NMI_STACK 3 +- #define DEBUG_STACK 4 +- #define MCE_STACK 5 +- #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +- +- #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +- #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +- +- /* +- * Set __PAGE_OFFSET to the most negative possible address + +- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a +- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's +- * what Xen requires. +- */ +- #define __PAGE_OFFSET _AC(0xffff880000000000, UL) +- +- #define __PHYSICAL_START CONFIG_PHYSICAL_START +- #define __KERNEL_ALIGN 0x200000 +- +- /* +- * Make sure kernel is aligned to 2MB address. Catching it at compile +- * time is better. 
Change your config file and compile the kernel +- * for a 2MB aligned address (CONFIG_PHYSICAL_START) +- */ +- #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 +- #error "CONFIG_PHYSICAL_START must be a multiple of 2MB" +- #endif +- +- #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) +- #define __START_KERNEL_map _AC(0xffffffff80000000, UL) +- +- /* See Documentation/x86_64/mm.txt for a description of the memory map. */ +- #define __PHYSICAL_MASK_SHIFT 46 +- #define __VIRTUAL_MASK_SHIFT 48 +- +- /* +- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in +- * arch/x86/kernel/head_64.S), and it is mapped here: +- */ +- #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +- #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) +- +- #ifndef __ASSEMBLY__ +- void clear_page(void *page); +- void copy_page(void *to, void *from); +- +- /* duplicated to the one in bootmem.h */ +- extern unsigned long max_pfn; +- extern unsigned long phys_base; +- +- extern unsigned long __phys_addr(unsigned long); +- #define __phys_reloc_hide(x) (x) +- +- /* +- * These are used to make use of C type-checking.. +- */ +- typedef unsigned long pteval_t; +- typedef unsigned long pmdval_t; +- typedef unsigned long pudval_t; +- typedef unsigned long pgdval_t; +- typedef unsigned long pgprotval_t; +- +- typedef struct page *pgtable_t; +- +- typedef struct { pteval_t pte; } pte_t; +- +- #define vmemmap ((struct page *)VMEMMAP_START) +- +- extern unsigned long init_memory_mapping(unsigned long start, +- unsigned long end); +- +- extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); +- extern void free_initmem(void); +- +- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); +- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); +- +- #endif /* !__ASSEMBLY__ */ +- +- #ifdef CONFIG_FLATMEM +- #define pfn_valid(pfn) ((pfn) < max_pfn) +- #endif +- + + #endif /* _ASM_X86_PAGE_64_H */ +--- 1,6 ---- + #ifndef _ASM_X86_PAGE_64_H + #define _ASM_X86_PAGE_64_H + ++ #include + + #endif /* _ASM_X86_PAGE_64_H */ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h new file mode 100644 index 00000000000..d00cd388082 --- /dev/null +++ b/arch/x86/include/asm/page_64_types.h @@ -0,0 +1,102 @@ +#ifndef _ASM_X86_PAGE_64_DEFS_H +#define _ASM_X86_PAGE_64_DEFS_H + +#define PAGETABLE_LEVELS 4 + +#define THREAD_ORDER 1 +#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) +#define CURRENT_MASK (~(THREAD_SIZE - 1)) + +#define EXCEPTION_STACK_ORDER 0 +#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) + +#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) +#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) + +#define IRQ_STACK_ORDER 2 +#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +#define STACKFAULT_STACK 1 +#define DOUBLEFAULT_STACK 2 +#define NMI_STACK 3 +#define DEBUG_STACK 4 +#define MCE_STACK 5 +#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ + +#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) + +/* + * Set __PAGE_OFFSET to the most negative possible address + + * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a + * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's + * what Xen requires. + */ +#define __PAGE_OFFSET _AC(0xffff880000000000, UL) + +#define __PHYSICAL_START CONFIG_PHYSICAL_START +#define __KERNEL_ALIGN 0x200000 + +/* + * Make sure kernel is aligned to 2MB address. Catching it at compile + * time is better. 
Change your config file and compile the kernel + * for a 2MB aligned address (CONFIG_PHYSICAL_START) + */ +#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 +#error "CONFIG_PHYSICAL_START must be a multiple of 2MB" +#endif + +#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) +#define __START_KERNEL_map _AC(0xffffffff80000000, UL) + +/* See Documentation/x86_64/mm.txt for a description of the memory map. */ +#define __PHYSICAL_MASK_SHIFT 46 +#define __VIRTUAL_MASK_SHIFT 48 + +/* + * Kernel image size is limited to 512 MB (see level2_kernel_pgt in + * arch/x86/kernel/head_64.S), and it is mapped here: + */ +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) + +#ifndef __ASSEMBLY__ +void clear_page(void *page); +void copy_page(void *to, void *from); + +/* duplicated to the one in bootmem.h */ +extern unsigned long max_pfn; +extern unsigned long phys_base; + +extern unsigned long __phys_addr(unsigned long); +#define __phys_reloc_hide(x) (x) + +/* + * These are used to make use of C type-checking.. + */ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef struct { pteval_t pte; } pte_t; + +#define vmemmap ((struct page *)VMEMMAP_START) + +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); + +extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); +extern void free_initmem(void); + +extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); +extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); + +#endif /* !__ASSEMBLY__ */ + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_pfn) +#endif + +#endif /* _ASM_X86_PAGE_64_DEFS_H */ diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h new file mode 100644 index 00000000000..65787ad4c59 --- /dev/null +++ b/arch/x86/include/asm/page_types.h @@ -0,0 +1,77 @@ +#ifndef _ASM_X86_PAGE_DEFS_H +#define _ASM_X86_PAGE_DEFS_H + +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) +#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) + +/* Cast PAGE_MASK to a signed type so that it is sign-extended if + virtual addresses are 32-bits but physical addresses are larger + (ie, 32-bit PAE). */ +#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) + +/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ +#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) + +/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ +#define PTE_FLAGS_MASK (~PTE_PFN_MASK) + +#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HUGE_MAX_HSTATE 2 + +#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) + +#define VM_DATA_DEFAULT_FLAGS \ + (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifdef CONFIG_X86_64 +#include +#else +#include +#endif /* CONFIG_X86_64 */ + +#ifndef __ASSEMBLY__ + +#include + +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pgprotval_t pgprot; } pgprot_t; + +#if PAGETABLE_LEVELS > 3 +typedef struct { pudval_t pud; } pud_t; +#endif + +#if PAGETABLE_LEVELS > 2 +typedef struct { pmdval_t pmd; } pmd_t; +#endif + +typedef struct page *pgtable_t; + +extern int page_is_ram(unsigned long pagenr); +extern int pagerange_is_ram(unsigned long start, unsigned long end); +extern int devmem_is_allowed(unsigned long pagenr); +extern void map_devmem(unsigned long pfn, unsigned long size, + pgprot_t vma_prot); +extern void unmap_devmem(unsigned long pfn, unsigned long size, + pgprot_t vma_prot); + +extern unsigned long max_low_pfn_mapped; +extern unsigned long max_pfn_mapped; + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_X86_PAGE_DEFS_H */ -- cgit v1.2.3 From 1dfc07aad5479f1fb832ff6f61a5a9ce822d9e1f Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 23:24:26 -0800 Subject: x86: move 2 and 3 level asm-generic defs into page-defs Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page.h | 4 ---- arch/x86/include/asm/page_types.h | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 3b2d2af951c..da54f6c48a7 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -58,8 +58,6 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else /* PAGETABLE_LEVELS == 3 */ -#include - static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.pgd); @@ -82,8 +80,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) } #else /* PAGETABLE_LEVELS == 2 */ -#include - static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.pgd); diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 65787ad4c59..92dfd251a65 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -53,10 +53,14 @@ typedef struct { pgprotval_t pgprot; } pgprot_t; #if PAGETABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; +#else +#include #endif #if PAGETABLE_LEVELS > 2 typedef struct { pmdval_t pmd; } pmd_t; +#else +#include #endif typedef struct page *pgtable_t; -- cgit v1.2.3 From e42778de31d78ae262a3b901264eabefb9c3b51b Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sun, 8 Feb 2009 23:42:01 -0800 Subject: x86: move defs around to allow paravirt.h to just include page_types.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page.h | 78 ------------------------------------ arch/x86/include/asm/page_32_types.h | 2 + arch/x86/include/asm/page_types.h | 74 ++++++++++++++++++++++++++++++++++ arch/x86/include/asm/paravirt.h | 2 +- 4 files changed, 77 insertions(+), 79 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index da54f6c48a7..28423609b00 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -31,84 +31,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE -static inline pgd_t native_make_pgd(pgdval_t val) -{ - return (pgd_t) { val }; -} - -static inline pgdval_t native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; 
-} - -static inline pgdval_t pgd_flags(pgd_t pgd) -{ - return native_pgd_val(pgd) & PTE_FLAGS_MASK; -} - -#if PAGETABLE_LEVELS >= 3 -#if PAGETABLE_LEVELS == 4 -static inline pud_t native_make_pud(pmdval_t val) -{ - return (pud_t) { val }; -} - -static inline pudval_t native_pud_val(pud_t pud) -{ - return pud.pud; -} -#else /* PAGETABLE_LEVELS == 3 */ -static inline pudval_t native_pud_val(pud_t pud) -{ - return native_pgd_val(pud.pgd); -} -#endif /* PAGETABLE_LEVELS == 4 */ - -static inline pudval_t pud_flags(pud_t pud) -{ - return native_pud_val(pud) & PTE_FLAGS_MASK; -} - -static inline pmd_t native_make_pmd(pmdval_t val) -{ - return (pmd_t) { val }; -} - -static inline pmdval_t native_pmd_val(pmd_t pmd) -{ - return pmd.pmd; -} - -#else /* PAGETABLE_LEVELS == 2 */ -static inline pmdval_t native_pmd_val(pmd_t pmd) -{ - return native_pgd_val(pmd.pud.pgd); -} -#endif /* PAGETABLE_LEVELS >= 3 */ - -static inline pmdval_t pmd_flags(pmd_t pmd) -{ - return native_pmd_val(pmd) & PTE_FLAGS_MASK; -} - -static inline pte_t native_make_pte(pteval_t val) -{ - return (pte_t) { .pte = val }; -} - -static inline pteval_t native_pte_val(pte_t pte) -{ - return pte.pte; -} - -static inline pteval_t pte_flags(pte_t pte) -{ - return native_pte_val(pte) & PTE_FLAGS_MASK; -} - -#define pgprot_val(x) ((x).pgprot) -#define __pgprot(x) ((pgprot_t) { (x) } ) - #ifdef CONFIG_PARAVIRT #include #else /* !CONFIG_PARAVIRT */ diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index b7b5402d7ab..83f820a2b10 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -43,6 +43,8 @@ #ifndef __ASSEMBLY__ +#include + #ifdef CONFIG_X86_PAE typedef u64 pteval_t; typedef u64 pmdval_t; diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 92dfd251a65..c41e3e8f227 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -51,18 +51,92 @@ typedef struct { pgdval_t pgd; } pgd_t; typedef struct { pgprotval_t pgprot; } pgprot_t; +static inline pgd_t native_make_pgd(pgdval_t val) +{ + return (pgd_t) { val }; +} + +static inline pgdval_t native_pgd_val(pgd_t pgd) +{ + return pgd.pgd; +} + +static inline pgdval_t pgd_flags(pgd_t pgd) +{ + return native_pgd_val(pgd) & PTE_FLAGS_MASK; +} + #if PAGETABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; + +static inline pud_t native_make_pud(pmdval_t val) +{ + return (pud_t) { val }; +} + +static inline pudval_t native_pud_val(pud_t pud) +{ + return pud.pud; +} #else #include + +static inline pudval_t native_pud_val(pud_t pud) +{ + return native_pgd_val(pud.pgd); +} #endif #if PAGETABLE_LEVELS > 2 typedef struct { pmdval_t pmd; } pmd_t; + +static inline pudval_t pud_flags(pud_t pud) +{ + return native_pud_val(pud) & PTE_FLAGS_MASK; +} + +static inline pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { val }; +} + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} #else #include + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return native_pgd_val(pmd.pud.pgd); +} #endif +static inline pmdval_t pmd_flags(pmd_t pmd) +{ + return native_pmd_val(pmd) & PTE_FLAGS_MASK; +} + +static inline pte_t native_make_pte(pteval_t val) +{ + return (pte_t) { .pte = val }; +} + +static inline pteval_t native_pte_val(pte_t pte) +{ + return pte.pte; +} + +static inline pteval_t pte_flags(pte_t pte) +{ + return native_pte_val(pte) & PTE_FLAGS_MASK; +} + +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) } ) + + 
typedef struct page *pgtable_t; extern int page_is_ram(unsigned long pagenr); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index c85e7475e17..db570f23b22 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -4,7 +4,7 @@ * para-virtualization: those hooks are defined here. */ #ifdef CONFIG_PARAVIRT -#include +#include #include /* Bitmask of what can be clobbered: usually at least eax. */ -- cgit v1.2.3 From e2f5bda94152fa567f6b48126741014123f982b8 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 9 Feb 2009 00:09:52 -0800 Subject: x86: define pud_flags and pud_large properly to allow non-PAE builds --- arch/x86/include/asm/page_types.h | 10 +++++----- arch/x86/include/asm/pgtable.h | 7 ++++++- 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index c41e3e8f227..0733853e7c8 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -90,11 +90,6 @@ static inline pudval_t native_pud_val(pud_t pud) #if PAGETABLE_LEVELS > 2 typedef struct { pmdval_t pmd; } pmd_t; -static inline pudval_t pud_flags(pud_t pud) -{ - return native_pud_val(pud) & PTE_FLAGS_MASK; -} - static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { val }; @@ -113,6 +108,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) } #endif +static inline pudval_t pud_flags(pud_t pud) +{ + return native_pud_val(pud) & PTE_FLAGS_MASK; +} + static inline pmdval_t pmd_flags(pmd_t pmd) { return native_pmd_val(pmd) & PTE_FLAGS_MASK; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 10404e7bf32..9f508509797 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -398,7 +398,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) static inline int pud_large(pud_t pud) { - return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == + return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == (_PAGE_PSE | _PAGE_PRESENT); } @@ -406,6 +406,11 @@ static inline int pud_bad(pud_t pud) { return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; } +#else +static inline int pud_large(pud_t pud) +{ + return 0; +} #endif /* PAGETABLE_LEVELS > 2 */ #if PAGETABLE_LEVELS > 3 -- cgit v1.2.3 From 54321d947ae9d6a051b81e3eccaf2d8658aeecc6 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 11 Feb 2009 10:20:05 -0800 Subject: x86: move pte types into pgtable*.h pgtable*.h is intended for definitions relating to actual pagetables and their entries, so move all the definitions for (pte|pmd|pud|pgd)(val)?_t to the appropriate pgtable*.h headers. 
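The typedefs being consolidated here exist purely for C type-checking: the raw integer value of a page-table entry is wrapped in a one-member struct, so a pteval_t cannot be passed where a pte_t is expected without going through the explicit conversion helpers. A minimal standalone sketch of that idiom, plain C compiled outside the kernel tree; the names mirror the kernel's pte_t/pte_val()/pte_flags() but the mask value and bit layout below are only illustrative, not the kernel's actual definitions:

/*
 * Standalone illustration (not kernel code) of the wrapper-struct idiom
 * used for page-table entry values: wrapping the raw integer in a struct
 * means it cannot be mixed up with other entry types or plain integers
 * without going through the explicit make/val helpers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;              /* raw entry value */
typedef struct { pteval_t pte; } pte_t; /* type-checked wrapper */

#define PTE_FLAGS_MASK 0xfffULL         /* simplified stand-in for the attribute bits */

static inline pte_t make_pte(pteval_t val)  { return (pte_t){ .pte = val }; }
static inline pteval_t pte_val(pte_t pte)   { return pte.pte; }
static inline pteval_t pte_flags(pte_t pte) { return pte_val(pte) & PTE_FLAGS_MASK; }

int main(void)
{
	pte_t pte = make_pte(0x1000 | 0x3);  /* illustrative frame address plus two low flag bits */

	printf("value 0x%llx flags 0x%llx\n",
	       (unsigned long long)pte_val(pte),
	       (unsigned long long)pte_flags(pte));

	/* pteval_t raw = pte;  -- would not compile: pte_t is not an integer */
	return 0;
}

The same pattern repeats for pgd_t, pud_t and pmd_t in the headers touched by this series, which is why the accessors travel together with the typedefs into pgtable_types.h.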
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page.h | 22 ------ arch/x86/include/asm/page_32_types.h | 30 -------- arch/x86/include/asm/page_64_types.h | 11 --- arch/x86/include/asm/page_types.h | 97 +------------------------ arch/x86/include/asm/paravirt.h | 2 +- arch/x86/include/asm/pgtable-2level_types.h | 15 ++++ arch/x86/include/asm/pgtable-3level_types.h | 18 +++++ arch/x86/include/asm/pgtable.h | 103 +++++++++++++++----------- arch/x86/include/asm/pgtable_64_types.h | 16 +++++ arch/x86/include/asm/pgtable_types.h | 107 ++++++++++++++++++++++++++-- arch/x86/include/asm/processor.h | 1 + 11 files changed, 215 insertions(+), 207 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 28423609b00..467ce69306b 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -31,28 +31,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE -#ifdef CONFIG_PARAVIRT -#include -#else /* !CONFIG_PARAVIRT */ - -#define pgd_val(x) native_pgd_val(x) -#define __pgd(x) native_make_pgd(x) - -#ifndef __PAGETABLE_PUD_FOLDED -#define pud_val(x) native_pud_val(x) -#define __pud(x) native_make_pud(x) -#endif - -#ifndef __PAGETABLE_PMD_FOLDED -#define pmd_val(x) native_pmd_val(x) -#define __pmd(x) native_make_pmd(x) -#endif - -#define pte_val(x) native_pte_val(x) -#define __pte(x) native_make_pte(x) - -#endif /* CONFIG_PARAVIRT */ - #define __pa(x) __phys_addr((unsigned long)(x)) #define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x)) /* __pa_symbol should be used for C visible symbols. diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 83f820a2b10..b5486aaf36e 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -43,36 +43,6 @@ #ifndef __ASSEMBLY__ -#include - -#ifdef CONFIG_X86_PAE -typedef u64 pteval_t; -typedef u64 pmdval_t; -typedef u64 pudval_t; -typedef u64 pgdval_t; -typedef u64 pgprotval_t; - -typedef union { - struct { - unsigned long pte_low, pte_high; - }; - pteval_t pte; -} pte_t; -#else /* !CONFIG_X86_PAE */ -typedef unsigned long pteval_t; -typedef unsigned long pmdval_t; -typedef unsigned long pudval_t; -typedef unsigned long pgdval_t; -typedef unsigned long pgprotval_t; - -typedef union { - pteval_t pte; - pteval_t pte_low; -} pte_t; -#endif /* CONFIG_X86_PAE */ - -extern int nx_enabled; - /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index d00cd388082..bc73af3eda9 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -71,17 +71,6 @@ extern unsigned long phys_base; extern unsigned long __phys_addr(unsigned long); #define __phys_reloc_hide(x) (x) -/* - * These are used to make use of C type-checking.. 
- */ -typedef unsigned long pteval_t; -typedef unsigned long pmdval_t; -typedef unsigned long pudval_t; -typedef unsigned long pgdval_t; -typedef unsigned long pgprotval_t; - -typedef struct { pteval_t pte; } pte_t; - #define vmemmap ((struct page *)VMEMMAP_START) extern unsigned long init_memory_mapping(unsigned long start, diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 0733853e7c8..9f0c9596335 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -46,106 +46,15 @@ #ifndef __ASSEMBLY__ -#include - -typedef struct { pgdval_t pgd; } pgd_t; -typedef struct { pgprotval_t pgprot; } pgprot_t; - -static inline pgd_t native_make_pgd(pgdval_t val) -{ - return (pgd_t) { val }; -} - -static inline pgdval_t native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} - -static inline pgdval_t pgd_flags(pgd_t pgd) -{ - return native_pgd_val(pgd) & PTE_FLAGS_MASK; -} - -#if PAGETABLE_LEVELS > 3 -typedef struct { pudval_t pud; } pud_t; - -static inline pud_t native_make_pud(pmdval_t val) -{ - return (pud_t) { val }; -} - -static inline pudval_t native_pud_val(pud_t pud) -{ - return pud.pud; -} -#else -#include - -static inline pudval_t native_pud_val(pud_t pud) -{ - return native_pgd_val(pud.pgd); -} -#endif - -#if PAGETABLE_LEVELS > 2 -typedef struct { pmdval_t pmd; } pmd_t; - -static inline pmd_t native_make_pmd(pmdval_t val) -{ - return (pmd_t) { val }; -} - -static inline pmdval_t native_pmd_val(pmd_t pmd) -{ - return pmd.pmd; -} -#else -#include - -static inline pmdval_t native_pmd_val(pmd_t pmd) -{ - return native_pgd_val(pmd.pud.pgd); -} -#endif - -static inline pudval_t pud_flags(pud_t pud) -{ - return native_pud_val(pud) & PTE_FLAGS_MASK; -} - -static inline pmdval_t pmd_flags(pmd_t pmd) -{ - return native_pmd_val(pmd) & PTE_FLAGS_MASK; -} - -static inline pte_t native_make_pte(pteval_t val) -{ - return (pte_t) { .pte = val }; -} - -static inline pteval_t native_pte_val(pte_t pte) -{ - return pte.pte; -} - -static inline pteval_t pte_flags(pte_t pte) -{ - return native_pte_val(pte) & PTE_FLAGS_MASK; -} - -#define pgprot_val(x) ((x).pgprot) -#define __pgprot(x) ((pgprot_t) { (x) } ) - - -typedef struct page *pgtable_t; +struct pgprot; extern int page_is_ram(unsigned long pagenr); extern int pagerange_is_ram(unsigned long start, unsigned long end); extern int devmem_is_allowed(unsigned long pagenr); extern void map_devmem(unsigned long pfn, unsigned long size, - pgprot_t vma_prot); + struct pgprot vma_prot); extern void unmap_devmem(unsigned long pfn, unsigned long size, - pgprot_t vma_prot); + struct pgprot vma_prot); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index db570f23b22..7ed73ccd74e 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -4,7 +4,7 @@ * para-virtualization: those hooks are defined here. */ #ifdef CONFIG_PARAVIRT -#include +#include #include /* Bitmask of what can be clobbered: usually at least eax. 
*/ diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h index d77db8990ea..09ae67efceb 100644 --- a/arch/x86/include/asm/pgtable-2level_types.h +++ b/arch/x86/include/asm/pgtable-2level_types.h @@ -1,6 +1,21 @@ #ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H #define _ASM_X86_PGTABLE_2LEVEL_DEFS_H +#ifndef __ASSEMBLY__ +#include + +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef union { + pteval_t pte; + pteval_t pte_low; +} pte_t; +#endif /* !__ASSEMBLY__ */ + #define SHARED_KERNEL_PMD 0 /* diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index 62561367653..bcc89625ebe 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -1,6 +1,23 @@ #ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H #define _ASM_X86_PGTABLE_3LEVEL_DEFS_H +#ifndef __ASSEMBLY__ +#include + +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pudval_t; +typedef u64 pgdval_t; +typedef u64 pgprotval_t; + +typedef union { + struct { + unsigned long pte_low, pte_high; + }; + pteval_t pte; +} pte_t; +#endif /* !__ASSEMBLY__ */ + #ifdef CONFIG_PARAVIRT #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) #else @@ -25,4 +42,5 @@ */ #define PTRS_PER_PTE 512 + #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 9f508509797..b0d1066ab6a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -25,6 +25,66 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; extern spinlock_t pgd_lock; extern struct list_head pgd_list; +#ifdef CONFIG_PARAVIRT +#include +#else /* !CONFIG_PARAVIRT */ +#define set_pte(ptep, pte) native_set_pte(ptep, pte) +#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) + +#define set_pte_present(mm, addr, ptep, pte) \ + native_set_pte_present(mm, addr, ptep, pte) +#define set_pte_atomic(ptep, pte) \ + native_set_pte_atomic(ptep, pte) + +#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) + +#ifndef __PAGETABLE_PUD_FOLDED +#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) +#define pgd_clear(pgd) native_pgd_clear(pgd) +#endif + +#ifndef set_pud +# define set_pud(pudp, pud) native_set_pud(pudp, pud) +#endif + +#ifndef __PAGETABLE_PMD_FOLDED +#define pud_clear(pud) native_pud_clear(pud) +#endif + +#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) +#define pmd_clear(pmd) native_pmd_clear(pmd) + +#define pte_update(mm, addr, ptep) do { } while (0) +#define pte_update_defer(mm, addr, ptep) do { } while (0) + +static inline void __init paravirt_pagetable_setup_start(pgd_t *base) +{ + native_pagetable_setup_start(base); +} + +static inline void __init paravirt_pagetable_setup_done(pgd_t *base) +{ + native_pagetable_setup_done(base); +} + +#define pgd_val(x) native_pgd_val(x) +#define __pgd(x) native_make_pgd(x) + +#ifndef __PAGETABLE_PUD_FOLDED +#define pud_val(x) native_pud_val(x) +#define __pud(x) native_make_pud(x) +#endif + +#ifndef __PAGETABLE_PMD_FOLDED +#define pmd_val(x) native_pmd_val(x) +#define __pmd(x) native_make_pmd(x) +#endif + +#define pte_val(x) native_pte_val(x) +#define __pte(x) native_make_pte(x) + +#endif /* CONFIG_PARAVIRT */ + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
@@ -214,49 +274,6 @@ static inline int is_new_memtype_allowed(unsigned long flags, return 1; } -#ifdef CONFIG_PARAVIRT -#include -#else /* !CONFIG_PARAVIRT */ -#define set_pte(ptep, pte) native_set_pte(ptep, pte) -#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) - -#define set_pte_present(mm, addr, ptep, pte) \ - native_set_pte_present(mm, addr, ptep, pte) -#define set_pte_atomic(ptep, pte) \ - native_set_pte_atomic(ptep, pte) - -#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) - -#ifndef __PAGETABLE_PUD_FOLDED -#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) -#define pgd_clear(pgd) native_pgd_clear(pgd) -#endif - -#ifndef set_pud -# define set_pud(pudp, pud) native_set_pud(pudp, pud) -#endif - -#ifndef __PAGETABLE_PMD_FOLDED -#define pud_clear(pud) native_pud_clear(pud) -#endif - -#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) -#define pmd_clear(pmd) native_pmd_clear(pmd) - -#define pte_update(mm, addr, ptep) do { } while (0) -#define pte_update_defer(mm, addr, ptep) do { } while (0) - -static inline void __init paravirt_pagetable_setup_start(pgd_t *base) -{ - native_pagetable_setup_start(base); -} - -static inline void __init paravirt_pagetable_setup_done(pgd_t *base) -{ - native_pagetable_setup_done(base); -} -#endif /* CONFIG_PARAVIRT */ - #endif /* __ASSEMBLY__ */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index ffaf1934068..2f59135c6f2 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -1,6 +1,22 @@ #ifndef _ASM_X86_PGTABLE_64_DEFS_H #define _ASM_X86_PGTABLE_64_DEFS_H +#ifndef __ASSEMBLY__ +#include + +/* + * These are used to make use of C type-checking.. + */ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef struct { pteval_t pte; } pte_t; + +#endif /* !__ASSEMBLY__ */ + #define SHARED_KERNEL_PMD 0 /* diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index e5d5a8d3577..a7452f10930 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -162,9 +162,110 @@ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif +#ifdef CONFIG_X86_32 +# include "pgtable_32_types.h" +#else +# include "pgtable_64_types.h" +#endif + #ifndef __ASSEMBLY__ +#include + +typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; + +typedef struct { pgdval_t pgd; } pgd_t; + +static inline pgd_t native_make_pgd(pgdval_t val) +{ + return (pgd_t) { val }; +} + +static inline pgdval_t native_pgd_val(pgd_t pgd) +{ + return pgd.pgd; +} + +static inline pgdval_t pgd_flags(pgd_t pgd) +{ + return native_pgd_val(pgd) & PTE_FLAGS_MASK; +} + +#if PAGETABLE_LEVELS > 3 +typedef struct { pudval_t pud; } pud_t; + +static inline pud_t native_make_pud(pmdval_t val) +{ + return (pud_t) { val }; +} + +static inline pudval_t native_pud_val(pud_t pud) +{ + return pud.pud; +} +#else +#include + +static inline pudval_t native_pud_val(pud_t pud) +{ + return native_pgd_val(pud.pgd); +} +#endif + +#if PAGETABLE_LEVELS > 2 +typedef struct { pmdval_t pmd; } pmd_t; + +static inline pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { val }; +} + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} +#else +#include + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return native_pgd_val(pmd.pud.pgd); +} +#endif + 
+static inline pudval_t pud_flags(pud_t pud) +{ + return native_pud_val(pud) & PTE_FLAGS_MASK; +} + +static inline pmdval_t pmd_flags(pmd_t pmd) +{ + return native_pmd_val(pmd) & PTE_FLAGS_MASK; +} + +static inline pte_t native_make_pte(pteval_t val) +{ + return (pte_t) { .pte = val }; +} + +static inline pteval_t native_pte_val(pte_t pte) +{ + return pte.pte; +} + +static inline pteval_t pte_flags(pte_t pte) +{ + return native_pte_val(pte) & PTE_FLAGS_MASK; +} + +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) } ) + + +typedef struct page *pgtable_t; + extern pteval_t __supported_pte_mask; +extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); @@ -217,10 +318,4 @@ extern pte_t *lookup_address(unsigned long address, unsigned int *level); #endif /* !__ASSEMBLY__ */ -#ifdef CONFIG_X86_32 -# include "pgtable_32_types.h" -#else -# include "pgtable_64_types.h" -#endif - #endif /* _ASM_X86_PGTABLE_DEFS_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 656d02ea509..96302880774 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -16,6 +16,7 @@ struct mm_struct; #include #include #include +#include #include #include #include -- cgit v1.2.3 From 744525092727827a9cf0044074db3e22dcf354fd Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Wed, 11 Feb 2009 16:31:40 -0800 Subject: x86: merge sys_rt_sigreturn between 32 and 64 bits Impact: cleanup With the recent changes in the 32-bit code to make system calls which use struct pt_regs take a pointer, sys_rt_sigreturn() have become identical between 32 and 64 bits, and both are empty wrappers around do_rt_sigreturn(). Remove both wrappers and rename both to sys_rt_sigreturn(). Cc: Brian Gerst Cc: Tejun Heo Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/syscalls.h | 2 +- arch/x86/kernel/signal.c | 14 +------------- 2 files changed, 2 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 77bb31a88ba..68b1be10cfa 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); int sys_sigaltstack(struct pt_regs *); unsigned long sys_sigreturn(struct pt_regs *); -int sys_rt_sigreturn(struct pt_regs *); +long sys_rt_sigreturn(struct pt_regs *); /* kernel/ioport.c */ long sys_iopl(struct pt_regs *); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index ccfb27412f0..7cdcd16885e 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -601,7 +601,7 @@ badframe: } #endif /* CONFIG_X86_32 */ -static long do_rt_sigreturn(struct pt_regs *regs) +long sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; unsigned long ax; @@ -632,18 +632,6 @@ badframe: return 0; } -#ifdef CONFIG_X86_32 -int sys_rt_sigreturn(struct pt_regs *regs) -{ - return do_rt_sigreturn(regs); -} -#else /* !CONFIG_X86_32 */ -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) -{ - return do_rt_sigreturn(regs); -} -#endif /* CONFIG_X86_32 */ - /* * OK, we're invoking a handler: */ -- cgit v1.2.3 From 58105ef1857112a186696c9b8957020090226a28 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 31 Jan 2009 12:32:26 -0800 Subject: x86: UV: fix header struct usage Impact: Fixes warning Fix uv.h struct usage: arch/x86/include/asm/uv/uv.h:16: warning: 'struct mm_struct' declared inside parameter list arch/x86/include/asm/uv/uv.h:16: warning: its scope is only this definition or declaration, which is probably not what you want Signed-off-by: Randy Dunlap Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/uv/uv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 8ac1d7e312f..8242bf96581 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -3,6 +3,9 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; +struct cpumask; +struct mm_struct; + #ifdef CONFIG_X86_UV extern enum uv_system_type get_uv_system_type(void); -- cgit v1.2.3 From bc8bd002b8371ece81c9adbdce49d7e68f0a0cbc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 12:43:52 +0100 Subject: x86, defconfig: update the 32-bit defconfig Signed-off-by: Ingo Molnar --- arch/x86/configs/i386_defconfig | 406 ++++++++++++++++++++++++++++++++-------- 1 file changed, 324 insertions(+), 82 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index edba00d98ac..c3186ccc936 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -1,14 +1,13 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27-rc5 -# Wed Sep 3 17:23:09 2008 +# Linux kernel version: 2.6.29-rc4 +# Thu Feb 12 12:42:50 2009 # # CONFIG_64BIT is not set CONFIG_X86_32=y # CONFIG_X86_64 is not set CONFIG_X86=y CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" -# CONFIG_GENERIC_LOCKBREAK is not set CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y CONFIG_CLOCKSOURCE_WATCHDOG=y @@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y CONFIG_GENERIC_IOMAP=y CONFIG_GENERIC_BUG=y CONFIG_GENERIC_HWEIGHT=y -# CONFIG_GENERIC_GPIO is not set CONFIG_ARCH_MAY_HAVE_PC_FDC=y # CONFIG_RWSEM_GENERIC_SPINLOCK is not set CONFIG_RWSEM_XCHGADD_ALGORITHM=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y CONFIG_GENERIC_CALIBRATE_DELAY=y # CONFIG_GENERIC_TIME_VSYSCALL is not set CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_DEFAULT_IDLE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y # CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set @@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y # CONFIG_ZONE_DMA32 is not set CONFIG_ARCH_POPULATES_NODE_MAP=y # CONFIG_AUDIT_ARCH is not set -CONFIG_ARCH_SUPPORTS_AOUT=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_PENDING_IRQ=y CONFIG_X86_SMP=y +CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_X86_32_SMP=y CONFIG_X86_HT=y CONFIG_X86_BIOS_REBOOT=y @@ -76,30 +73,44 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_AUDIT_TREE=y + +# +# RCU Subsystem +# +# CONFIG_CLASSIC_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=18 -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_NS=y -# CONFIG_CGROUP_DEVICE is not set -CONFIG_CPUSETS=y CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y CONFIG_GROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y # CONFIG_RT_GROUP_SCHED is not set # CONFIG_USER_SCHED is not set CONFIG_CGROUP_SCHED=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_NS=y +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_CPUACCT=y CONFIG_RESOURCE_COUNTERS=y # CONFIG_CGROUP_MEM_RES_CTLR is not set # 
CONFIG_SYSFS_DEPRECATED_V2 is not set -CONFIG_PROC_PID_CPUSET=y CONFIG_RELAY=y CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -124,12 +135,15 @@ CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y CONFIG_MARKERS=y # CONFIG_OPROFILE is not set CONFIG_HAVE_OPROFILE=y @@ -139,15 +153,10 @@ CONFIG_KRETPROBES=y CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y -# CONFIG_HAVE_ARCH_TRACEHOOK is not set -# CONFIG_HAVE_DMA_ATTRS is not set -CONFIG_USE_GENERIC_SMP_HELPERS=y -# CONFIG_HAVE_CLK is not set -CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_GENERIC_DMA_COHERENT=y CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -155,12 +164,10 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y # CONFIG_LBD is not set CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_LSF is not set CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_INTEGRITY is not set @@ -176,7 +183,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # Processor type and features @@ -186,6 +193,7 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y +CONFIG_SPARSE_IRQ=y CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y CONFIG_X86_PC=y @@ -194,7 +202,7 @@ CONFIG_X86_PC=y # CONFIG_X86_GENERICARCH is not set # CONFIG_X86_VSMP is not set # CONFIG_X86_RDC321X is not set -CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y # CONFIG_PARAVIRT_GUEST is not set # CONFIG_MEMTEST is not set # CONFIG_M386 is not set @@ -238,10 +246,19 @@ CONFIG_X86_TSC=y CONFIG_X86_CMOV=y CONFIG_X86_MINIMUM_CPU_FAMILY=4 CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_CYRIX_32=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR_32=y +CONFIG_CPU_SUP_TRANSMETA_32=y +CONFIG_CPU_SUP_UMC_32=y +CONFIG_X86_DS=y +CONFIG_X86_PTRACE_BTS=y CONFIG_HPET_TIMER=y CONFIG_HPET_EMULATE_RTC=y CONFIG_DMI=y # CONFIG_IOMMU_HELPER is not set +# CONFIG_IOMMU_API is not set CONFIG_NR_CPUS=64 CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y @@ -250,12 +267,15 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y # CONFIG_X86_MCE is not set CONFIG_VM86=y # CONFIG_TOSHIBA is not set # CONFIG_I8K is not set CONFIG_X86_REBOOTFIXUPS=y CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=y @@ -264,6 +284,7 @@ CONFIG_HIGHMEM4G=y # CONFIG_HIGHMEM64G is not set CONFIG_PAGE_OFFSET=0xC0000000 CONFIG_HIGHMEM=y +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y @@ -274,14 +295,17 @@ CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_SPARSEMEM_STATIC=y -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_RESOURCES_64BIT=y +# 
CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y +CONFIG_UNEVICTABLE_LRU=y CONFIG_HIGHPTE=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW_64K=y # CONFIG_MATH_EMULATION is not set CONFIG_MTRR=y # CONFIG_MTRR_SANITIZER is not set @@ -302,10 +326,11 @@ CONFIG_PHYSICAL_START=0x1000000 CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set +# CONFIG_CMDLINE_BOOL is not set CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y # -# Power management options +# Power management and ACPI options # CONFIG_PM=y CONFIG_PM_DEBUG=y @@ -331,19 +356,13 @@ CONFIG_ACPI_BATTERY=y CONFIG_ACPI_BUTTON=y CONFIG_ACPI_FAN=y CONFIG_ACPI_DOCK=y -# CONFIG_ACPI_BAY is not set CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_WMI is not set -# CONFIG_ACPI_ASUS is not set -# CONFIG_ACPI_TOSHIBA is not set # CONFIG_ACPI_CUSTOM_DSDT is not set CONFIG_ACPI_BLACKLIST_YEAR=0 # CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_EC=y # CONFIG_ACPI_PCI_SLOT is not set -CONFIG_ACPI_POWER=y CONFIG_ACPI_SYSTEM=y CONFIG_X86_PM_TIMER=y CONFIG_ACPI_CONTAINER=y @@ -388,7 +407,6 @@ CONFIG_X86_ACPI_CPUFREQ=y # # shared options # -# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set # CONFIG_X86_SPEEDSTEP_LIB is not set CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y @@ -415,6 +433,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y CONFIG_PCI_MSI=y # CONFIG_PCI_LEGACY is not set # CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set CONFIG_HT_IRQ=y CONFIG_ISA_DMA_API=y # CONFIG_ISA is not set @@ -452,13 +471,17 @@ CONFIG_HOTPLUG_PCI=y # Executable file formats / Emulations # CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_HAVE_AOUT=y # CONFIG_BINFMT_AOUT is not set CONFIG_BINFMT_MISC=y +CONFIG_HAVE_ATOMIC_IOMAP=y CONFIG_NET=y # # Networking options # +CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -519,7 +542,6 @@ CONFIG_DEFAULT_CUBIC=y # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" CONFIG_TCP_MD5SIG=y -# CONFIG_IP_VS is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -557,19 +579,21 @@ CONFIG_NF_CONNTRACK_IRC=y CONFIG_NF_CONNTRACK_SIP=y CONFIG_NF_CT_NETLINK=y CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y CONFIG_NETFILTER_XT_MATCH_MARK=y CONFIG_NETFILTER_XT_MATCH_POLICY=y CONFIG_NETFILTER_XT_MATCH_STATE=y +# CONFIG_IP_VS is not set # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_CONNTRACK_IPV4=y CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=y @@ -595,8 +619,8 @@ CONFIG_IP_NF_MANGLE=y CONFIG_NF_CONNTRACK_IPV6=y CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y # CONFIG_IP_DCCP is not set @@ -604,6 +628,7 @@ CONFIG_IP6_NF_MANGLE=y # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set CONFIG_LLC=y @@ -623,6 +648,7 @@ CONFIG_NET_SCHED=y # CONFIG_NET_SCH_HTB is not set # CONFIG_NET_SCH_HFSC is not set # CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set # CONFIG_NET_SCH_RED 
is not set # CONFIG_NET_SCH_SFQ is not set # CONFIG_NET_SCH_TEQL is not set @@ -630,6 +656,7 @@ CONFIG_NET_SCHED=y # CONFIG_NET_SCH_GRED is not set # CONFIG_NET_SCH_DSMARK is not set # CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set # CONFIG_NET_SCH_INGRESS is not set # @@ -644,6 +671,7 @@ CONFIG_NET_CLS=y # CONFIG_NET_CLS_RSVP is not set # CONFIG_NET_CLS_RSVP6 is not set # CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set CONFIG_NET_EMATCH=y CONFIG_NET_EMATCH_STACK=32 # CONFIG_NET_EMATCH_CMP is not set @@ -659,7 +687,9 @@ CONFIG_NET_CLS_ACT=y # CONFIG_NET_ACT_NAT is not set # CONFIG_NET_ACT_PEDIT is not set # CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set # # Network testing @@ -676,29 +706,33 @@ CONFIG_HAMRADIO=y # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set CONFIG_FIB_RULES=y - -# -# Wireless -# +CONFIG_WIRELESS=y CONFIG_CFG80211=y +# CONFIG_CFG80211_REG_DEBUG is not set CONFIG_NL80211=y +CONFIG_WIRELESS_OLD_REGULATORY=y CONFIG_WIRELESS_EXT=y CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set CONFIG_MAC80211=y # # Rate control algorithm selection # -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y -CONFIG_MAC80211_RC_DEFAULT="pid" +CONFIG_MAC80211_RC_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel" # CONFIG_MAC80211_MESH is not set CONFIG_MAC80211_LEDS=y # CONFIG_MAC80211_DEBUGFS is not set # CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_IEEE80211 is not set -# CONFIG_RFKILL is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_INPUT is not set +CONFIG_RFKILL_LEDS=y # CONFIG_NET_9P is not set # @@ -722,7 +756,7 @@ CONFIG_PROC_EVENTS=y # CONFIG_MTD is not set # CONFIG_PARPORT is not set CONFIG_PNP=y -# CONFIG_PNP_DEBUG is not set +CONFIG_PNP_DEBUG_MESSAGES=y # # Protocols @@ -750,20 +784,19 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_MISC_DEVICES=y # CONFIG_IBM_ASM is not set # CONFIG_PHANTOM is not set -# CONFIG_EEPROM_93CX6 is not set # CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set -# CONFIG_ACER_WMI is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_FUJITSU_LAPTOP is not set -# CONFIG_TC1100_WMI is not set -# CONFIG_MSI_LAPTOP is not set -# CONFIG_COMPAL_LAPTOP is not set -# CONFIG_SONY_LAPTOP is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_INTEL_MENLOW is not set +# CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_HP_ILO is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -875,6 +908,7 @@ CONFIG_PATA_OLDPIIX=y CONFIG_PATA_SCH=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y # CONFIG_MD_LINEAR is not set # CONFIG_MD_RAID0 is not set # CONFIG_MD_RAID1 is not set @@ -930,6 +964,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -953,6 +990,9 @@ CONFIG_NET_TULIP=y # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# 
CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set CONFIG_NET_PCI=y # CONFIG_PCNET32 is not set # CONFIG_AMD8111_ETH is not set @@ -960,7 +1000,6 @@ CONFIG_NET_PCI=y # CONFIG_B44 is not set CONFIG_FORCEDETH=y # CONFIG_FORCEDETH_NAPI is not set -# CONFIG_EEPRO100 is not set CONFIG_E100=y # CONFIG_FEALNX is not set # CONFIG_NATSEMI is not set @@ -974,15 +1013,16 @@ CONFIG_8139TOO=y # CONFIG_R6040 is not set # CONFIG_SIS900 is not set # CONFIG_EPIC100 is not set +# CONFIG_SMSC9420 is not set # CONFIG_SUNDANCE is not set # CONFIG_TLAN is not set # CONFIG_VIA_RHINE is not set # CONFIG_SC92031 is not set +# CONFIG_ATL2 is not set CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set CONFIG_E1000=y -# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set CONFIG_E1000E=y # CONFIG_IP1000 is not set # CONFIG_IGB is not set @@ -1000,18 +1040,23 @@ CONFIG_BNX2=y # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set +# CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_CHELSIO_T3 is not set +# CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set +# CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set # CONFIG_BNX2X is not set +# CONFIG_QLGE is not set # CONFIG_SFC is not set CONFIG_TR=y # CONFIG_IBMOL is not set @@ -1025,9 +1070,8 @@ CONFIG_TR=y # CONFIG_WLAN_PRE80211 is not set CONFIG_WLAN_80211=y # CONFIG_PCMCIA_RAYCS is not set -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set # CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_AIRO is not set # CONFIG_HERMES is not set # CONFIG_ATMEL is not set @@ -1044,6 +1088,8 @@ CONFIG_WLAN_80211=y CONFIG_ATH5K=y # CONFIG_ATH5K_DEBUG is not set # CONFIG_ATH9K is not set +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set # CONFIG_IWLCORE is not set # CONFIG_IWLWIFI_LEDS is not set # CONFIG_IWLAGN is not set @@ -1054,6 +1100,10 @@ CONFIG_ATH5K=y # CONFIG_ZD1211RW is not set # CONFIG_RT2X00 is not set +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + # # USB Network Adapters # @@ -1062,6 +1112,7 @@ CONFIG_ATH5K=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set # CONFIG_USB_USBNET is not set +# CONFIG_USB_HSO is not set CONFIG_NET_PCMCIA=y # CONFIG_PCMCIA_3C589 is not set # CONFIG_PCMCIA_3C574 is not set @@ -1123,6 +1174,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y CONFIG_MOUSE_PS2_SYNAPTICS=y CONFIG_MOUSE_PS2_LIFEBOOK=y CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_SERIAL is not set # CONFIG_MOUSE_APPLETOUCH is not set @@ -1160,15 +1212,16 @@ CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set # CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_UCB1400 is not set # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_PCSPKR is not set # CONFIG_INPUT_APANEL is not set @@ -1179,6 
+1232,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_KEYSPAN_REMOTE is not set # CONFIG_INPUT_POWERMATE is not set # CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_UINPUT is not set # @@ -1245,6 +1299,7 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y @@ -1279,6 +1334,7 @@ CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y # CONFIG_I2C_CHARDEV is not set CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y # # I2C Hardware Bus support @@ -1331,8 +1387,6 @@ CONFIG_I2C_I801=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -1351,8 +1405,78 @@ CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set # CONFIG_PDA_POWER is not set # CONFIG_BATTERY_DS2760 is not set -# CONFIG_HWMON is not set +# CONFIG_BATTERY_BQ27x00 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_SENSORS_LIS3LV02D is not set +# 
CONFIG_SENSORS_APPLESMC is not set +# CONFIG_HWMON_DEBUG_CHIP is not set CONFIG_THERMAL=y +# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -1372,6 +1496,7 @@ CONFIG_WATCHDOG=y # CONFIG_I6300ESB_WDT is not set # CONFIG_ITCO_WDT is not set # CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set # CONFIG_HP_WATCHDOG is not set # CONFIG_SC1200_WDT is not set # CONFIG_PC87413_WDT is not set @@ -1379,9 +1504,11 @@ CONFIG_WATCHDOG=y # CONFIG_SBC8360_WDT is not set # CONFIG_SBC7240_WDT is not set # CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set # CONFIG_SMSC37B787_WDT is not set # CONFIG_W83627HF_WDT is not set # CONFIG_W83697HF_WDT is not set +# CONFIG_W83697UG_WDT is not set # CONFIG_W83877F_WDT is not set # CONFIG_W83977F_WDT is not set # CONFIG_MACHZ_WDT is not set @@ -1397,11 +1524,11 @@ CONFIG_WATCHDOG=y # USB-based Watchdog Cards # # CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -1410,7 +1537,13 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_REGULATOR is not set # # Multimedia devices @@ -1450,6 +1583,7 @@ CONFIG_DRM=y # CONFIG_DRM_I810 is not set # CONFIG_DRM_I830 is not set CONFIG_DRM_I915=y +# CONFIG_DRM_I915_KMS is not set # CONFIG_DRM_MGA is not set # CONFIG_DRM_SIS is not set # CONFIG_DRM_VIA is not set @@ -1459,6 +1593,7 @@ CONFIG_DRM_I915=y CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y CONFIG_FB_CFB_IMAGEBLIT=y @@ -1487,7 +1622,6 @@ CONFIG_FB_TILEBLITTING=y # CONFIG_FB_UVESA is not set # CONFIG_FB_VESA is not set CONFIG_FB_EFI=y -# CONFIG_FB_IMAC is not set # CONFIG_FB_N411 is not set # CONFIG_FB_HGA is not set # CONFIG_FB_S1D13XXX is not set @@ -1503,6 +1637,7 @@ CONFIG_FB_EFI=y # CONFIG_FB_S3 is not set # CONFIG_FB_SAVAGE is not set # CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set # CONFIG_FB_NEOMAGIC is not set # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set @@ -1515,12 +1650,15 @@ CONFIG_FB_EFI=y # CONFIG_FB_CARMINE is not set # CONFIG_FB_GEODE is not set # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_CORGI is not set +CONFIG_BACKLIGHT_GENERIC=y # CONFIG_BACKLIGHT_PROGEAR is not set # CONFIG_BACKLIGHT_MBP_NVIDIA is not set +# CONFIG_BACKLIGHT_SAHARA is not set # # Display device support @@ -1540,10 +1678,12 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_LOGO_LINUX_CLUT224=y CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y CONFIG_SND_HWDEP=y +CONFIG_SND_JACK=y CONFIG_SND_SEQUENCER=y CONFIG_SND_SEQ_DUMMY=y CONFIG_SND_OSSEMUL=y @@ -1551,6 +1691,8 @@ CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y CONFIG_SND_PCM_OSS_PLUGINS=y CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y @@ -1605,11 +1747,16 @@ CONFIG_SND_PCI=y # CONFIG_SND_FM801 is not set CONFIG_SND_HDA_INTEL=y CONFIG_SND_HDA_HWDEP=y +# 
CONFIG_SND_HDA_RECONFIG is not set +# CONFIG_SND_HDA_INPUT_BEEP is not set CONFIG_SND_HDA_CODEC_REALTEK=y CONFIG_SND_HDA_CODEC_ANALOG=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_VIA=y CONFIG_SND_HDA_CODEC_ATIHDMI=y +CONFIG_SND_HDA_CODEC_NVHDMI=y +CONFIG_SND_HDA_CODEC_INTELHDMI=y +CONFIG_SND_HDA_ELD=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_HDA_CODEC_CMEDIA=y CONFIG_SND_HDA_CODEC_SI3054=y @@ -1643,6 +1790,7 @@ CONFIG_SND_USB=y # CONFIG_SND_USB_AUDIO is not set # CONFIG_SND_USB_USX2Y is not set # CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_US122L is not set CONFIG_SND_PCMCIA=y # CONFIG_SND_VXPOCKET is not set # CONFIG_SND_PDAUDIOCF is not set @@ -1657,15 +1805,37 @@ CONFIG_HIDRAW=y # USB Input Devices # CONFIG_USB_HID=y -CONFIG_USB_HIDINPUT_POWERBOOK=y -CONFIG_HID_FF=y CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# Special HID drivers +# +CONFIG_HID_COMPAT=y +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_GYRATION=y +CONFIG_HID_LOGITECH=y CONFIG_LOGITECH_FF=y # CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_NTRIG=y +CONFIG_HID_PANTHERLORD=y CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_TOPSEED=y CONFIG_THRUSTMASTER_FF=y CONFIG_ZEROPLUS_FF=y -CONFIG_USB_HIDDEV=y CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -1683,6 +1853,8 @@ CONFIG_USB_DEVICEFS=y CONFIG_USB_SUSPEND=y # CONFIG_USB_OTG is not set CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set # # USB Host Controller Drivers @@ -1691,6 +1863,7 @@ CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_ROOT_HUB_TT is not set # CONFIG_USB_EHCI_TT_NEWSCHED is not set +# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set CONFIG_USB_OHCI_HCD=y @@ -1700,6 +1873,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_UHCI_HCD=y # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +# CONFIG_USB_HWA_HCD is not set # # USB Device Class drivers @@ -1707,20 +1882,20 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_ACM is not set CONFIG_USB_PRINTER=y # CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# may also be needed; see USB_STORAGE Help for more information +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set @@ -1728,7 +1903,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_ALAUDA is not set # CONFIG_USB_STORAGE_ONETOUCH is not set # CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_SIERRA is not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set CONFIG_USB_LIBUSUAL=y @@ -1749,6 +1923,7 @@ CONFIG_USB_LIBUSUAL=y # CONFIG_USB_EMI62 is not set # CONFIG_USB_EMI26 is not set # CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set @@ -1766,7 +1941,13 @@ 
CONFIG_USB_LIBUSUAL=y # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -1775,6 +1956,7 @@ CONFIG_LEDS_CLASS=y # # LED drivers # +# CONFIG_LEDS_ALIX2 is not set # CONFIG_LEDS_PCA9532 is not set # CONFIG_LEDS_CLEVO_MAIL is not set # CONFIG_LEDS_PCA955X is not set @@ -1785,6 +1967,7 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y # CONFIG_LEDS_TRIGGER_TIMER is not set # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set @@ -1824,6 +2007,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set # # SPI RTC drivers @@ -1833,12 +2017,15 @@ CONFIG_RTC_INTF_DEV=y # Platform RTC drivers # CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # @@ -1851,6 +2038,22 @@ CONFIG_DMADEVICES=y # # CONFIG_INTEL_IOATDMA is not set # CONFIG_UIO is not set +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WMI is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_FUJITSU_LAPTOP is not set +# CONFIG_TC1100_WMI is not set +# CONFIG_MSI_LAPTOP is not set +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_COMPAL_LAPTOP is not set +# CONFIG_SONY_LAPTOP is not set +# CONFIG_THINKPAD_ACPI is not set +# CONFIG_INTEL_MENLOW is not set +CONFIG_EEEPC_LAPTOP=y +# CONFIG_ACPI_WMI is not set +# CONFIG_ACPI_ASUS is not set +# CONFIG_ACPI_TOSHIBA is not set # # Firmware Drivers @@ -1872,21 +2075,24 @@ CONFIG_EXT3_FS=y CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_TREE=y # CONFIG_QFMT_V1 is not set CONFIG_QFMT_V2=y CONFIG_QUOTACTL=y @@ -1920,16 +2126,14 @@ CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_VMCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_ECRYPT_FS is not set @@ -1939,6 +2143,7 @@ CONFIG_HUGETLB_PAGE=y # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1960,6 +2165,7 @@ CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not 
set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -2066,33 +2272,54 @@ CONFIG_TIMER_STATS=y CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VIRTUAL is not set # CONFIG_DEBUG_WRITECOUNT is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_HAVE_FTRACE=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y -# CONFIG_FTRACE is not set +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_HW_BRANCH_TRACER=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set # CONFIG_SYSPROF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_POWER_TRACER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_HW_BRANCH_TRACER is not set CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set # CONFIG_STRICT_DEVMEM is not set CONFIG_X86_VERBOSE_BOOTUP=y CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACK_USAGE=y # CONFIG_DEBUG_PAGEALLOC is not set @@ -2123,8 +2350,10 @@ CONFIG_OPTIMIZE_INLINING=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set CONFIG_SECURITY_NETWORK=y # CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set CONFIG_SECURITY_FILE_CAPABILITIES=y # CONFIG_SECURITY_ROOTPLUG is not set CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 @@ -2135,7 +2364,6 @@ CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SELINUX_DEVELOP=y CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set # CONFIG_SECURITY_SMACK is not set CONFIG_CRYPTO=y @@ -2143,11 +2371,18 @@ CONFIG_CRYPTO=y # # Crypto core or helper # +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set # CONFIG_CRYPTO_CRYPTD is not set @@ -2182,6 +2417,7 @@ CONFIG_CRYPTO_HMAC=y # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_CRC32C_INTEL is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -2222,6 +2458,11 @@ CONFIG_CRYPTO_DES=y # # CONFIG_CRYPTO_DEFLATE is not set # CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_PADLOCK is not set # CONFIG_CRYPTO_DEV_GEODE is not set @@ 
-2239,6 +2480,7 @@ CONFIG_VIRTUALIZATION=y CONFIG_BITREVERSE=y CONFIG_GENERIC_FIND_FIRST_BIT=y CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC_CCITT is not set # CONFIG_CRC16 is not set CONFIG_CRC_T10DIF=y -- cgit v1.2.3 From dd5fc55449c5e7e4f008d7d26cda9e6e2bc94980 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 12:48:48 +0100 Subject: x86, defconfig: update the 64-bit defconfig Signed-off-by: Ingo Molnar --- arch/x86/configs/x86_64_defconfig | 412 ++++++++++++++++++++++++++++++-------- 1 file changed, 332 insertions(+), 80 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 322dd2748fc..4c85bfa58e1 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1,14 +1,13 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27-rc5 -# Wed Sep 3 17:13:39 2008 +# Linux kernel version: 2.6.29-rc4 +# Thu Feb 12 12:48:13 2009 # CONFIG_64BIT=y # CONFIG_X86_32 is not set CONFIG_X86_64=y CONFIG_X86=y CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -# CONFIG_GENERIC_LOCKBREAK is not set CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y CONFIG_CLOCKSOURCE_WATCHDOG=y @@ -23,17 +22,16 @@ CONFIG_ZONE_DMA=y CONFIG_GENERIC_ISA_DMA=y CONFIG_GENERIC_IOMAP=y CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y CONFIG_GENERIC_HWEIGHT=y -# CONFIG_GENERIC_GPIO is not set CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_DEFAULT_IDLE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y @@ -42,12 +40,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ZONE_DMA32=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_AOUT=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_PENDING_IRQ=y CONFIG_X86_SMP=y +CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_X86_64_SMP=y CONFIG_X86_HT=y CONFIG_X86_BIOS_REBOOT=y @@ -76,30 +74,44 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_AUDIT_TREE=y + +# +# RCU Subsystem +# +# CONFIG_CLASSIC_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=64 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=18 -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_NS=y -# CONFIG_CGROUP_DEVICE is not set -CONFIG_CPUSETS=y CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y CONFIG_GROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y # CONFIG_RT_GROUP_SCHED is not set # CONFIG_USER_SCHED is not set CONFIG_CGROUP_SCHED=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_NS=y +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_CPUACCT=y CONFIG_RESOURCE_COUNTERS=y # CONFIG_CGROUP_MEM_RES_CTLR is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set -CONFIG_PROC_PID_CPUSET=y CONFIG_RELAY=y CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" 
CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -124,12 +136,15 @@ CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y CONFIG_MARKERS=y # CONFIG_OPROFILE is not set CONFIG_HAVE_OPROFILE=y @@ -139,15 +154,10 @@ CONFIG_KRETPROBES=y CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y -# CONFIG_HAVE_ARCH_TRACEHOOK is not set -# CONFIG_HAVE_DMA_ATTRS is not set -CONFIG_USE_GENERIC_SMP_HELPERS=y -# CONFIG_HAVE_CLK is not set -CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_ARCH_TRACEHOOK=y # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -155,7 +165,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y CONFIG_BLK_DEV_IO_TRACE=y @@ -175,7 +184,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # Processor type and features @@ -185,6 +194,8 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y +CONFIG_SPARSE_IRQ=y +# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y CONFIG_X86_PC=y @@ -192,6 +203,7 @@ CONFIG_X86_PC=y # CONFIG_X86_VOYAGER is not set # CONFIG_X86_GENERICARCH is not set # CONFIG_X86_VSMP is not set +CONFIG_SCHED_OMIT_FRAME_POINTER=y # CONFIG_PARAVIRT_GUEST is not set # CONFIG_MEMTEST is not set # CONFIG_M386 is not set @@ -230,6 +242,11 @@ CONFIG_X86_CMPXCHG64=y CONFIG_X86_CMOV=y CONFIG_X86_MINIMUM_CPU_FAMILY=64 CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR_64=y +CONFIG_X86_DS=y +CONFIG_X86_PTRACE_BTS=y CONFIG_HPET_TIMER=y CONFIG_HPET_EMULATE_RTC=y CONFIG_DMI=y @@ -237,8 +254,11 @@ CONFIG_GART_IOMMU=y CONFIG_CALGARY_IOMMU=y CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_STATS=y CONFIG_SWIOTLB=y CONFIG_IOMMU_HELPER=y +CONFIG_IOMMU_API=y +# CONFIG_MAXSMP is not set CONFIG_NR_CPUS=64 CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y @@ -247,12 +267,17 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y # CONFIG_X86_MCE is not set # CONFIG_I8K is not set CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_DIRECT_GBPAGES=y CONFIG_NUMA=y CONFIG_K8_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -269,7 +294,6 @@ CONFIG_SPARSEMEM_MANUAL=y CONFIG_SPARSEMEM=y CONFIG_NEED_MULTIPLE_NODES=y CONFIG_HAVE_MEMORY_PRESENT=y -# CONFIG_SPARSEMEM_STATIC is not set CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_VMEMMAP=y @@ -280,10 +304,14 @@ CONFIG_SPARSEMEM_VMEMMAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MIGRATION=y -CONFIG_RESOURCES_64BIT=y +CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y +CONFIG_UNEVICTABLE_LRU=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW_64K=y CONFIG_MTRR=y # CONFIG_MTRR_SANITIZER is not set CONFIG_X86_PAT=y @@ 
-302,11 +330,12 @@ CONFIG_PHYSICAL_START=0x1000000 CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set +# CONFIG_CMDLINE_BOOL is not set CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y # -# Power management options +# Power management and ACPI options # CONFIG_ARCH_HIBERNATION_HEADER=y CONFIG_PM=y @@ -333,20 +362,14 @@ CONFIG_ACPI_BATTERY=y CONFIG_ACPI_BUTTON=y CONFIG_ACPI_FAN=y CONFIG_ACPI_DOCK=y -# CONFIG_ACPI_BAY is not set CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_THERMAL=y CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_WMI is not set -# CONFIG_ACPI_ASUS is not set -# CONFIG_ACPI_TOSHIBA is not set # CONFIG_ACPI_CUSTOM_DSDT is not set CONFIG_ACPI_BLACKLIST_YEAR=0 # CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_EC=y # CONFIG_ACPI_PCI_SLOT is not set -CONFIG_ACPI_POWER=y CONFIG_ACPI_SYSTEM=y CONFIG_X86_PM_TIMER=y CONFIG_ACPI_CONTAINER=y @@ -381,12 +404,16 @@ CONFIG_X86_ACPI_CPUFREQ=y # # shared options # -# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set # CONFIG_X86_SPEEDSTEP_LIB is not set CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y +# +# Memory power savings +# +# CONFIG_I7300_IDLE is not set + # # Bus options (PCI etc.) # @@ -395,8 +422,10 @@ CONFIG_PCI_DIRECT=y CONFIG_PCI_MMCONFIG=y CONFIG_PCI_DOMAINS=y CONFIG_DMAR=y +# CONFIG_DMAR_DEFAULT_ON is not set CONFIG_DMAR_GFX_WA=y CONFIG_DMAR_FLOPPY_WA=y +# CONFIG_INTR_REMAP is not set CONFIG_PCIEPORTBUS=y # CONFIG_HOTPLUG_PCI_PCIE is not set CONFIG_PCIEAER=y @@ -405,6 +434,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y CONFIG_PCI_MSI=y # CONFIG_PCI_LEGACY is not set # CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set CONFIG_HT_IRQ=y CONFIG_ISA_DMA_API=y CONFIG_K8_NB=y @@ -438,6 +468,8 @@ CONFIG_HOTPLUG_PCI=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_HAVE_AOUT is not set CONFIG_BINFMT_MISC=y CONFIG_IA32_EMULATION=y # CONFIG_IA32_AOUT is not set @@ -449,6 +481,7 @@ CONFIG_NET=y # # Networking options # +CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -509,7 +542,6 @@ CONFIG_DEFAULT_CUBIC=y # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" CONFIG_TCP_MD5SIG=y -# CONFIG_IP_VS is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -547,19 +579,21 @@ CONFIG_NF_CONNTRACK_IRC=y CONFIG_NF_CONNTRACK_SIP=y CONFIG_NF_CT_NETLINK=y CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y CONFIG_NETFILTER_XT_MATCH_MARK=y CONFIG_NETFILTER_XT_MATCH_POLICY=y CONFIG_NETFILTER_XT_MATCH_STATE=y +# CONFIG_IP_VS is not set # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_CONNTRACK_IPV4=y CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=y @@ -585,8 +619,8 @@ CONFIG_IP_NF_MANGLE=y CONFIG_NF_CONNTRACK_IPV6=y CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y # CONFIG_IP_DCCP is not set @@ -594,6 +628,7 @@ CONFIG_IP6_NF_MANGLE=y # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set CONFIG_LLC=y @@ -613,6 +648,7 @@ CONFIG_NET_SCHED=y # 
CONFIG_NET_SCH_HTB is not set # CONFIG_NET_SCH_HFSC is not set # CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set # CONFIG_NET_SCH_RED is not set # CONFIG_NET_SCH_SFQ is not set # CONFIG_NET_SCH_TEQL is not set @@ -620,6 +656,7 @@ CONFIG_NET_SCHED=y # CONFIG_NET_SCH_GRED is not set # CONFIG_NET_SCH_DSMARK is not set # CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set # CONFIG_NET_SCH_INGRESS is not set # @@ -634,6 +671,7 @@ CONFIG_NET_CLS=y # CONFIG_NET_CLS_RSVP is not set # CONFIG_NET_CLS_RSVP6 is not set # CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set CONFIG_NET_EMATCH=y CONFIG_NET_EMATCH_STACK=32 # CONFIG_NET_EMATCH_CMP is not set @@ -649,7 +687,9 @@ CONFIG_NET_CLS_ACT=y # CONFIG_NET_ACT_NAT is not set # CONFIG_NET_ACT_PEDIT is not set # CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set # # Network testing @@ -666,29 +706,33 @@ CONFIG_HAMRADIO=y # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set CONFIG_FIB_RULES=y - -# -# Wireless -# +CONFIG_WIRELESS=y CONFIG_CFG80211=y +# CONFIG_CFG80211_REG_DEBUG is not set CONFIG_NL80211=y +CONFIG_WIRELESS_OLD_REGULATORY=y CONFIG_WIRELESS_EXT=y CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set CONFIG_MAC80211=y # # Rate control algorithm selection # -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y -CONFIG_MAC80211_RC_DEFAULT="pid" +CONFIG_MAC80211_RC_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel" # CONFIG_MAC80211_MESH is not set CONFIG_MAC80211_LEDS=y # CONFIG_MAC80211_DEBUGFS is not set # CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_IEEE80211 is not set -# CONFIG_RFKILL is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_INPUT is not set +CONFIG_RFKILL_LEDS=y # CONFIG_NET_9P is not set # @@ -712,7 +756,7 @@ CONFIG_PROC_EVENTS=y # CONFIG_MTD is not set # CONFIG_PARPORT is not set CONFIG_PNP=y -# CONFIG_PNP_DEBUG is not set +CONFIG_PNP_DEBUG_MESSAGES=y # # Protocols @@ -740,21 +784,21 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_MISC_DEVICES=y # CONFIG_IBM_ASM is not set # CONFIG_PHANTOM is not set -# CONFIG_EEPROM_93CX6 is not set # CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set -# CONFIG_ACER_WMI is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_FUJITSU_LAPTOP is not set -# CONFIG_MSI_LAPTOP is not set -# CONFIG_COMPAL_LAPTOP is not set -# CONFIG_SONY_LAPTOP is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_INTEL_MENLOW is not set +# CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_SGI_XP is not set # CONFIG_HP_ILO is not set # CONFIG_SGI_GRU is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -864,6 +908,7 @@ CONFIG_PATA_OLDPIIX=y CONFIG_PATA_SCH=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y # CONFIG_MD_LINEAR is not set # CONFIG_MD_RAID0 is not set # CONFIG_MD_RAID1 is not set @@ -919,6 +964,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -942,6 +990,9 @@ CONFIG_NET_TULIP=y # 
CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set CONFIG_NET_PCI=y # CONFIG_PCNET32 is not set # CONFIG_AMD8111_ETH is not set @@ -949,7 +1000,6 @@ CONFIG_NET_PCI=y # CONFIG_B44 is not set CONFIG_FORCEDETH=y # CONFIG_FORCEDETH_NAPI is not set -# CONFIG_EEPRO100 is not set CONFIG_E100=y # CONFIG_FEALNX is not set # CONFIG_NATSEMI is not set @@ -963,15 +1013,16 @@ CONFIG_8139TOO_PIO=y # CONFIG_R6040 is not set # CONFIG_SIS900 is not set # CONFIG_EPIC100 is not set +# CONFIG_SMSC9420 is not set # CONFIG_SUNDANCE is not set # CONFIG_TLAN is not set # CONFIG_VIA_RHINE is not set # CONFIG_SC92031 is not set +# CONFIG_ATL2 is not set CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set CONFIG_E1000=y -# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set # CONFIG_E1000E is not set # CONFIG_IP1000 is not set # CONFIG_IGB is not set @@ -989,18 +1040,23 @@ CONFIG_TIGON3=y # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set +# CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_CHELSIO_T3 is not set +# CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set +# CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set # CONFIG_BNX2X is not set +# CONFIG_QLGE is not set # CONFIG_SFC is not set CONFIG_TR=y # CONFIG_IBMOL is not set @@ -1013,9 +1069,8 @@ CONFIG_TR=y # CONFIG_WLAN_PRE80211 is not set CONFIG_WLAN_80211=y # CONFIG_PCMCIA_RAYCS is not set -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set # CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_AIRO is not set # CONFIG_HERMES is not set # CONFIG_ATMEL is not set @@ -1032,6 +1087,8 @@ CONFIG_WLAN_80211=y CONFIG_ATH5K=y # CONFIG_ATH5K_DEBUG is not set # CONFIG_ATH9K is not set +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set # CONFIG_IWLCORE is not set # CONFIG_IWLWIFI_LEDS is not set # CONFIG_IWLAGN is not set @@ -1042,6 +1099,10 @@ CONFIG_ATH5K=y # CONFIG_ZD1211RW is not set # CONFIG_RT2X00 is not set +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + # # USB Network Adapters # @@ -1050,6 +1111,7 @@ CONFIG_ATH5K=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set # CONFIG_USB_USBNET is not set +# CONFIG_USB_HSO is not set CONFIG_NET_PCMCIA=y # CONFIG_PCMCIA_3C589 is not set # CONFIG_PCMCIA_3C574 is not set @@ -1059,6 +1121,7 @@ CONFIG_NET_PCMCIA=y # CONFIG_PCMCIA_SMC91C92 is not set # CONFIG_PCMCIA_XIRC2PS is not set # CONFIG_PCMCIA_AXNET is not set +# CONFIG_PCMCIA_IBMTR is not set # CONFIG_WAN is not set CONFIG_FDDI=y # CONFIG_DEFXX is not set @@ -1110,6 +1173,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y CONFIG_MOUSE_PS2_SYNAPTICS=y CONFIG_MOUSE_PS2_LIFEBOOK=y CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_SERIAL is not set # CONFIG_MOUSE_APPLETOUCH is not set @@ -1147,15 +1211,16 @@ CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set # CONFIG_TOUCHSCREEN_INEXIO is not set # 
CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_UCB1400 is not set # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_PCSPKR is not set # CONFIG_INPUT_APANEL is not set @@ -1165,6 +1230,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_KEYSPAN_REMOTE is not set # CONFIG_INPUT_POWERMATE is not set # CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_UINPUT is not set # @@ -1231,6 +1297,7 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y @@ -1260,6 +1327,7 @@ CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y # CONFIG_I2C_CHARDEV is not set CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y # # I2C Hardware Bus support @@ -1311,8 +1379,6 @@ CONFIG_I2C_I801=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -1331,8 +1397,78 @@ CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set # CONFIG_PDA_POWER is not set # CONFIG_BATTERY_DS2760 is not set -# CONFIG_HWMON is not set +# CONFIG_BATTERY_BQ27x00 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set 
+# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_HWMON_DEBUG_CHIP is not set CONFIG_THERMAL=y +# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -1352,15 +1488,18 @@ CONFIG_WATCHDOG=y # CONFIG_I6300ESB_WDT is not set # CONFIG_ITCO_WDT is not set # CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set # CONFIG_HP_WATCHDOG is not set # CONFIG_SC1200_WDT is not set # CONFIG_PC87413_WDT is not set # CONFIG_60XX_WDT is not set # CONFIG_SBC8360_WDT is not set # CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set # CONFIG_SMSC37B787_WDT is not set # CONFIG_W83627HF_WDT is not set # CONFIG_W83697HF_WDT is not set +# CONFIG_W83697UG_WDT is not set # CONFIG_W83877F_WDT is not set # CONFIG_W83977F_WDT is not set # CONFIG_MACHZ_WDT is not set @@ -1376,11 +1515,11 @@ CONFIG_WATCHDOG=y # USB-based Watchdog Cards # # CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -1389,7 +1528,13 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_REGULATOR is not set # # Multimedia devices @@ -1423,6 +1568,7 @@ CONFIG_DRM=y # CONFIG_DRM_I810 is not set # CONFIG_DRM_I830 is not set CONFIG_DRM_I915=y +CONFIG_DRM_I915_KMS=y # CONFIG_DRM_MGA is not set # CONFIG_DRM_SIS is not set # CONFIG_DRM_VIA is not set @@ -1432,6 +1578,7 @@ CONFIG_DRM_I915=y CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y CONFIG_FB_CFB_IMAGEBLIT=y @@ -1460,7 +1607,6 @@ CONFIG_FB_TILEBLITTING=y # CONFIG_FB_UVESA is not set # CONFIG_FB_VESA is not set CONFIG_FB_EFI=y -# CONFIG_FB_IMAC is not set # CONFIG_FB_N411 is not set # CONFIG_FB_HGA is not set # CONFIG_FB_S1D13XXX is not set @@ -1475,6 +1621,7 @@ CONFIG_FB_EFI=y # CONFIG_FB_S3 is not set # CONFIG_FB_SAVAGE is not set # CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set # CONFIG_FB_NEOMAGIC is not set # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set @@ -1486,12 +1633,15 @@ CONFIG_FB_EFI=y # CONFIG_FB_CARMINE is not set # CONFIG_FB_GEODE is not set # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_CORGI is not set +CONFIG_BACKLIGHT_GENERIC=y # CONFIG_BACKLIGHT_PROGEAR is not set # CONFIG_BACKLIGHT_MBP_NVIDIA is not set +# CONFIG_BACKLIGHT_SAHARA is not set # # Display device support @@ -1511,10 +1661,12 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_LOGO_LINUX_CLUT224=y CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y CONFIG_SND_HWDEP=y +CONFIG_SND_JACK=y CONFIG_SND_SEQUENCER=y CONFIG_SND_SEQ_DUMMY=y CONFIG_SND_OSSEMUL=y @@ -1522,6 +1674,8 
@@ CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y CONFIG_SND_PCM_OSS_PLUGINS=y CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y @@ -1575,11 +1729,16 @@ CONFIG_SND_PCI=y # CONFIG_SND_FM801 is not set CONFIG_SND_HDA_INTEL=y CONFIG_SND_HDA_HWDEP=y +# CONFIG_SND_HDA_RECONFIG is not set +# CONFIG_SND_HDA_INPUT_BEEP is not set CONFIG_SND_HDA_CODEC_REALTEK=y CONFIG_SND_HDA_CODEC_ANALOG=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_VIA=y CONFIG_SND_HDA_CODEC_ATIHDMI=y +CONFIG_SND_HDA_CODEC_NVHDMI=y +CONFIG_SND_HDA_CODEC_INTELHDMI=y +CONFIG_SND_HDA_ELD=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_HDA_CODEC_CMEDIA=y CONFIG_SND_HDA_CODEC_SI3054=y @@ -1612,6 +1771,7 @@ CONFIG_SND_USB=y # CONFIG_SND_USB_AUDIO is not set # CONFIG_SND_USB_USX2Y is not set # CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_US122L is not set CONFIG_SND_PCMCIA=y # CONFIG_SND_VXPOCKET is not set # CONFIG_SND_PDAUDIOCF is not set @@ -1626,15 +1786,37 @@ CONFIG_HIDRAW=y # USB Input Devices # CONFIG_USB_HID=y -CONFIG_USB_HIDINPUT_POWERBOOK=y -CONFIG_HID_FF=y CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# Special HID drivers +# +CONFIG_HID_COMPAT=y +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_GYRATION=y +CONFIG_HID_LOGITECH=y CONFIG_LOGITECH_FF=y # CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_NTRIG=y +CONFIG_HID_PANTHERLORD=y CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_TOPSEED=y CONFIG_THRUSTMASTER_FF=y CONFIG_ZEROPLUS_FF=y -CONFIG_USB_HIDDEV=y CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -1652,6 +1834,8 @@ CONFIG_USB_DEVICEFS=y CONFIG_USB_SUSPEND=y # CONFIG_USB_OTG is not set CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set # # USB Host Controller Drivers @@ -1660,6 +1844,7 @@ CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_ROOT_HUB_TT is not set # CONFIG_USB_EHCI_TT_NEWSCHED is not set +# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set CONFIG_USB_OHCI_HCD=y @@ -1669,6 +1854,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_UHCI_HCD=y # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +# CONFIG_USB_HWA_HCD is not set # # USB Device Class drivers @@ -1676,20 +1863,20 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_ACM is not set CONFIG_USB_PRINTER=y # CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# may also be needed; see USB_STORAGE Help for more information +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set @@ -1697,7 +1884,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_ALAUDA is not set # CONFIG_USB_STORAGE_ONETOUCH is not set # CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_SIERRA is 
not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set CONFIG_USB_LIBUSUAL=y @@ -1718,6 +1904,7 @@ CONFIG_USB_LIBUSUAL=y # CONFIG_USB_EMI62 is not set # CONFIG_USB_EMI26 is not set # CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set @@ -1735,7 +1922,13 @@ CONFIG_USB_LIBUSUAL=y # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -1744,6 +1937,7 @@ CONFIG_LEDS_CLASS=y # # LED drivers # +# CONFIG_LEDS_ALIX2 is not set # CONFIG_LEDS_PCA9532 is not set # CONFIG_LEDS_CLEVO_MAIL is not set # CONFIG_LEDS_PCA955X is not set @@ -1754,6 +1948,7 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y # CONFIG_LEDS_TRIGGER_TIMER is not set # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set @@ -1793,6 +1988,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set # # SPI RTC drivers @@ -1802,12 +1998,15 @@ CONFIG_RTC_INTF_DEV=y # Platform RTC drivers # CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # @@ -1820,6 +2019,21 @@ CONFIG_DMADEVICES=y # # CONFIG_INTEL_IOATDMA is not set # CONFIG_UIO is not set +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WMI is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_FUJITSU_LAPTOP is not set +# CONFIG_MSI_LAPTOP is not set +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_COMPAL_LAPTOP is not set +# CONFIG_SONY_LAPTOP is not set +# CONFIG_THINKPAD_ACPI is not set +# CONFIG_INTEL_MENLOW is not set +CONFIG_EEEPC_LAPTOP=y +# CONFIG_ACPI_WMI is not set +# CONFIG_ACPI_ASUS is not set +# CONFIG_ACPI_TOSHIBA is not set # # Firmware Drivers @@ -1841,22 +2055,25 @@ CONFIG_EXT3_FS=y CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_TREE=y # CONFIG_QFMT_V1 is not set CONFIG_QFMT_V2=y CONFIG_QUOTACTL=y @@ -1890,16 +2107,14 @@ CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_VMCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_ECRYPT_FS is not set @@ -1909,6 +2124,7 @@ 
CONFIG_HUGETLB_PAGE=y # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1930,6 +2146,7 @@ CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -2035,40 +2252,60 @@ CONFIG_TIMER_STATS=y CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VIRTUAL is not set # CONFIG_DEBUG_WRITECOUNT is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_HAVE_FTRACE=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y -# CONFIG_FTRACE is not set +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_HW_BRANCH_TRACER=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set # CONFIG_SYSPROF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_POWER_TRACER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_HW_BRANCH_TRACER is not set CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set # CONFIG_STRICT_DEVMEM is not set CONFIG_X86_VERBOSE_BOOTUP=y CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACK_USAGE=y # CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_DEBUG_PER_CPU_MAPS is not set # CONFIG_X86_PTDUMP is not set CONFIG_DEBUG_RODATA=y -# CONFIG_DIRECT_GBPAGES is not set # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_NX_TEST=m # CONFIG_IOMMU_DEBUG is not set @@ -2092,8 +2329,10 @@ CONFIG_OPTIMIZE_INLINING=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set CONFIG_SECURITY_NETWORK=y # CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set CONFIG_SECURITY_FILE_CAPABILITIES=y # CONFIG_SECURITY_ROOTPLUG is not set CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 @@ -2104,7 +2343,6 @@ CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SELINUX_DEVELOP=y CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set # CONFIG_SECURITY_SMACK is not set CONFIG_CRYPTO=y @@ -2112,11 +2350,18 @@ CONFIG_CRYPTO=y # # Crypto core or helper # +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not 
set # CONFIG_CRYPTO_NULL is not set # CONFIG_CRYPTO_CRYPTD is not set @@ -2151,6 +2396,7 @@ CONFIG_CRYPTO_HMAC=y # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_CRC32C_INTEL is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -2191,6 +2437,11 @@ CONFIG_CRYPTO_DES=y # # CONFIG_CRYPTO_DEFLATE is not set # CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_HIFN_795X is not set CONFIG_HAVE_KVM=y @@ -2205,6 +2456,7 @@ CONFIG_VIRTUALIZATION=y CONFIG_BITREVERSE=y CONFIG_GENERIC_FIND_FIRST_BIT=y CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC_CCITT is not set # CONFIG_CRC16 is not set CONFIG_CRC_T10DIF=y -- cgit v1.2.3 From 556831063bcf8fd818e92227c40e3eaabe759bab Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 12:51:29 +0100 Subject: x86, defconfig: turn off CONFIG_ENABLE_WARN_DEPRECATED MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit deprecation warnings have become rather noisy lately: drivers/i2c/i2c-core.c: In function ‘i2c_new_device’: drivers/i2c/i2c-core.c:283: warning: ‘i2c_attach_client’ is deprecated (declared at include/linux/i2c.h:434) drivers/i2c/i2c-core.c: In function ‘i2c_del_adapter’: drivers/i2c/i2c-core.c:646: warning: ‘detach_client’ is deprecated (declared at include/linux/i2c.h:154) drivers/i2c/i2c-core.c: In function ‘i2c_register_driver’: drivers/i2c/i2c-core.c:713: warning: ‘detach_client’ is deprecated (declared at include/linux/i2c.h:154) drivers/i2c/i2c-core.c: In function ‘__detach_adapter’: drivers/i2c/i2c-core.c:780: warning: ‘detach_client’ is deprecated (declared at include/linux/i2c.h:154) drivers/i2c/i2c-core.c: At top level: drivers/i2c/i2c-core.c:876: warning: ‘i2c_attach_client’ is deprecated (declared at drivers/i2c/i2c-core.c:827) drivers/i2c/i2c-core.c:876: warning: ‘i2c_attach_client’ is deprecated (declared at drivers/i2c/i2c-core.c:827) drivers/i2c/i2c-core.c:904: warning: ‘i2c_detach_client’ is deprecated (declared at drivers/i2c/i2c-core.c:879) drivers/i2c/i2c-core.c:904: warning: ‘i2c_detach_client’ is deprecated (declared at drivers/i2c/i2c-core.c:879) So turn it off for now - these reminders can obscure critical warnings. 
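(Illustrative sketch, not part of the patch.) The warnings quoted above are GCC's deprecated-declaration diagnostics; if memory serves, the kernel's __deprecated annotation only expands to __attribute__((deprecated)) while CONFIG_ENABLE_WARN_DEPRECATED is set, which is why disabling the option silences exactly this class of warning. A minimal standalone C example of the same warning class (hypothetical example.c, nothing below is taken from the kernel tree):

	/* gcc 4.x prints something like:
	 *   warning: 'old_helper' is deprecated (declared at example.c:6)
	 * at the call site in main().
	 */
	__attribute__((deprecated)) static int old_helper(void)
	{
		return 0;
	}

	int main(void)
	{
		return old_helper();	/* use of the deprecated function triggers the warning */
	}
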
Signed-off-by: Ingo Molnar --- arch/x86/configs/i386_defconfig | 3 +-- arch/x86/configs/x86_64_defconfig | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index c3186ccc936..0562a36f74d 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -2242,8 +2242,7 @@ CONFIG_NLS_UTF8=y # CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_PRINTK_TIME=y -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 4c85bfa58e1..06633483b29 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -2223,8 +2223,7 @@ CONFIG_NLS_UTF8=y # CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_PRINTK_TIME=y -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set -- cgit v1.2.3 From bd282422fe9566a89bc34af325efb6d2701903be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 12:59:05 +0100 Subject: x86, defconfig: turn off CONFIG_SCSI_ISCSI_ATTRS=y It was enabled by mistake - iscsi is not included in a typical default PC, and no other architecture has it built-in (=y) either. Turn it off. Signed-off-by: Ingo Molnar --- arch/x86/configs/i386_defconfig | 8 ++++---- arch/x86/configs/x86_64_defconfig | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 0562a36f74d..8f40a80ccb5 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.29-rc4 -# Thu Feb 12 12:42:50 2009 +# Thu Feb 12 12:57:57 2009 # # CONFIG_64BIT is not set CONFIG_X86_32=y @@ -835,7 +835,7 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_FC_ATTRS is not set -CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_ISCSI_ATTRS is not set # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set @@ -2064,8 +2064,7 @@ CONFIG_EFI_VARS=y # CONFIG_DELL_RBU is not set # CONFIG_DCDBAS is not set CONFIG_DMIID=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=y +# CONFIG_ISCSI_IBFT_FIND is not set # # File systems @@ -2243,6 +2242,7 @@ CONFIG_NLS_UTF8=y CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 06633483b29..10728fd91c6 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.29-rc4 -# Thu Feb 12 12:48:13 2009 +# Thu Feb 12 12:57:29 2009 # CONFIG_64BIT=y # CONFIG_X86_32 is not set @@ -837,7 +837,7 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_FC_ATTRS is not set -CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_ISCSI_ATTRS is not set # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set @@ -2044,8 +2044,7 @@ CONFIG_EFI_VARS=y # CONFIG_DELL_RBU is not set # 
CONFIG_DCDBAS is not set CONFIG_DMIID=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=y +# CONFIG_ISCSI_IBFT_FIND is not set # # File systems @@ -2224,6 +2223,7 @@ CONFIG_NLS_UTF8=y CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set -- cgit v1.2.3 From 3023533de43c5c01c660e1b48d3700b028eb4615 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 13:28:43 +0100 Subject: x86: fix warning in find_low_pfn_range() Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index d48f2560364..e77459dd38a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -722,7 +722,7 @@ void __init find_low_pfn_range(void) highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) { - printk(KERN_ERR "highmem size specified (%uMB) is " + printk(KERN_ERR "highmem size specified (%luMB) is " "bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); @@ -731,7 +731,7 @@ void __init find_low_pfn_range(void) if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE){ - printk(KERN_ERR "highmem size %uMB results in " + printk(KERN_ERR "highmem size %luMB results in " "smaller than 64MB lowmem, ignoring it.\n" , pages_to_mb(highmem_pages)); highmem_pages = 0; -- cgit v1.2.3 From 4769843bc265a9c24584b98709cf39e1df5c1404 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 13:31:41 +0100 Subject: x86, 32-bit: clean up find_low_pfn_range() Impact: cleanup Split find_low_pfn_range() into two functions: - lowmem_pfn_init() - highmem_pfn_init() The former gets called if all of RAM fits into lowmem, otherwise we call highmem_pfn_init(). 
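For readability, here is the resulting dispatcher condensed from the hunks below; this adds nothing beyond what the patch itself introduces, it just pulls the end state out of the 137-line diff:

	/* Determine low and high memory ranges: */
	void __init find_low_pfn_range(void)
	{
		/* it could update max_pfn */

		/* max_low_pfn is 0, we already have early_res support */
		max_low_pfn = max_pfn;

		if (max_low_pfn > MAXMEM_PFN)
			highmem_pfn_init();	/* more RAM than fits into lowmem */
		else
			lowmem_pfn_init();	/* all of RAM fits into lowmem */
	}
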
Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 137 +++++++++++++++++++++++++++++--------------------- 1 file changed, 79 insertions(+), 58 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index e77459dd38a..9d36eb9ebd5 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -675,75 +675,96 @@ static int __init parse_highmem(char *arg) } early_param("highmem", parse_highmem); +#define MSG_HIGHMEM_TOO_BIG \ + "highmem size (%luMB) is bigger than pages available (%luMB)!\n" + +#define MSG_LOWMEM_TOO_SMALL \ + "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* - * Determine low and high memory ranges: + * All of RAM fits into lowmem - but if user wants highmem + * artificially via the highmem=x boot parameter then create + * it: */ -void __init find_low_pfn_range(void) +void __init lowmem_pfn_init(void) { - /* it could update max_pfn */ - - /* max_low_pfn is 0, we already have early_res support */ - - max_low_pfn = max_pfn; - if (max_low_pfn > MAXMEM_PFN) { - if (highmem_pages == -1) - highmem_pages = max_pfn - MAXMEM_PFN; - if (highmem_pages + MAXMEM_PFN < max_pfn) - max_pfn = MAXMEM_PFN + highmem_pages; - if (highmem_pages + MAXMEM_PFN > max_pfn) { - printk(KERN_WARNING "only %luMB highmem pages " - "available, ignoring highmem size of %luMB.\n", - pages_to_mb(max_pfn - MAXMEM_PFN), + if (highmem_pages == -1) + highmem_pages = 0; +#ifdef CONFIG_HIGHMEM + if (highmem_pages >= max_pfn) { + printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, + pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); + highmem_pages = 0; + } + if (highmem_pages) { + if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { + printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, pages_to_mb(highmem_pages)); highmem_pages = 0; } - max_low_pfn = MAXMEM_PFN; + max_low_pfn -= highmem_pages; + } +#else + if (highmem_pages) + printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); +#endif +} + +#define MSG_HIGHMEM_TOO_SMALL \ + "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" + +#define MSG_HIGHMEM_TRIMMED \ + "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n" +/* + * We have more RAM than fits into lowmem - we try to put it into + * highmem, also taking the highmem=x boot parameter into account: + */ +void __init highmem_pfn_init(void) +{ + if (highmem_pages == -1) + highmem_pages = max_pfn - MAXMEM_PFN; + + if (highmem_pages + MAXMEM_PFN < max_pfn) + max_pfn = MAXMEM_PFN + highmem_pages; + + if (highmem_pages + MAXMEM_PFN > max_pfn) { + printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, + pages_to_mb(max_pfn - MAXMEM_PFN), + pages_to_mb(highmem_pages)); + highmem_pages = 0; + } + max_low_pfn = MAXMEM_PFN; #ifndef CONFIG_HIGHMEM - /* Maximum memory usable is what is directly addressable */ - printk(KERN_WARNING "Warning only %ldMB will be used.\n", - MAXMEM>>20); - if (max_pfn > MAX_NONPAE_PFN) - printk(KERN_WARNING - "Use a HIGHMEM64G enabled kernel.\n"); - else - printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); - max_pfn = MAXMEM_PFN; + /* Maximum memory usable is what is directly addressable */ + printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); + if (max_pfn > MAX_NONPAE_PFN) + printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); + else + printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); + max_pfn = MAXMEM_PFN; #else /* !CONFIG_HIGHMEM */ #ifndef CONFIG_HIGHMEM64G - if (max_pfn > MAX_NONPAE_PFN) { - max_pfn = MAX_NONPAE_PFN; - printk(KERN_WARNING "Warning only 4GB will be used." 
- "Use a HIGHMEM64G enabled kernel.\n"); - } + if (max_pfn > MAX_NONPAE_PFN) { + max_pfn = MAX_NONPAE_PFN; + printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); + } #endif /* !CONFIG_HIGHMEM64G */ #endif /* !CONFIG_HIGHMEM */ - } else { - if (highmem_pages == -1) - highmem_pages = 0; -#ifdef CONFIG_HIGHMEM - if (highmem_pages >= max_pfn) { - printk(KERN_ERR "highmem size specified (%luMB) is " - "bigger than pages available (%luMB)!.\n", - pages_to_mb(highmem_pages), - pages_to_mb(max_pfn)); - highmem_pages = 0; - } - if (highmem_pages) { - if (max_low_pfn - highmem_pages < - 64*1024*1024/PAGE_SIZE){ - printk(KERN_ERR "highmem size %luMB results in " - "smaller than 64MB lowmem, ignoring it.\n" - , pages_to_mb(highmem_pages)); - highmem_pages = 0; - } - max_low_pfn -= highmem_pages; - } -#else - if (highmem_pages) - printk(KERN_ERR "ignoring highmem size on non-highmem" - " kernel!\n"); -#endif - } +} + +/* + * Determine low and high memory ranges: + */ +void __init find_low_pfn_range(void) +{ + /* it could update max_pfn */ + + /* max_low_pfn is 0, we already have early_res support */ + max_low_pfn = max_pfn; + + if (max_low_pfn > MAXMEM_PFN) + highmem_pfn_init(); + else + lowmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES -- cgit v1.2.3 From d88316c243e5458a1888edbe0353c4dec6e61c73 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 15:16:03 +0100 Subject: x86, 32-bit: refactor find_low_pfn_range() Impact: cleanup Make the max_low_pfn logic a bit more standard between lowmem_pfn_init() and highmem_pfn_init(). Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 9d36eb9ebd5..1a9612499a3 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -687,6 +687,9 @@ early_param("highmem", parse_highmem); */ void __init lowmem_pfn_init(void) { + /* max_low_pfn is 0, we already have early_res support */ + max_low_pfn = max_pfn; + if (highmem_pages == -1) highmem_pages = 0; #ifdef CONFIG_HIGHMEM @@ -720,6 +723,8 @@ void __init lowmem_pfn_init(void) */ void __init highmem_pfn_init(void) { + max_low_pfn = MAXMEM_PFN; + if (highmem_pages == -1) highmem_pages = max_pfn - MAXMEM_PFN; @@ -732,7 +737,6 @@ void __init highmem_pfn_init(void) pages_to_mb(highmem_pages)); highmem_pages = 0; } - max_low_pfn = MAXMEM_PFN; #ifndef CONFIG_HIGHMEM /* Maximum memory usable is what is directly addressable */ printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); @@ -758,13 +762,10 @@ void __init find_low_pfn_range(void) { /* it could update max_pfn */ - /* max_low_pfn is 0, we already have early_res support */ - max_low_pfn = max_pfn; - - if (max_low_pfn > MAXMEM_PFN) - highmem_pfn_init(); - else + if (max_pfn <= MAXMEM_PFN) lowmem_pfn_init(); + else + highmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES -- cgit v1.2.3 From 999c7880cc8eeb0cbe6610b8c6d0ab0ec51cd848 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 13 Feb 2009 13:15:55 +0100 Subject: x86 headers: remove duplicate pud_large() definition Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 62024ff897d..1c097a3a666 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -387,13 +387,6 @@ static inline unsigned long pages_to_mb(unsigned long npg) #define io_remap_pfn_range(vma, 
vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -#if PAGETABLE_LEVELS == 2 -static inline int pud_large(pud_t pud) -{ - return 0; -} -#endif - #if PAGETABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { -- cgit v1.2.3 From 56cefcea7c8769ffc04a5609e6ac849e36685468 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 13 Feb 2009 13:23:02 +0100 Subject: x86 headers: include linux/types.h To properly pick up types relied on by prototypes like 'bool'. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 467ce69306b..89ed9d70b0a 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_PAGE_H #define _ASM_X86_PAGE_H +#include + #ifdef __KERNEL__ #include -- cgit v1.2.3 From e43623b4ed1d0a1a86f0e05a2df8b9637b90ddd7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 13 Feb 2009 13:24:19 +0100 Subject: x86 headers: include page_types.h in pgtable_types.h To properly pick up details like PTE_FLAGS_MASK. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_types.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index a7452f10930..9dafe87be2d 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -2,6 +2,7 @@ #define _ASM_X86_PGTABLE_DEFS_H #include +#include #define FIRST_USER_ADDRESS 0 -- cgit v1.2.3 From beb6943d8df7ce9278282101af4e0f6f7b648451 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 13 Feb 2009 13:36:47 +0100 Subject: x86 headers: protect page_32.h via __ASSEMBLY__ Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index b3f0bf79e84..da4e762406f 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h @@ -3,6 +3,8 @@ #include +#ifndef __ASSEMBLY__ + #ifdef CONFIG_HUGETLB_PAGE #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif @@ -19,8 +21,6 @@ extern unsigned long __phys_addr(unsigned long); #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ -#ifndef __ASSEMBLY__ - #ifdef CONFIG_X86_USE_3DNOW #include -- cgit v1.2.3 From 694aa960608d2976666d850bd4ef78053bbd0c84 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Thu, 12 Feb 2009 16:25:42 -0800 Subject: xen: fix xen_flush_tlb_others The commit commit 4595f9620cda8a1e973588e743cf5f8436dd20c6 Author: Rusty Russell Date: Sat Jan 10 21:58:09 2009 -0800 x86: change flush_tlb_others to take a const struct cpumask causes xen_flush_tlb_others to allocate a multicall and then issue it without initializing it in the case where the cpumask is empty, leading to: [ 8.354898] 1 multicall(s) failed: cpu 1 [ 8.354921] Pid: 2213, comm: bootclean Not tainted 2.6.29-rc3-x86_32p-xenU-tip #135 [ 8.354937] Call Trace: [ 8.354955] [] xen_mc_flush+0x133/0x1b0 [ 8.354971] [] ? xen_force_evtchn_callback+0x1a/0x30 [ 8.354988] [] xen_flush_tlb_others+0xb0/0xd0 [ 8.355003] [] flush_tlb_page+0x53/0xa0 [ 8.355018] [] do_wp_page+0x2a0/0x7c0 [ 8.355034] [] ? notify_remote_via_irq+0x3a/0x70 [ 8.355049] [] handle_mm_fault+0x7b0/0xa50 [ 8.355065] [] ? wake_up_new_task+0x8e/0xb0 [ 8.355079] [] ? do_fork+0xe5/0x320 [ 8.355095] [] do_page_fault+0xe9/0x240 [ 8.355109] [] ? 
do_page_fault+0x0/0x240 [ 8.355125] [] error_code+0x72/0x78 [ 8.355139] call 1/1: op=2863311530 arg=[aaaaaaaa] result=-38 xen_flush_tlb_others+0x41/0xd0 Since empty cpumasks are rare and undoing an xen_mc_entry() is tricky just issue such requests normally. Signed-off-by: Ian Campbell Signed-off-by: Ingo Molnar --- arch/x86/xen/mmu.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d2e8ed1aff3..319bd40a57c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1273,8 +1273,6 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, /* Remove us, and any offline CPUS. */ cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); - if (unlikely(cpumask_empty(to_cpumask(args->mask)))) - goto issue; if (va == TLB_FLUSH_ALL) { args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; @@ -1285,7 +1283,6 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); -issue: xen_mc_issue(PARAVIRT_LAZY_MMU); } -- cgit v1.2.3 From c466ed2e4337c5eb03b283da507eb5328ab06c73 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Fri, 13 Feb 2009 08:40:55 -0600 Subject: x86, UV: set full apicid in uv_hub_send_ipi The uv_hub_send_ipi() function needs to set the full apicid in the UVH_IPI_INT mmr. Signed-off-by: Dimitri Sivanich Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_uv_x.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 6adb5e6f4d9..89b84e004f0 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -114,16 +114,15 @@ int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) static void uv_send_IPI_one(int cpu, int vector) { - unsigned long val, apicid, lapicid; + unsigned long val, apicid; int pnode; apicid = per_cpu(x86_cpu_to_apicid, cpu); - lapicid = apicid & 0x3f; /* ZZZ macro needed */ pnode = uv_apicid_to_pnode(apicid); - val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) | - ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) | - ( vector << UVH_IPI_INT_VECTOR_SHFT ); + val = (1UL << UVH_IPI_INT_SEND_SHFT) | + (apicid << UVH_IPI_INT_APIC_ID_SHFT) | + (vector << UVH_IPI_INT_VECTOR_SHFT); uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } -- cgit v1.2.3 From 0341c14da49e7b93d2998926f6ac89a3129e3fa1 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 13 Feb 2009 11:14:01 -0800 Subject: x86: use _types.h headers in asm where available In general, the only definitions that assembly files can use are in _types.S headers (where available), so convert them. 
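The rule behind this conversion is that a *_types.h header has to stay digestible to the assembler: preprocessor constants only, with C types and prototypes fenced off behind __ASSEMBLY__. A hedged sketch of the shape such a header takes, using an invented asm/widget_types.h rather than any real kernel file:

#ifndef _ASM_X86_WIDGET_TYPES_H
#define _ASM_X86_WIDGET_TYPES_H

/* bare constants are all a .S file can use, so they sit outside any guard */
#define WIDGET_SHIFT	12
#define WIDGET_SIZE	(1 << WIDGET_SHIFT)

#ifndef __ASSEMBLY__
/*
 * typedefs, structs and prototypes would break the assembler, so they are
 * only exposed to C translation units
 */
typedef unsigned long widgetval_t;
extern widgetval_t widget_default(void);
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_WIDGET_TYPES_H */

The corresponding full asm/widget.h would include this file and add the inline-function layer on top, which is why the .S files in the diff below switch their includes to the _types variants.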
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/boot/compressed/head_32.S | 2 +- arch/x86/boot/compressed/head_64.S | 4 ++-- arch/x86/boot/header.S | 2 +- arch/x86/kernel/acpi/realmode/wakeup.S | 4 ++-- arch/x86/kernel/acpi/wakeup_32.S | 2 +- arch/x86/kernel/acpi/wakeup_64.S | 4 ++-- arch/x86/kernel/efi_stub_32.S | 2 +- arch/x86/kernel/entry_32.S | 2 +- arch/x86/kernel/entry_64.S | 2 +- arch/x86/kernel/head_32.S | 4 ++-- arch/x86/kernel/relocate_kernel_32.S | 2 +- arch/x86/kernel/relocate_kernel_64.S | 4 ++-- arch/x86/kernel/trampoline_32.S | 2 +- arch/x86/kernel/trampoline_64.S | 4 ++-- arch/x86/kernel/vmlinux_32.lds.S | 2 +- arch/x86/kernel/vmlinux_64.lds.S | 2 +- arch/x86/lib/getuser.S | 2 +- arch/x86/power/hibernate_asm_32.S | 2 +- arch/x86/power/hibernate_asm_64.S | 2 +- arch/x86/xen/xen-head.S | 2 +- 20 files changed, 26 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 29c5fbf0839..112f81abfa6 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 1d5dff4123e..36743525e37 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -26,8 +26,8 @@ #include #include -#include -#include +#include +#include #include #include #include diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index b993062e9a5..7ccff4884a2 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include "boot.h" #include "offsets.h" diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S index 3355973b12a..580b4e29601 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.S @@ -3,8 +3,8 @@ */ #include #include -#include -#include +#include +#include #include .code16 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index a12e6a9fb65..8ded418b059 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -1,7 +1,7 @@ .section .text.page_aligned #include #include -#include +#include # Copyright 2003, 2008 Pavel Machek , distribute under GPLv2 diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index bcc293423a7..82add8b804b 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -1,8 +1,8 @@ .text #include #include -#include -#include +#include +#include #include #include diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S index ef00bb77d7e..8b9171b9cbd 100644 --- a/arch/x86/kernel/efi_stub_32.S +++ b/arch/x86/kernel/efi_stub_32.S @@ -6,7 +6,7 @@ */ #include -#include +#include /* * efi_call_phys(void *, ...) is a function with variable parameters. 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index e9920683145..999e827ef9c 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index fbcf96b295f..dcf31283f94 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -48,7 +48,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 2a0aad7718d..c32ca19d591 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -11,8 +11,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index a160f311972..2064d0aa8d2 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index b0bbdd4829c..d32cfb27a47 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -7,10 +7,10 @@ */ #include -#include +#include #include #include -#include +#include /* * Must be relocatable PIC code callable as a C function diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index d8ccc3c6552..66d874e5404 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S @@ -29,7 +29,7 @@ #include #include -#include +#include /* We can free up trampoline after bootup if cpu hotplug is not supported. */ #ifndef CONFIG_HOTPLUG_CPU diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 95a012a4664..cddfb8d386b 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -25,8 +25,8 @@ */ #include -#include -#include +#include +#include #include #include #include diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 3eba7f7bac0..0d860963f26 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 087a7f2c639..fbfced6f680 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -6,7 +6,7 @@ #include #include -#include +#include #undef i386 /* in case the preprocessor is a 32bit one */ diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index ad374003742..51f1504cddd 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index d1e9b53f9d3..b641388d828 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 000415947d9..9356547d8c0 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -18,7 +18,7 @@ .text #include #include -#include +#include #include #include diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 
63d49a523ed..1a5ff24e29c 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include -- cgit v1.2.3 From 9b3651cbc26cfcea8276ecaff66718ea087f2e91 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 13 Feb 2009 11:01:54 -0800 Subject: x86: move more pagetable-related definitions into pgtable*.h PAGETABLE_LEVELS and the PTE masks should be in pgtable*.h Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/page_32_types.h | 2 -- arch/x86/include/asm/page_64_types.h | 2 -- arch/x86/include/asm/page_types.h | 6 ------ arch/x86/include/asm/pgtable-2level_types.h | 2 ++ arch/x86/include/asm/pgtable-3level_types.h | 2 ++ arch/x86/include/asm/pgtable_64_types.h | 1 + arch/x86/include/asm/pgtable_types.h | 6 ++++++ 7 files changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index b5486aaf36e..f1e4a79a6e4 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -33,12 +33,10 @@ /* 44=32+12, the limit we can fit into an unsigned long pfn */ #define __PHYSICAL_MASK_SHIFT 44 #define __VIRTUAL_MASK_SHIFT 32 -#define PAGETABLE_LEVELS 3 #else /* !CONFIG_X86_PAE */ #define __PHYSICAL_MASK_SHIFT 32 #define __VIRTUAL_MASK_SHIFT 32 -#define PAGETABLE_LEVELS 2 #endif /* CONFIG_X86_PAE */ #ifndef __ASSEMBLY__ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index bc73af3eda9..d38c91b7024 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -1,8 +1,6 @@ #ifndef _ASM_X86_PAGE_64_DEFS_H #define _ASM_X86_PAGE_64_DEFS_H -#define PAGETABLE_LEVELS 4 - #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define CURRENT_MASK (~(THREAD_SIZE - 1)) diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 2c52ff76758..2d625da6603 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -16,12 +16,6 @@ (ie, 32-bit PAE). */ #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) -/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ -#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) - -/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ -#define PTE_FLAGS_MASK (~PTE_PFN_MASK) - #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h index 09ae67efceb..daacc23e3fb 100644 --- a/arch/x86/include/asm/pgtable-2level_types.h +++ b/arch/x86/include/asm/pgtable-2level_types.h @@ -17,6 +17,7 @@ typedef union { #endif /* !__ASSEMBLY__ */ #define SHARED_KERNEL_PMD 0 +#define PAGETABLE_LEVELS 2 /* * traditional i386 two-level paging structure: @@ -25,6 +26,7 @@ typedef union { #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 + /* * the i386 is two-level, so we don't really have any * PMD directory physically. 
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index bcc89625ebe..1bd5876c864 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -24,6 +24,8 @@ typedef union { #define SHARED_KERNEL_PMD 1 #endif +#define PAGETABLE_LEVELS 3 + /* * PGDIR_SHIFT determines what a top-level page table entry can map */ diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2f59135c6f2..fbf42b8e038 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -18,6 +18,7 @@ typedef struct { pteval_t pte; } pte_t; #endif /* !__ASSEMBLY__ */ #define SHARED_KERNEL_PMD 0 +#define PAGETABLE_LEVELS 4 /* * PGDIR_SHIFT determines what a top-level page table entry can map diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 9dafe87be2d..4d258ad76a0 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -173,6 +173,12 @@ #include +/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ +#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) + +/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ +#define PTE_FLAGS_MASK (~PTE_PFN_MASK) + typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; typedef struct { pgdval_t pgd; } pgd_t; -- cgit v1.2.3 From bf33a70a73876b163d62612e9567cbac6604ba7e Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 13 Feb 2009 12:52:44 -0600 Subject: x86: fix "__udivdi3" [drivers/scsi/aha1542.ko] undefined Commit 976e8f677e42757e5586ea04a9ac8bb8ddaa037e ("x86: asm/io.h: unify virt_to_phys/phys_to_virt") changed the return of virt_to_phys from long to phys_addr_t which is unsigned long long on a PAE platform. So, I could suggest a fix below since isa addresses may never be above 32 bits. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index e5a2ab44cd5..4f8e820cf38 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -125,7 +125,7 @@ static inline void *phys_to_virt(phys_addr_t address) /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ -#define isa_virt_to_bus virt_to_phys +#define isa_virt_to_bus (unsigned long)virt_to_phys #define isa_page_to_bus page_to_phys #define isa_bus_to_virt phys_to_virt -- cgit v1.2.3 From f6db44df5bd39ed33883786d35342759af796e4a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 14 Feb 2009 23:59:18 -0800 Subject: x86: fix typo in filter_cpuid_features() Impact: fix wrong disabling of cpu features an amd system got this strange output: CPU: CPU feature monitor disabled due to lack of CPUID level 0x5 but in /proc/cpuinfo I have: cpuid level : 5 on intel system: CPU: CPU feature monitor disabled due to lack of CPUID level 0x5 CPU: CPU feature dca disabled due to lack of CPUID level 0x9 but in /proc/cpuinfo i have: cpuid level : 11 Tt turns out there is a typo, and we should use level member in df. 
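The one-character nature of the bug is easier to see outside the kernel: the dependency table carries both a feature bit number and a required CPUID level, and the check must compare the level. A hedged stand-alone illustration (the field names approximate the kernel's cpuid_dependent_feature entry, the numbers are made up to mirror the monitor/level-5 report above):

#include <stdio.h>

struct cpuid_dependent_feature {
	unsigned int feature;	/* X86_FEATURE_* bit number */
	unsigned int level;	/* minimum CPUID level needed */
};

int main(void)
{
	struct cpuid_dependent_feature df = { .feature = 131, .level = 5 };
	unsigned int cpuid_level = 11;	/* what /proc/cpuinfo reported */

	/* buggy form: compares the feature bit number against cpuid_level */
	if (df.feature > cpuid_level)
		printf("feature wrongly disabled due to lack of CPUID level %#x\n",
		       df.level);

	/* fixed form mirrors the patch: compare the required level */
	if (df.level > cpuid_level)
		printf("feature disabled: CPUID level too low\n");
	else
		printf("feature kept: level %u >= required %u\n",
		       cpuid_level, df.level);

	return 0;
}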
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 21f086b4c1a..32093d08d87 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -241,9 +241,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) * signs here... */ if (cpu_has(c, df->feature) && - ((s32)df->feature < 0 ? - (u32)df->feature > (u32)c->extended_cpuid_level : - (s32)df->feature > (s32)c->cpuid_level)) { + ((s32)df->level < 0 ? + (u32)df->level > (u32)c->extended_cpuid_level : + (s32)df->level > (s32)c->cpuid_level)) { clear_cpu_cap(c, df->feature); if (warn) printk(KERN_WARNING @@ -253,7 +253,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) df->level); } } -} +} /* * Naming convention should be: [()] -- cgit v1.2.3 From 88d0f550d71493cd975a11a03c166211b2f3bd32 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 14 Feb 2009 23:57:28 -0800 Subject: x86: make 32bit to call enable_IO_APIC early like 64bit Impact: cleanup So we remove some #ifdefs. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 8 ++++---- arch/x86/kernel/io_apic.c | 4 ---- arch/x86/kernel/smpboot.c | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 8bd801db24d..b8616aaa168 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1646,19 +1646,19 @@ int __init APIC_init_uniprocessor(void) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); setup_local_APIC(); -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_IO_APIC /* * Now enable IO-APICs, actually call clear_IO_APIC * We need clear_IO_APIC before enabling vector on BP */ if (!skip_ioapic_setup && nr_ioapics) enable_IO_APIC(); -#endif -#ifdef CONFIG_X86_IO_APIC if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) -#endif localise_nmi_watchdog(); +#else + localise_nmi_watchdog(); +#endif end_local_APIC_setup(); #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 7248ca11bdc..0b7cde3da48 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3057,13 +3057,9 @@ out: void __init setup_IO_APIC(void) { -#ifdef CONFIG_X86_32 - enable_IO_APIC(); -#else /* * calling enable_IO_APIC() is moved to setup_local_APIC for BP */ -#endif io_apic_irqs = ~PIC_IRQS; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index af57f88186e..10834954e30 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1154,13 +1154,12 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) */ setup_local_APIC(); -#ifdef CONFIG_X86_64 /* * Enable IO APIC before setting up error vector */ if (!skip_ioapic_setup && nr_ioapics) enable_IO_APIC(); -#endif + end_local_APIC_setup(); map_cpu_to_logical_apicid(); -- cgit v1.2.3 From 970ec1a8213cd1a1ea29972ebbe4575a8b30bca1 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 15 Feb 2009 14:06:13 -0800 Subject: [IA64] fix __apci_unmap_table Impact: fix build error to fix: tip/arch/ia64/kernel/acpi.c:203: error: conflicting types for '__acpi_unmap_table' tip/include/linux/acpi.h:82: error: previous declaration of '__acpi_unmap_table' was here tip/arch/ia64/kernel/acpi.c:203: error: conflicting types for '__acpi_unmap_table' tip/include/linux/acpi.h:82: error: previous 
declaration of '__acpi_unmap_table' was here Signed-off-by: Yinghai Lu Cc: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/ia64/kernel/acpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 2363ed17319..bdef2ce38c8 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -199,7 +199,7 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) return __va(phys_addr); } -char *__init __acpi_unmap_table(unsigned long virt_addr, unsigned long size) +void __init __acpi_unmap_table(char *map, unsigned long size) { } -- cgit v1.2.3 From cb9eff097831007afb30d64373f29d99825d0068 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 12 Feb 2009 05:03:36 +0000 Subject: net: new user space API for time stamping of incoming and outgoing packets User space can request hardware and/or software time stamping. Reporting of the result(s) via a new control message is enabled separately for each field in the message because some of the fields may require additional computation and thus cause overhead. User space can tell the different kinds of time stamps apart and choose what suits its needs. When a TX timestamp operation is requested, the TX skb will be cloned and the clone will be time stamped (in hardware or software) and added to the socket error queue of the skb, if the skb has a socket associated with it. The actual TX timestamp will reach userspace as a RX timestamp on the cloned packet. If timestamping is requested and no timestamping is done in the device driver (potentially this may use hardware timestamping), it will be done in software after the device's start_hard_xmit routine. Signed-off-by: Patrick Ohly Signed-off-by: David S. Miller --- arch/alpha/include/asm/socket.h | 3 +++ arch/arm/include/asm/socket.h | 3 +++ arch/avr32/include/asm/socket.h | 3 +++ arch/blackfin/include/asm/socket.h | 3 +++ arch/cris/include/asm/socket.h | 3 +++ arch/h8300/include/asm/socket.h | 3 +++ arch/ia64/include/asm/socket.h | 3 +++ arch/m68k/include/asm/socket.h | 3 +++ arch/mips/include/asm/socket.h | 3 +++ arch/parisc/include/asm/socket.h | 3 +++ arch/powerpc/include/asm/socket.h | 3 +++ arch/s390/include/asm/socket.h | 3 +++ arch/sh/include/asm/socket.h | 3 +++ arch/sparc/include/asm/socket.h | 3 +++ arch/x86/include/asm/socket.h | 3 +++ arch/xtensa/include/asm/socket.h | 3 +++ 16 files changed, 48 insertions(+) (limited to 'arch') diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index a1057c2d95e..3641ec1452f 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -62,6 +62,9 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 6817be9573a..537de4e0ef5 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h index 35863f26092..04c86061970 100644 --- a/arch/avr32/include/asm/socket.h +++ b/arch/avr32/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/blackfin/include/asm/socket.h b/arch/blackfin/include/asm/socket.h index 2ca702e44d4..fac7fe9e1f8 100644 --- a/arch/blackfin/include/asm/socket.h +++ b/arch/blackfin/include/asm/socket.h @@ -53,4 +53,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h index 9df0ca82f5d..d5cf7400540 100644 --- a/arch/cris/include/asm/socket.h +++ b/arch/cris/include/asm/socket.h @@ -56,6 +56,9 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h index da2520dbf25..602518a70a1 100644 --- a/arch/h8300/include/asm/socket.h +++ b/arch/h8300/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index d5ef0aa3e31..745421225ec 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h @@ -63,4 +63,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h index dbc64e92c41..ca87f938b03 100644 --- a/arch/m68k/include/asm/socket.h +++ b/arch/m68k/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index facc2d7a87c..2abca178016 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h @@ -75,6 +75,9 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #ifdef __KERNEL__ /** sock_type - Socket types diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index fba402c95ac..885472bf7b7 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h @@ -54,6 +54,9 @@ #define SO_MARK 0x401f +#define SO_TIMESTAMPING 0x4020 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index f5a4e168e49..1e5cfad0e3f 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h @@ -61,4 +61,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index c786ab623b2..02330c50241 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h @@ -62,4 +62,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h index 6d4bf651295..345653b9682 100644 --- a/arch/sh/include/asm/socket.h +++ b/arch/sh/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* __ASM_SH_SOCKET_H */ diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index bf50d0c2d58..982a12f959f 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h @@ -50,6 +50,9 @@ #define SO_MARK 0x0022 +#define SO_TIMESTAMPING 0x0023 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h index 8ab9cc8b2ec..ca8bf2cd0ba 100644 --- a/arch/x86/include/asm/socket.h +++ b/arch/x86/include/asm/socket.h @@ -54,4 +54,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _ASM_X86_SOCKET_H */ diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h index 6100682b1da..dd1a7a4a1ce 100644 --- a/arch/xtensa/include/asm/socket.h +++ b/arch/xtensa/include/asm/socket.h @@ -65,4 +65,7 @@ #define SO_MARK 36 +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + #endif /* _XTENSA_SOCKET_H */ -- cgit v1.2.3 From 9033304a1520df346862c95743a6c2023f21f057 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 13:35:44 -0800 Subject: x86, xen: short-circuit tests for dom0 When testing for a dom0/initial/privileged domain, make sure the predicate evaluates to a compile-time 0 if CONFIG_XEN_DOM0 isn't enabled. This will make most of the dom0 code evaporate without much more effort. 
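The trick is to make the predicate a literal 0 when the config option is off, so the optimizer discards every block guarded by it. A hedged user-space sketch of the same pattern (CONFIG_EXAMPLE_DOM0 and the helper are invented, not Xen's real interface):

#include <stdio.h>

#ifdef CONFIG_EXAMPLE_DOM0
extern int runtime_is_initial_domain(void);
#define example_initial_domain()	runtime_is_initial_domain()
#else
/* compile-time constant: the guarded branch below is dead-code eliminated */
#define example_initial_domain()	(0)
#endif

int main(void)
{
	if (example_initial_domain())
		printf("privileged-domain setup runs\n");
	else
		printf("ordinary domain path\n");

	return 0;
}

Built without -DCONFIG_EXAMPLE_DOM0, the privileged branch never reaches the object file, which is what lets most of the dom0-only code evaporate without further #ifdefs at the call sites.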
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/xen/hypervisor.h | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index 81fbd735aec..d5b7e90c0ed 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -38,22 +38,30 @@ extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; enum xen_domain_type { - XEN_NATIVE, - XEN_PV_DOMAIN, - XEN_HVM_DOMAIN, + XEN_NATIVE, /* running on bare hardware */ + XEN_PV_DOMAIN, /* running in a PV domain */ + XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ }; -extern enum xen_domain_type xen_domain_type; - #ifdef CONFIG_XEN -#define xen_domain() (xen_domain_type != XEN_NATIVE) +extern enum xen_domain_type xen_domain_type; #else -#define xen_domain() (0) +#define xen_domain_type XEN_NATIVE #endif -#define xen_pv_domain() (xen_domain() && xen_domain_type == XEN_PV_DOMAIN) -#define xen_hvm_domain() (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN) +#define xen_domain() (xen_domain_type != XEN_NATIVE) +#define xen_pv_domain() (xen_domain() && \ + xen_domain_type == XEN_PV_DOMAIN) +#define xen_hvm_domain() (xen_domain() && \ + xen_domain_type == XEN_HVM_DOMAIN) + +#ifdef CONFIG_XEN_DOM0 +#include -#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN) +#define xen_initial_domain() (xen_pv_domain() && \ + xen_start_info->flags & SIF_INITDOMAIN) +#else /* !CONFIG_XEN_DOM0 */ +#define xen_initial_domain() (0) +#endif /* CONFIG_XEN_DOM0 */ #endif /* _ASM_X86_XEN_HYPERVISOR_H */ -- cgit v1.2.3 From b93d51dc62a41b5c2d6f32a0870709dc0cc55545 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Fri, 6 Feb 2009 13:35:57 -0800 Subject: x86, xen: record and display initiator of each multicall when debugging Store the caller for each multicall so we can report it on failure. Signed-off-by: Ian Campbell Signed-off-by: Ingo Molnar --- arch/x86/xen/multicalls.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index c738644b543..82f2513e975 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -39,6 +39,7 @@ struct mc_buffer { struct multicall_entry entries[MC_BATCH]; #if MC_DEBUG struct multicall_entry debug[MC_BATCH]; + void *caller[MC_BATCH]; #endif unsigned char args[MC_ARGS]; struct callback { @@ -154,11 +155,12 @@ void xen_mc_flush(void) ret, smp_processor_id()); dump_stack(); for (i = 0; i < b->mcidx; i++) { - printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\n", + printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n", i+1, b->mcidx, b->debug[i].op, b->debug[i].args[0], - b->entries[i].result); + b->entries[i].result, + b->caller[i]); } } #endif @@ -197,6 +199,9 @@ struct multicall_space __xen_mc_entry(size_t args) } ret.mc = &b->entries[b->mcidx]; +#ifdef MC_DEBUG + b->caller[b->mcidx] = __builtin_return_address(0); +#endif b->mcidx++; ret.args = &b->args[argidx]; b->argidx = argidx + args; -- cgit v1.2.3 From 3d39e9d07b576ee72f2c94cfad9e618fe21763b2 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 13:38:51 -0800 Subject: x86, xen: degrade BUG to WARN when multicall fails If one of the components of a multicall fails, WARN rather than BUG, to help with debugging. 
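Demoting BUG_ON() to WARN_ON() trades an immediate halt for a logged, survivable report, which is what makes the multicall failure debuggable. A rough user-space analogue of the two macros (names and output are stand-ins, not the kernel implementations):

#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_BUG_ON(cond)						\
	do {								\
		if (cond) {						\
			fprintf(stderr, "BUG at %s:%d\n",		\
				__FILE__, __LINE__);			\
			abort();	/* nothing runs after this */	\
		}							\
	} while (0)

#define EXAMPLE_WARN_ON(cond)						\
	do {								\
		if (cond)						\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
	} while (0)

int main(void)
{
	int multicall_ret = -1;		/* pretend one component failed */

	/* new behaviour: complain loudly but keep the system running */
	EXAMPLE_WARN_ON(multicall_ret != 0);
	printf("still alive after the warning, so the state can be inspected\n");

	/* old behaviour would have stopped everything here: */
	/* EXAMPLE_BUG_ON(multicall_ret != 0); */

	return 0;
}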
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/xen/multicalls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 82f2513e975..6cffd5532b9 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -179,7 +179,7 @@ void xen_mc_flush(void) } b->cbidx = 0; - BUG_ON(ret); + WARN_ON(ret); } struct multicall_space __xen_mc_entry(size_t args) -- cgit v1.2.3 From c99608637eac8834d830496c462c054137772122 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 6 Feb 2009 13:38:56 -0800 Subject: x86, xen: do multicall callbacks with interrupts disabled We can't call the callbacks after enabling interrupts, as we may get a nested multicall call, which would cause a great deal of havok. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/xen/multicalls.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 6cffd5532b9..8bff7e7c290 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -170,8 +170,6 @@ void xen_mc_flush(void) } else BUG_ON(b->argidx != 0); - local_irq_restore(flags); - for (i = 0; i < b->cbidx; i++) { struct callback *cb = &b->callbacks[i]; @@ -179,6 +177,8 @@ void xen_mc_flush(void) } b->cbidx = 0; + local_irq_restore(flags); + WARN_ON(ret); } -- cgit v1.2.3 From 3bd25d0fa3ed588a6735b815cb0c146c23888ace Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 15 Feb 2009 02:54:03 -0800 Subject: x86: pre init pirq_entries[] Impact: cleanup set default value early - this allows the removal of a number of dynamic initialization codepaths, and an #ifdef. Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 0b7cde3da48..a89878e08a4 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -813,8 +813,9 @@ static void clear_IO_APIC (void) */ #define MAX_PIRQS 8 -static int pirq_entries [MAX_PIRQS]; -static int pirqs_enabled; +static int pirq_entries[MAX_PIRQS] = { + [0 ... MAX_PIRQS - 1] = -1 +}; static int __init ioapic_pirq_setup(char *str) { @@ -823,10 +824,6 @@ static int __init ioapic_pirq_setup(char *str) get_options(str, ARRAY_SIZE(ints), ints); - for (i = 0; i < MAX_PIRQS; i++) - pirq_entries[i] = -1; - - pirqs_enabled = 1; apic_printk(APIC_VERBOSE, KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n"); max = MAX_PIRQS; @@ -1976,13 +1973,6 @@ void __init enable_IO_APIC(void) int apic; unsigned long flags; -#ifdef CONFIG_X86_32 - int i; - if (!pirqs_enabled) - for (i = 0; i < MAX_PIRQS; i++) - pirq_entries[i] = -1; -#endif - /* * The number of IO-APIC IRQ registers (== #pins): */ -- cgit v1.2.3 From 98c061b6cf2e7a1010286a7a4f672c4623e1b3e0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 16 Feb 2009 00:00:50 -0800 Subject: x86: make APIC_init_uniprocessor() more like smp_prepare_cpus() Impact: cleanup 1. move localise_nmi_watchdog() later 2. 
change setup_boot_APIC_clock() to setup_boot_clock() for 64-bit Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index b8616aaa168..c03f4cc135c 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1649,32 +1649,28 @@ int __init APIC_init_uniprocessor(void) #ifdef CONFIG_X86_IO_APIC /* * Now enable IO-APICs, actually call clear_IO_APIC - * We need clear_IO_APIC before enabling vector on BP + * We need clear_IO_APIC before enabling error vector */ if (!skip_ioapic_setup && nr_ioapics) enable_IO_APIC(); - - if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) - localise_nmi_watchdog(); -#else - localise_nmi_watchdog(); #endif + end_local_APIC_setup(); #ifdef CONFIG_X86_IO_APIC if (smp_found_config && !skip_ioapic_setup && nr_ioapics) setup_IO_APIC(); -# ifdef CONFIG_X86_64 - else + else { nr_ioapics = 0; -# endif + localise_nmi_watchdog(); + } +#else + localise_nmi_watchdog(); #endif + setup_boot_clock(); #ifdef CONFIG_X86_64 - setup_boot_APIC_clock(); check_nmi_watchdog(); -#else - setup_boot_clock(); #endif return 0; -- cgit v1.2.3 From ee8b53c1cfe33aecf0c77c0aa3ce437f0d84d831 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 12:20:13 +0100 Subject: x86: remove stale arch/x86/include/asm/page_64.h.rej file Introduced by: 51c78eb: x86: create _types.h counterparts for page*.h Cc: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64.h.rej | 114 ------------------------------------- 1 file changed, 114 deletions(-) delete mode 100644 arch/x86/include/asm/page_64.h.rej (limited to 'arch') diff --git a/arch/x86/include/asm/page_64.h.rej b/arch/x86/include/asm/page_64.h.rej deleted file mode 100644 index 9b1807f1859..00000000000 --- a/arch/x86/include/asm/page_64.h.rej +++ /dev/null @@ -1,114 +0,0 @@ -*************** -*** 1,105 **** - #ifndef _ASM_X86_PAGE_64_H - #define _ASM_X86_PAGE_64_H - -- #define PAGETABLE_LEVELS 4 -- -- #define THREAD_ORDER 1 -- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) -- #define CURRENT_MASK (~(THREAD_SIZE - 1)) -- -- #define EXCEPTION_STACK_ORDER 0 -- #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) -- -- #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) -- #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) -- -- #define IRQSTACK_ORDER 2 -- #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) -- -- #define STACKFAULT_STACK 1 -- #define DOUBLEFAULT_STACK 2 -- #define NMI_STACK 3 -- #define DEBUG_STACK 4 -- #define MCE_STACK 5 -- #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ -- -- #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) -- #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) -- -- /* -- * Set __PAGE_OFFSET to the most negative possible address + -- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a -- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's -- * what Xen requires. -- */ -- #define __PAGE_OFFSET _AC(0xffff880000000000, UL) -- -- #define __PHYSICAL_START CONFIG_PHYSICAL_START -- #define __KERNEL_ALIGN 0x200000 -- -- /* -- * Make sure kernel is aligned to 2MB address. Catching it at compile -- * time is better. 
Change your config file and compile the kernel -- * for a 2MB aligned address (CONFIG_PHYSICAL_START) -- */ -- #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 -- #error "CONFIG_PHYSICAL_START must be a multiple of 2MB" -- #endif -- -- #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) -- #define __START_KERNEL_map _AC(0xffffffff80000000, UL) -- -- /* See Documentation/x86_64/mm.txt for a description of the memory map. */ -- #define __PHYSICAL_MASK_SHIFT 46 -- #define __VIRTUAL_MASK_SHIFT 48 -- -- /* -- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in -- * arch/x86/kernel/head_64.S), and it is mapped here: -- */ -- #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) -- #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) -- -- #ifndef __ASSEMBLY__ -- void clear_page(void *page); -- void copy_page(void *to, void *from); -- -- /* duplicated to the one in bootmem.h */ -- extern unsigned long max_pfn; -- extern unsigned long phys_base; -- -- extern unsigned long __phys_addr(unsigned long); -- #define __phys_reloc_hide(x) (x) -- -- /* -- * These are used to make use of C type-checking.. -- */ -- typedef unsigned long pteval_t; -- typedef unsigned long pmdval_t; -- typedef unsigned long pudval_t; -- typedef unsigned long pgdval_t; -- typedef unsigned long pgprotval_t; -- -- typedef struct page *pgtable_t; -- -- typedef struct { pteval_t pte; } pte_t; -- -- #define vmemmap ((struct page *)VMEMMAP_START) -- -- extern unsigned long init_memory_mapping(unsigned long start, -- unsigned long end); -- -- extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); -- extern void free_initmem(void); -- -- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); -- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); -- -- #endif /* !__ASSEMBLY__ */ -- -- #ifdef CONFIG_FLATMEM -- #define pfn_valid(pfn) ((pfn) < max_pfn) -- #endif -- - - #endif /* _ASM_X86_PAGE_64_H */ ---- 1,6 ---- - #ifndef _ASM_X86_PAGE_64_H - #define _ASM_X86_PAGE_64_H - -+ #include - - #endif /* _ASM_X86_PAGE_64_H */ -- cgit v1.2.3 From 06cd9a7dc8a58186060a91b6ddc031057435fd34 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 16 Feb 2009 17:29:58 -0800 Subject: x86: add x2apic config Impact: cleanup so could deselect x2apic and INTR_REMAP will select x2apic Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 15 +++++++++++++++ arch/x86/include/asm/apic.h | 18 +++++++++++++++--- arch/x86/kernel/Makefile | 4 ++-- arch/x86/kernel/apic.c | 24 ++++++++++++------------ arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/genapic_64.c | 4 ++++ arch/x86/kernel/setup.c | 3 +-- arch/x86/kernel/smpboot.c | 2 +- 8 files changed, 51 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1042d69b267..bce241fe1d2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -235,6 +235,20 @@ config SMP If you don't know what to do here, say N. +config X86_X2APIC + bool "Support x2apic" + depends on X86_LOCAL_APIC && X86_64 + ---help--- + This enables x2apic support on CPUs that have this feature. + + This allows 32-bit apic IDs (so it can support very large systems), + and accesses the local apic via MSRs not via mmio. + + ( On certain CPU models you may need to enable INTR_REMAP too, + to get functional x2apic mode. ) + + If you don't know what to do here, say N. 
+ config SPARSE_IRQ bool "Support sparse irq numbering" depends on PCI_MSI || HT_IRQ @@ -1828,6 +1842,7 @@ config DMAR_FLOPPY_WA config INTR_REMAP bool "Support for Interrupt Remapping (EXPERIMENTAL)" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL + select X86_X2APIC ---help--- Supports Interrupt remapping for IO-APIC and MSI devices. To use x2apic mode in the CPU's which support x2APIC enhancements or diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index fba49f66228..dc1db99cd40 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -112,7 +112,7 @@ static inline u32 native_apic_msr_read(u32 reg) return low; } -#ifndef CONFIG_X86_32 +#ifdef CONFIG_X86_X2APIC extern int x2apic; extern void check_x2apic(void); extern void enable_x2apic(void); @@ -131,7 +131,19 @@ static inline int x2apic_enabled(void) return 0; } #else -#define x2apic_enabled() 0 +static inline void check_x2apic(void) +{ +} +static inline void enable_x2apic(void) +{ +} +static inline void enable_IR_x2apic(void) +{ +} +static inline int x2apic_enabled(void) +{ + return 0; +} #endif struct apic_ops { @@ -177,7 +189,7 @@ static inline u32 safe_apic_wait_icr_idle(void) extern int get_physical_broadcast(void); -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_X2APIC static inline void ack_x2APIC_irq(void) { /* Docs say use 0 for future compatibility */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 24f357e7557..1cefd218f76 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -117,8 +117,8 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 # 64 bit specific files ifeq ($(CONFIG_X86_64),y) obj-y += genapic_64.o genapic_flat_64.o - obj-y += genx2apic_cluster.o - obj-y += genx2apic_phys.o + obj-$(CONFIG_X86_X2APIC) += genx2apic_cluster.o + obj-$(CONFIG_X86_X2APIC) += genx2apic_phys.o obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index a894eea9d51..004aa1c31e4 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -112,11 +112,7 @@ static __init int setup_apicpmtimer(char *s) __setup("apicpmtimer", setup_apicpmtimer); #endif -#ifdef CONFIG_X86_64 -#define HAVE_X2APIC -#endif - -#ifdef HAVE_X2APIC +#ifdef CONFIG_X86_X2APIC int x2apic; /* x2apic enabled before OS handover */ static int x2apic_preenabled; @@ -269,7 +265,7 @@ static struct apic_ops xapic_ops = { struct apic_ops __read_mostly *apic_ops = &xapic_ops; EXPORT_SYMBOL_GPL(apic_ops); -#ifdef HAVE_X2APIC +#ifdef CONFIG_X86_X2APIC static void x2apic_wait_icr_idle(void) { /* no need to wait for icr idle in x2apic */ @@ -1320,11 +1316,14 @@ void __cpuinit end_local_APIC_setup(void) apic_pm_activate(); } -#ifdef HAVE_X2APIC +#ifdef CONFIG_X86_X2APIC void check_x2apic(void) { int msr, msr2; + if (!cpu_has_x2apic) + return; + rdmsr(MSR_IA32_APICBASE, msr, msr2); if (msr & X2APIC_ENABLE) { @@ -1338,6 +1337,9 @@ void enable_x2apic(void) { int msr, msr2; + if (!x2apic) + return; + rdmsr(MSR_IA32_APICBASE, msr, msr2); if (!(msr & X2APIC_ENABLE)) { pr_info("Enabling x2apic\n"); @@ -1439,7 +1441,7 @@ end: return; } -#endif /* HAVE_X2APIC */ +#endif /* CONFIG_X86_X2APIC */ #ifdef CONFIG_X86_64 /* @@ -1570,7 +1572,7 @@ void __init early_init_lapic_mapping(void) */ void __init init_apic_mappings(void) { -#ifdef HAVE_X2APIC +#ifdef CONFIG_X86_X2APIC if (x2apic) { boot_cpu_physical_apicid = read_apic_id(); 
return; @@ -1634,9 +1636,7 @@ int __init APIC_init_uniprocessor(void) } #endif -#ifdef HAVE_X2APIC enable_IR_x2apic(); -#endif #ifdef CONFIG_X86_64 default_setup_apic_routing(); #endif @@ -2021,7 +2021,7 @@ static int lapic_resume(struct sys_device *dev) local_irq_save(flags); -#ifdef HAVE_X2APIC +#ifdef CONFIG_X86_X2APIC if (x2apic) enable_x2apic(); else diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4db150ed446..4b5d13e472d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1051,7 +1051,7 @@ void __cpuinit cpu_init(void) barrier(); check_efer(); - if (cpu != 0 && x2apic) + if (cpu != 0) enable_x2apic(); /* diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 820dea5d0eb..cdc4772d9c8 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -35,8 +35,10 @@ static struct genapic *apic_probe[] __initdata = { #ifdef CONFIG_X86_UV &apic_x2apic_uv_x, #endif +#ifdef CONFIG_X86_X2APIC &apic_x2apic_phys, &apic_x2apic_cluster, +#endif &apic_physflat, NULL, }; @@ -46,10 +48,12 @@ static struct genapic *apic_probe[] __initdata = { */ void __init default_setup_apic_routing(void) { +#ifdef CONFIG_X86_X2APIC if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { if (!intr_remapping_enabled) apic = &apic_flat; } +#endif if (apic == &apic_flat) { if (max_physical_apicid >= 8) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 8fce6c71451..43d964411c0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -836,8 +836,7 @@ void __init setup_arch(char **cmdline_p) #else num_physpages = max_pfn; - if (cpu_has_x2apic) - check_x2apic(); + check_x2apic(); /* How many end-of-memory variables you have, grandma! */ /* need this before calling reserve_initrd */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 10834954e30..b5f2b698973 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1128,8 +1128,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) current_thread_info()->cpu = 0; /* needed? */ set_cpu_sibling_map(0); -#ifdef CONFIG_X86_64 enable_IR_x2apic(); +#ifdef CONFIG_X86_64 default_setup_apic_routing(); #endif -- cgit v1.2.3 From c1eeb2de41d7015678bdd412b48a5f071b84e29a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 16 Feb 2009 23:02:14 -0800 Subject: x86: fold apic_ops into genapic Impact: cleanup make it simpler, don't need have one extra struct. 
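Folding apic_ops into genapic removes the second dispatch table: the read/write/icr accessors become members of the one struct every caller already dereferences. A hedged miniature of the folded layout (demo_* names are invented; only the shape of the change is shown, not the kernel's actual genapic definitions):

#include <stdio.h>

struct genapic {
	const char *name;

	/* formerly a separate struct apic_ops, now folded in: */
	unsigned int (*read)(unsigned int reg);
	void (*write)(unsigned int reg, unsigned int val);
};

static unsigned int demo_read(unsigned int reg)
{
	return reg ^ 0xffu;		/* stand-in for a real register read */
}

static void demo_write(unsigned int reg, unsigned int val)
{
	printf("write reg %#x = %#x\n", reg, val);
}

static struct genapic demo_apic = {
	.name	= "demo",
	.read	= demo_read,
	.write	= demo_write,
};

static struct genapic *apic = &demo_apic;

/* callers keep a single indirection: apic->read()/apic->write() */
static inline unsigned int apic_read(unsigned int reg)
{
	return apic->read(reg);
}

int main(void)
{
	apic->write(0xb0, 0);
	printf("%s: read(0x20) = %#x\n", apic->name, apic_read(0x20));
	return 0;
}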
v2: fix the sgi_uv build Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 97 ++++++++++--------------------- arch/x86/include/asm/genapic.h | 61 +++++++++++++++++++ arch/x86/kernel/apic.c | 63 ++------------------ arch/x86/kernel/bigsmp_32.c | 7 +++ arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 2 +- arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +- arch/x86/kernel/es7000_32.c | 7 +++ arch/x86/kernel/genapic_64.c | 2 +- arch/x86/kernel/genapic_flat_64.c | 16 ++++- arch/x86/kernel/genx2apic_cluster.c | 11 +++- arch/x86/kernel/genx2apic_phys.c | 11 +++- arch/x86/kernel/genx2apic_uv_x.c | 9 ++- arch/x86/kernel/ipi.c | 2 +- arch/x86/kernel/irq.c | 2 +- arch/x86/kernel/nmi.c | 2 +- arch/x86/kernel/numaq_32.c | 7 +++ arch/x86/kernel/probe_32.c | 7 +++ arch/x86/kernel/summit_32.c | 7 +++ arch/x86/kernel/uv_irq.c | 2 +- arch/x86/kernel/vmi_32.c | 6 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 19 +++--- arch/x86/xen/enlighten.c | 21 +++---- 24 files changed, 205 insertions(+), 162 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index dc1db99cd40..4f56e053d34 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -92,6 +92,12 @@ static inline u32 native_apic_mem_read(u32 reg) return *((volatile u32 *)(APIC_BASE + reg)); } +extern void native_apic_wait_icr_idle(void); +extern u32 native_safe_apic_wait_icr_idle(void); +extern void native_apic_icr_write(u32 low, u32 id); +extern u64 native_apic_icr_read(void); + +#ifdef CONFIG_X86_X2APIC static inline void native_apic_msr_write(u32 reg, u32 v) { if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || @@ -112,7 +118,31 @@ static inline u32 native_apic_msr_read(u32 reg) return low; } -#ifdef CONFIG_X86_X2APIC +static inline void native_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return; +} + +static inline u32 native_safe_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return 0; +} + +static inline void native_x2apic_icr_write(u32 low, u32 id) +{ + wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); +} + +static inline u64 native_x2apic_icr_read(void) +{ + unsigned long val; + + rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); + return val; +} + extern int x2apic; extern void check_x2apic(void); extern void enable_x2apic(void); @@ -146,47 +176,6 @@ static inline int x2apic_enabled(void) } #endif -struct apic_ops { - u32 (*read)(u32 reg); - void (*write)(u32 reg, u32 v); - u64 (*icr_read)(void); - void (*icr_write)(u32 low, u32 high); - void (*wait_icr_idle)(void); - u32 (*safe_wait_icr_idle)(void); -}; - -extern struct apic_ops *apic_ops; - -static inline u32 apic_read(u32 reg) -{ - return apic_ops->read(reg); -} - -static inline void apic_write(u32 reg, u32 val) -{ - apic_ops->write(reg, val); -} - -static inline u64 apic_icr_read(void) -{ - return apic_ops->icr_read(); -} - -static inline void apic_icr_write(u32 low, u32 high) -{ - apic_ops->icr_write(low, high); -} - -static inline void apic_wait_icr_idle(void) -{ - apic_ops->wait_icr_idle(); -} - -static inline u32 safe_apic_wait_icr_idle(void) -{ - return apic_ops->safe_wait_icr_idle(); -} - extern int get_physical_broadcast(void); #ifdef CONFIG_X86_X2APIC @@ -197,18 +186,6 @@ static inline void ack_x2APIC_irq(void) } #endif - -static inline void ack_APIC_irq(void) -{ - /* - * ack_APIC_irq() actually gets compiled as a single 
instruction - * ... yummie. - */ - - /* Docs say use 0 for future compatibility */ - apic_write(APIC_EOI, 0); -} - extern int lapic_get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC(void); @@ -256,18 +233,6 @@ static inline void disable_local_APIC(void) { } #define SET_APIC_ID(x) (apic->set_apic_id(x)) #else -#ifdef CONFIG_X86_LOCAL_APIC -static inline unsigned default_get_apic_id(unsigned long x) -{ - unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); - - if (APIC_XAPIC(ver)) - return (x >> 24) & 0xFF; - else - return (x >> 24) & 0x0F; -} -#endif - #endif #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 273b99452ae..a6d0b00a544 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -5,6 +5,7 @@ #include #include +#include /* * Copyright 2004 James Cleverdon, IBM. @@ -83,10 +84,70 @@ struct genapic { void (*smp_callin_clear_local_apic)(void); void (*store_NMI_vector)(unsigned short *high, unsigned short *low); void (*inquire_remote_apic)(int apicid); + + /* apic ops */ + u32 (*read)(u32 reg); + void (*write)(u32 reg, u32 v); + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); }; extern struct genapic *apic; +static inline u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + apic->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction + * ... yummie. + */ + + /* Docs say use 0 for future compatibility */ + apic_write(APIC_EOI, 0); +} + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver)) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + /* * Warm reset vector default position: */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 004aa1c31e4..af494bad885 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -210,18 +210,13 @@ static int modern_apic(void) return lapic_get_version() >= 0x14; } -/* - * Paravirt kernels also might be using these below ops. So we still - * use generic apic_read()/apic_write(), which might be pointing to different - * ops in PARAVIRT case. 
- */ -void xapic_wait_icr_idle(void) +void native_apic_wait_icr_idle(void) { while (apic_read(APIC_ICR) & APIC_ICR_BUSY) cpu_relax(); } -u32 safe_xapic_wait_icr_idle(void) +u32 native_safe_apic_wait_icr_idle(void) { u32 send_status; int timeout; @@ -237,13 +232,13 @@ u32 safe_xapic_wait_icr_idle(void) return send_status; } -void xapic_icr_write(u32 low, u32 id) +void native_apic_icr_write(u32 low, u32 id) { apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); apic_write(APIC_ICR, low); } -static u64 xapic_icr_read(void) +u64 native_apic_icr_read(void) { u32 icr1, icr2; @@ -253,54 +248,6 @@ static u64 xapic_icr_read(void) return icr1 | ((u64)icr2 << 32); } -static struct apic_ops xapic_ops = { - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = xapic_icr_read, - .icr_write = xapic_icr_write, - .wait_icr_idle = xapic_wait_icr_idle, - .safe_wait_icr_idle = safe_xapic_wait_icr_idle, -}; - -struct apic_ops __read_mostly *apic_ops = &xapic_ops; -EXPORT_SYMBOL_GPL(apic_ops); - -#ifdef CONFIG_X86_X2APIC -static void x2apic_wait_icr_idle(void) -{ - /* no need to wait for icr idle in x2apic */ - return; -} - -static u32 safe_x2apic_wait_icr_idle(void) -{ - /* no need to wait for icr idle in x2apic */ - return 0; -} - -void x2apic_icr_write(u32 low, u32 id) -{ - wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); -} - -static u64 x2apic_icr_read(void) -{ - unsigned long val; - - rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); - return val; -} - -static struct apic_ops x2apic_ops = { - .read = native_apic_msr_read, - .write = native_apic_msr_write, - .icr_read = x2apic_icr_read, - .icr_write = x2apic_icr_write, - .wait_icr_idle = x2apic_wait_icr_idle, - .safe_wait_icr_idle = safe_x2apic_wait_icr_idle, -}; -#endif - /** * enable_NMI_through_LVT0 - enable NMI through local vector table 0 */ @@ -1329,7 +1276,6 @@ void check_x2apic(void) if (msr & X2APIC_ENABLE) { pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); x2apic_preenabled = x2apic = 1; - apic_ops = &x2apic_ops; } } @@ -1403,7 +1349,6 @@ void __init enable_IR_x2apic(void) if (!x2apic) { x2apic = 1; - apic_ops = &x2apic_ops; enable_x2apic(); } diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 47a62f46afd..9eeb714c5de 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -263,4 +263,11 @@ struct genapic apic_bigsmp = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 4772e91e824..e22d6ed26e6 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 5e8c79e748a..42f090702f0 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 9abd48b2267..f6c70a164e3 100644 --- 
a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include struct nmi_watchdog_ctlblk { diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 55515d73d9c..23f1df4ce18 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -806,4 +806,11 @@ struct genapic apic_es7000 = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index cdc4772d9c8..70b616b4c62 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -19,8 +19,8 @@ #include #include -#include #include +#include #include extern struct genapic apic_flat; diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 249d2d3c034..36ee760fd13 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -17,8 +17,8 @@ #include #include #include -#include #include +#include #ifdef CONFIG_ACPI #include @@ -229,6 +229,13 @@ struct genapic apic_flat = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; /* @@ -374,4 +381,11 @@ struct genapic apic_physflat = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 7c87156b641..dd6e8d68542 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -7,8 +7,8 @@ #include #include -#include #include +#include DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); @@ -46,7 +46,7 @@ static void /* * send the IPI. */ - x2apic_icr_write(cfg, apicid); + native_x2apic_icr_write(cfg, apicid); } /* @@ -234,4 +234,11 @@ struct genapic apic_x2apic_cluster = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 5cbae8aa040..eb1486bb002 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -7,8 +7,8 @@ #include #include -#include #include +#include static int x2apic_phys; @@ -50,7 +50,7 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, /* * send the IPI. 
*/ - x2apic_icr_write(cfg, apicid); + native_x2apic_icr_write(cfg, apicid); } static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) @@ -220,4 +220,11 @@ struct genapic apic_x2apic_phys = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 89b84e004f0..9ae4a92fac8 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -22,8 +22,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -292,6 +292,13 @@ struct genapic apic_x2apic_uv_x = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, }; static __cpuinit void set_x2apic_extra_bits(int pnode) diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index dbf5445727a..1326272cae4 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index f13ca1650aa..3957776b193 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index bdfad80c3cf..48b9ca5e088 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -11,7 +11,7 @@ * Mikael Pettersson : PM converted to driver model. Disable/enable API. 
*/ -#include +#include #include #include diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 0cc41a1d255..f0f0c2f0596 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -569,4 +569,11 @@ struct genapic apic_numaq = { .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, .store_NMI_vector = numaq_store_NMI_vector, .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 22337b75de6..1f701caa95b 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -127,6 +127,13 @@ struct genapic apic_default = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; extern struct genapic apic_numaq; diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 1e733eff9b3..1cf32c325d1 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -599,4 +599,11 @@ struct genapic apic_summit = { .smp_callin_clear_local_apic = NULL, .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index aeef529917e..75eb5ec5dd2 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include static void uv_noop(unsigned int irq) diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index f052c84ecbe..a1c7b71dc0d 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include @@ -798,8 +798,8 @@ static inline int __init activate_vmi(void) #endif #ifdef CONFIG_X86_LOCAL_APIC - para_fill(apic_ops->read, APICRead); - para_fill(apic_ops->write, APICWrite); + para_fill(apic->read, APICRead); + para_fill(apic->write, APICWrite); #endif /* diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index a4791ef412d..2a5e0e6a7c0 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index da2e314f61b..bc9893f2c38 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include #include @@ -828,13 +828,14 @@ static u32 lguest_apic_safe_wait_icr_idle(void) return 0; } -static struct apic_ops lguest_basic_apic_ops = { - .read = lguest_apic_read, - .write = lguest_apic_write, - .icr_read = lguest_apic_icr_read, - .icr_write = lguest_apic_icr_write, - .wait_icr_idle = lguest_apic_wait_icr_idle, - .safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle, +static 
void set_lguest_basic_apic_ops(void) +{ + apic->read = lguest_apic_read; + apic->write = lguest_apic_write; + apic->icr_read = lguest_apic_icr_read; + apic->icr_write = lguest_apic_icr_write; + apic->wait_icr_idle = lguest_apic_wait_icr_idle; + apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle; }; #endif @@ -1035,7 +1036,7 @@ __init void lguest_init(void) #ifdef CONFIG_X86_LOCAL_APIC /* apic read/write intercepts */ - apic_ops = &lguest_basic_apic_ops; + set_lguest_basic_apic_ops(); #endif /* time operations */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 95ff6a0e942..e3dd3fb6729 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -36,7 +36,7 @@ #include #include -#include +#include #include #include #include @@ -554,14 +554,15 @@ static u32 xen_safe_apic_wait_icr_idle(void) return 0; } -static struct apic_ops xen_basic_apic_ops = { - .read = xen_apic_read, - .write = xen_apic_write, - .icr_read = xen_apic_icr_read, - .icr_write = xen_apic_icr_write, - .wait_icr_idle = xen_apic_wait_icr_idle, - .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle, -}; +static void set_xen_basic_apic_ops(void) +{ + apic->read = xen_apic_read; + apic->write = xen_apic_write; + apic->icr_read = xen_apic_icr_read; + apic->icr_write = xen_apic_icr_write; + apic->wait_icr_idle = xen_apic_wait_icr_idle; + apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; +} #endif @@ -898,7 +899,7 @@ asmlinkage void __init xen_start_kernel(void) /* * set up the basic apic ops. */ - apic_ops = &xen_basic_apic_ops; + set_xen_basic_apic_ops(); #endif if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { -- cgit v1.2.3 From 7d01d32d3b9becd6deba318b718db3d9fc181d23 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 12:33:20 +0100 Subject: x86, apic: fix build fallout of genapic changes - make oprofile build - select X86_X2APIC from X86_UV - it relies on it - export genapic for oprofile modular build Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/kernel/cpu/mcheck/p4.c | 2 +- arch/x86/kernel/genapic_64.c | 1 + arch/x86/kernel/probe_32.c | 2 ++ arch/x86/oprofile/nmi_int.c | 2 +- arch/x86/oprofile/op_model_p4.c | 2 +- arch/x86/oprofile/op_model_ppro.c | 2 +- 7 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bce241fe1d2..8955262caa3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -316,6 +316,7 @@ config X86_UV bool "SGI Ultraviolet" depends on X86_64 depends on X86_EXTENDED_PLATFORM + select X86_X2APIC ---help--- This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. 
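The oprofile breakage comes from apic_read()/apic_write() now being inlines that dereference the global 'apic' pointer, so a modular oprofile build needs that symbol exported; the Kconfig hunk above makes X86_UV pull in X86_X2APIC for the same "it relies on it" reason. A minimal sketch of the export dependency (simplified; the function name and the LVTPC write below are only illustrative, not a claim about the exact oprofile code):

	/* core kernel (genapic_64.c / probe_32.c): define and export */
	struct genapic __read_mostly *apic = &apic_flat;
	EXPORT_SYMBOL_GPL(apic);

	/* a module such as oprofile's nmi_int.c then resolves the    */
	/* inline accessors against the exported pointer:             */
	static void setup_perf_nmi(void)
	{
		apic_write(APIC_LVTPC, APIC_DM_NMI);	/* -> apic->write(...) */
	}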
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index 9b60fce09f7..f9c92b66dfb 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 70b616b4c62..ef788635324 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -30,6 +30,7 @@ extern struct genapic apic_x2apic_phys; extern struct genapic apic_x2apic_cluster; struct genapic __read_mostly *apic = &apic_flat; +EXPORT_SYMBOL_GPL(apic); static struct genapic *apic_probe[] __initdata = { #ifdef CONFIG_X86_UV diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 1f701caa95b..6e31b17d546 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include @@ -143,6 +144,7 @@ extern struct genapic apic_es7000; extern struct genapic apic_default; struct genapic *apic = &apic_default; +EXPORT_SYMBOL_GPL(apic); static struct genapic *apic_probe[] __initdata = { #ifdef CONFIG_X86_NUMAQ diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 202864ad49a..a32a5c7a8ef 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "op_counter.h" #include "op_x86_model.h" diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 4c4a51c90bc..09a237bc9ef 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "op_x86_model.h" diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index e9f80c744cf..5ebd8f605d7 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include -- cgit v1.2.3 From 28aa29eeb3918f820b914679cfc4404972f2df32 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:36:42 +0100 Subject: remove: genapic prepare Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic.h | 13 +------------ arch/x86/kernel/probe_32.c | 9 +++++++++ 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index a6d0b00a544..9b874a38683 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -168,8 +168,6 @@ extern void apic_send_IPI_self(int vector); extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); -extern void default_setup_apic_routing(void); - extern int default_cpu_present_to_apicid(int mps_cpu); extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); #endif @@ -211,10 +209,9 @@ static inline unsigned int read_apic_id(void) return apic->get_apic_id(reg); } -#ifdef CONFIG_X86_64 extern void default_setup_apic_routing(void); -#else +#ifdef CONFIG_X86_32 /* * Set up the logical destination ID. * @@ -251,14 +248,6 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -static inline void default_setup_apic_routing(void) -{ -#ifdef CONFIG_X86_IO_APIC - printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", - "Flat", nr_ioapics); -#endif -} - extern int default_apicid_to_node(int logical_apicid); #endif diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 6e31b17d546..b3d5d74e522 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -53,6 +53,15 @@ int no_broadcast = DEFAULT_SEND_IPI; #ifdef CONFIG_X86_LOCAL_APIC +void default_setup_apic_routing(void) +{ +#ifdef CONFIG_X86_IO_APIC + printk(KERN_INFO + "Enabling APIC mode: Flat. Using %d I/O APICs\n", + nr_ioapics); +#endif +} + static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) { /* -- cgit v1.2.3 From e2780a68f889c9d7ec8e78d58a3a2be8cfebf202 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 13:52:29 +0100 Subject: x86, apic: merge genapic.h into apic.h Impact: cleanup Reduce the number of include files to worry about. Also, most of the users of APIC facilities had to include genapic.h already, which embedded apic.h, so the distinction was meaningless. [ include apic.h from genapic.h for compatibility. ] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 314 ++++++++++++++++++++++++++++++++++++++++- arch/x86/include/asm/genapic.h | 312 ---------------------------------------- 2 files changed, 310 insertions(+), 316 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 4f56e053d34..c07f5fbf43c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -1,15 +1,18 @@ #ifndef _ASM_X86_APIC_H #define _ASM_X86_APIC_H -#include +#include #include +#include #include -#include -#include +#include #include +#include +#include +#include +#include #include -#include #include #define ARCH_APICTIMER_STOPS_ON_C3 1 @@ -235,4 +238,307 @@ static inline void disable_local_APIC(void) { } #endif +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch data struct. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ +struct genapic { + char *name; + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_registered)(void); + + u32 irq_delivery_mode; + u32 irq_dest_mode; + + const struct cpumask *(*target_cpus)(void); + + int disable_esr; + + int dest_logical; + unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); + void (*init_apic_ldr)(void); + + physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*apicid_to_node)(int logical_apicid); + int (*cpu_to_logical_apicid)(int cpu); + int (*cpu_present_to_apicid)(int mps_cpu); + physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); + void (*enable_apic_mode)(void); + int (*phys_pkg_id)(int cpuid_apic, int index_msb); + + /* + * When one of the next two hooks returns 1 the genapic + * is switched to this. 
Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); + + /* ipi */ + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); + + /* wakeup_secondary_cpu */ + int (*wakeup_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*store_NMI_vector)(unsigned short *high, unsigned short *low); + void (*inquire_remote_apic)(int apicid); + + /* apic ops */ + u32 (*read)(u32 reg); + void (*write)(u32 reg, u32 v); + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); +}; + +extern struct genapic *apic; + +static inline u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + apic->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction + * ... yummie. 
+ */ + + /* Docs say use 0 for future compatibility */ + apic_write(APIC_EOI, 0); +} + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver)) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + +/* + * Warm reset vector default position: + */ +#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 +#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 + +#ifdef CONFIG_X86_32 +extern void es7000_update_genapic_to_cluster(void); +#else +extern struct genapic apic_flat; +extern struct genapic apic_physflat; +extern struct genapic apic_x2apic_cluster; +extern struct genapic apic_x2apic_phys; +extern int default_acpi_madt_oem_check(char *, char *); + +extern void apic_send_IPI_self(int vector); + +extern struct genapic apic_x2apic_uv_x; +DECLARE_PER_CPU(int, x2apic_extra_bits); + +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); +#endif + +static inline void default_wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + +extern void generic_bigsmp_probe(void); + + +#ifdef CONFIG_X86_LOCAL_APIC + +#include + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline const struct cpumask *default_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + +DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); + + +static inline unsigned int read_apic_id(void) +{ + unsigned int reg; + + reg = apic_read(APIC_ID); + + return apic->get_apic_id(reg); +} + +extern void default_setup_apic_routing(void); + +#ifdef CONFIG_X86_32 +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... 
+ */ +extern void default_init_apic_ldr(void); + +static inline int default_apic_id_registered(void) +{ + return physid_isset(read_apic_id(), phys_cpu_present_map); +} + +static inline unsigned int +default_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + return cpumask_bits(cpumask)[0]; +} + +static inline unsigned int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + unsigned long mask1 = cpumask_bits(cpumask)[0]; + unsigned long mask2 = cpumask_bits(andmask)[0]; + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; + + return (unsigned int)(mask1 & mask2 & mask3); +} + +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +extern int default_apicid_to_node(int logical_apicid); + +#endif + +static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long default_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) +{ + return phys_map; +} + +/* Mapping from cpu number to logical apicid */ +static inline int default_cpu_to_logical_apicid(int cpu) +{ + return 1 << cpu; +} + +static inline int __default_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline int +__default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); +} + +#ifdef CONFIG_X86_32 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +static inline int +default_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return __default_check_phys_apicid_present(boot_cpu_physical_apicid); +} +#else +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); +#endif + +static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) +{ + return physid_mask_of_physid(phys_apicid); +} + +#endif /* CONFIG_X86_LOCAL_APIC */ + #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h index 9b874a38683..4b8b98fa7f2 100644 --- a/arch/x86/include/asm/genapic.h +++ b/arch/x86/include/asm/genapic.h @@ -1,313 +1 @@ -#ifndef _ASM_X86_GENAPIC_H -#define _ASM_X86_GENAPIC_H - -#include - -#include -#include #include - -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Generic APIC sub-arch data struct. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. 
- */ -struct genapic { - char *name; - - int (*probe)(void); - int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - int (*apic_id_registered)(void); - - u32 irq_delivery_mode; - u32 irq_dest_mode; - - const struct cpumask *(*target_cpus)(void); - - int disable_esr; - - int dest_logical; - unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); - - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); - void (*init_apic_ldr)(void); - - physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); - - void (*setup_apic_routing)(void); - int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); - int (*cpu_present_to_apicid)(int mps_cpu); - physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - void (*setup_portio_remap)(void); - int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); - void (*enable_apic_mode)(void); - int (*phys_pkg_id)(int cpuid_apic, int index_msb); - - /* - * When one of the next two hooks returns 1 the genapic - * is switched to this. Essentially they are additional - * probe functions: - */ - int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); - - unsigned int (*get_apic_id)(unsigned long x); - unsigned long (*set_apic_id)(unsigned int id); - unsigned long apic_id_mask; - - unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, - const struct cpumask *andmask); - - /* ipi */ - void (*send_IPI_mask)(const struct cpumask *mask, int vector); - void (*send_IPI_mask_allbutself)(const struct cpumask *mask, - int vector); - void (*send_IPI_allbutself)(int vector); - void (*send_IPI_all)(int vector); - void (*send_IPI_self)(int vector); - - /* wakeup_secondary_cpu */ - int (*wakeup_cpu)(int apicid, unsigned long start_eip); - - int trampoline_phys_low; - int trampoline_phys_high; - - void (*wait_for_init_deassert)(atomic_t *deassert); - void (*smp_callin_clear_local_apic)(void); - void (*store_NMI_vector)(unsigned short *high, unsigned short *low); - void (*inquire_remote_apic)(int apicid); - - /* apic ops */ - u32 (*read)(u32 reg); - void (*write)(u32 reg, u32 v); - u64 (*icr_read)(void); - void (*icr_write)(u32 low, u32 high); - void (*wait_icr_idle)(void); - u32 (*safe_wait_icr_idle)(void); -}; - -extern struct genapic *apic; - -static inline u32 apic_read(u32 reg) -{ - return apic->read(reg); -} - -static inline void apic_write(u32 reg, u32 val) -{ - apic->write(reg, val); -} - -static inline u64 apic_icr_read(void) -{ - return apic->icr_read(); -} - -static inline void apic_icr_write(u32 low, u32 high) -{ - apic->icr_write(low, high); -} - -static inline void apic_wait_icr_idle(void) -{ - apic->wait_icr_idle(); -} - -static inline u32 safe_apic_wait_icr_idle(void) -{ - return apic->safe_wait_icr_idle(); -} - - -static inline void ack_APIC_irq(void) -{ - /* - * ack_APIC_irq() actually gets compiled as a single instruction - * ... yummie. 
- */ - - /* Docs say use 0 for future compatibility */ - apic_write(APIC_EOI, 0); -} - -static inline unsigned default_get_apic_id(unsigned long x) -{ - unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); - - if (APIC_XAPIC(ver)) - return (x >> 24) & 0xFF; - else - return (x >> 24) & 0x0F; -} - -/* - * Warm reset vector default position: - */ -#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 -#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 - -#ifdef CONFIG_X86_32 -extern void es7000_update_genapic_to_cluster(void); -#else -extern struct genapic apic_flat; -extern struct genapic apic_physflat; -extern struct genapic apic_x2apic_cluster; -extern struct genapic apic_x2apic_phys; -extern int default_acpi_madt_oem_check(char *, char *); - -extern void apic_send_IPI_self(int vector); - -extern struct genapic apic_x2apic_uv_x; -DECLARE_PER_CPU(int, x2apic_extra_bits); - -extern int default_cpu_present_to_apicid(int mps_cpu); -extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); -#endif - -static inline void default_wait_for_init_deassert(atomic_t *deassert) -{ - while (!atomic_read(deassert)) - cpu_relax(); - return; -} - -extern void generic_bigsmp_probe(void); - - -#ifdef CONFIG_X86_LOCAL_APIC - -#include - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline const struct cpumask *default_target_cpus(void) -{ -#ifdef CONFIG_SMP - return cpu_online_mask; -#else - return cpumask_of(0); -#endif -} - -DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); - - -static inline unsigned int read_apic_id(void) -{ - unsigned int reg; - - reg = apic_read(APIC_ID); - - return apic->get_apic_id(reg); -} - -extern void default_setup_apic_routing(void); - -#ifdef CONFIG_X86_32 -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... 
- */ -extern void default_init_apic_ldr(void); - -static inline int default_apic_id_registered(void) -{ - return physid_isset(read_apic_id(), phys_cpu_present_map); -} - -static inline unsigned int -default_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - return cpumask_bits(cpumask)[0]; -} - -static inline unsigned int -default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - unsigned long mask1 = cpumask_bits(cpumask)[0]; - unsigned long mask2 = cpumask_bits(andmask)[0]; - unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; - - return (unsigned int)(mask1 & mask2 & mask3); -} - -static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -extern int default_apicid_to_node(int logical_apicid); - -#endif - -static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long default_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) -{ - return phys_map; -} - -/* Mapping from cpu number to logical apicid */ -static inline int default_cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - -static inline int __default_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static inline int -__default_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - -#ifdef CONFIG_X86_32 -static inline int default_cpu_present_to_apicid(int mps_cpu) -{ - return __default_cpu_present_to_apicid(mps_cpu); -} - -static inline int -default_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return __default_check_phys_apicid_present(boot_cpu_physical_apicid); -} -#else -extern int default_cpu_present_to_apicid(int mps_cpu); -extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid); -#endif - -static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -#endif /* CONFIG_X86_LOCAL_APIC */ - -#endif /* _ASM_X86_GENAPIC_64_H */ -- cgit v1.2.3 From 7b6aa335ca1a845c2262ec7a595b4521bca0f79d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 13:58:15 +0100 Subject: x86, apic: remove genapic.h Impact: cleanup Remove genapic.h and remove all references to it. 
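This step works because the previous commit already turned asm/genapic.h into a compatibility wrapper that does nothing but include apic.h. Roughly (a sketch of the header relationship; the include targets are written out here even though the listing strips them):

	/* asm/genapic.h after the merge commit: a pure forwarder */
	#include <asm/apic.h>

	/* each of the 45 files in the diffstat below therefore just     */
	/* replaces "#include <asm/genapic.h>" with "#include <asm/apic.h>", */
	/* leaving the wrapper with no users so it can be dropped.       */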
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/ipi.h | 2 +- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/apic.c | 2 +- arch/x86/kernel/bigsmp_32.c | 2 +- arch/x86/kernel/cpu/addon_cpuid_features.c | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/cpu/common.c | 4 ++-- arch/x86/kernel/cpu/intel.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 2 +- arch/x86/kernel/cpu/mcheck/p4.c | 2 +- arch/x86/kernel/crash.c | 2 +- arch/x86/kernel/es7000_32.c | 4 ++-- arch/x86/kernel/genapic_64.c | 2 +- arch/x86/kernel/genapic_flat_64.c | 2 +- arch/x86/kernel/genx2apic_cluster.c | 2 +- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/kernel/io_apic.c | 2 +- arch/x86/kernel/ipi.c | 2 +- arch/x86/kernel/irq.c | 2 +- arch/x86/kernel/irq_32.c | 2 +- arch/x86/kernel/kgdb.c | 2 +- arch/x86/kernel/mpparse.c | 2 +- arch/x86/kernel/nmi.c | 2 +- arch/x86/kernel/numaq_32.c | 4 ++-- arch/x86/kernel/probe_32.c | 8 ++++---- arch/x86/kernel/reboot.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/smp.c | 2 +- arch/x86/kernel/smpboot.c | 4 ++-- arch/x86/kernel/summit_32.c | 2 +- arch/x86/kernel/tlb_uv.c | 4 ++-- arch/x86/kernel/uv_irq.c | 2 +- arch/x86/kernel/visws_quirks.c | 4 ++-- arch/x86/kernel/vmi_32.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 2 +- arch/x86/mm/srat_64.c | 2 +- arch/x86/mm/tlb.c | 2 +- arch/x86/oprofile/nmi_int.c | 2 +- arch/x86/oprofile/op_model_p4.c | 2 +- arch/x86/oprofile/op_model_ppro.c | 2 +- arch/x86/pci/numaq_32.c | 2 +- arch/x86/xen/enlighten.c | 2 +- 45 files changed, 54 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 5f2efc5d992..3395c680a97 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -123,7 +123,7 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector); extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); -#include +#include extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 956c1dee6fb..42814152c94 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index af494bad885..7db03a9b61d 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -36,7 +36,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 9eeb714c5de..72f4e534051 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index e48640cfac0..6882a735d9c 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -7,7 +7,7 @@ #include #include -#include +#include struct cpuid_bit { u16 feature; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ff4d7b9e32e..c94ba9311e6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -12,7 +12,7 @@ # include #endif -#include +#include #include "cpu.h" diff --git a/arch/x86/kernel/cpu/common.c 
b/arch/x86/kernel/cpu/common.c index 4b5d13e472d..41f3788ec9b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -26,8 +26,8 @@ #ifdef CONFIG_X86_LOCAL_APIC #include #include -#include -#include +#include +#include #include #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1f137a87d4b..290f92e2b7c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -24,7 +24,7 @@ #ifdef CONFIG_X86_LOCAL_APIC #include #include -#include +#include #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index e22d6ed26e6..4772e91e824 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 42f090702f0..5e8c79e748a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index f9c92b66dfb..9b60fce09f7 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index ad7f2a696f4..3340cc0f244 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -28,7 +28,7 @@ #include #include -#include +#include #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 23f1df4ce18..6cdfb3e4dc3 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include /* @@ -387,7 +387,7 @@ void __init es7000_enable_apic_mode(void) #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index ef788635324..91cae6f6e73 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 36ee760fd13..a7d84763648 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #ifdef CONFIG_ACPI diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index dd6e8d68542..f5e02cffa26 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index eb1486bb002..11eb4cb7ca3 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include static int x2apic_phys; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 9ae4a92fac8..c1746a198bd 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -22,7 +22,7 @@ #include #include 
#include -#include +#include #include #include #include diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index a89878e08a4..00e6071cefc 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -62,7 +62,7 @@ #include #include -#include +#include #define __apicdebuginit(type) static type __init diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 1326272cae4..dbf5445727a 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3957776b193..f13ca1650aa 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 4beb9a13873..e4ac7c80e2d 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -212,7 +212,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) } #ifdef CONFIG_HOTPLUG_CPU -#include +#include /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5c4f5548384..eedfaebe106 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -46,7 +46,7 @@ #include #include -#include +#include /* * Put the error code here just in case the user cares: diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 20076445319..7f4d2586972 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -29,7 +29,7 @@ #include #include -#include +#include /* * Checksum an MP configuration block. */ diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 48b9ca5e088..bdfad80c3cf 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -11,7 +11,7 @@ * Mikael Pettersson : PM converted to driver model. Disable/enable API. 
*/ -#include +#include #include #include diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index f0f0c2f0596..5a2d75d1fd4 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include #include #include @@ -301,7 +301,7 @@ int __init get_memcfg_numaq(void) #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index b3d5d74e522..be0d554984a 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -17,20 +17,20 @@ #include #include #include -#include +#include #include #include #include #include -#include +#include #include #include #include #include #include #include -#include +#include #include #include @@ -41,7 +41,7 @@ #include #include -#include +#include #ifdef CONFIG_HOTPLUG_CPU #define DEFAULT_SEND_IPI (1) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 32e8f0af292..7c8cd447d5e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -24,7 +24,7 @@ # include #endif -#include +#include /* * Power off function, if any diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 43d964411c0..deaafd2693e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -97,7 +97,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index eaaffae31cc..13f33ea8cca 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include /* * Some notes on x86 processor bugs affecting SMP operation: * diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b5f2b698973..562a9fc3bc3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -60,12 +60,12 @@ #include #include #include -#include +#include #include #include #include -#include +#include #include #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 1cf32c325d1..eb31ba276bb 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index f396e61bcb3..1a7dfa7cb52 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -15,12 +15,12 @@ #include #include #include -#include +#include #include #include #include -#include +#include static struct bau_control **uv_bau_table_bases __read_mostly; static int uv_bau_retry_limit __read_mostly; diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 75eb5ec5dd2..aeef529917e 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include static void uv_noop(unsigned int irq) diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 4fd646e6dd4..5264fea6c28 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -32,9 +32,9 @@ #include #include -#include +#include -#include +#include #include diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index a1c7b71dc0d..2cc4a90e2cb 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 
2a5e0e6a7c0..a4791ef412d 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index bc9893f2c38..f3a5305b8ad 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 15df1baee10..574c8bc95ef 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include int acpi_numa __initdata; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 14c5af4d11e..b641349fe07 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -14,7 +14,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, }; -#include +#include /* * Smarter SMP flushing macros. * c/o Linus Torvalds. diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index a32a5c7a8ef..202864ad49a 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "op_counter.h" #include "op_x86_model.h" diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 09a237bc9ef..4c4a51c90bc 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "op_x86_model.h" diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 5ebd8f605d7..e9f80c744cf 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 5601e829c38..8eb295e116f 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e3dd3fb6729..86497d5f44c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -36,7 +36,7 @@ #include #include -#include +#include #include #include #include -- cgit v1.2.3 From e641f5f525acb163ba71d92de79c9c7366deae03 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:02:01 +0100 Subject: x86, apic: remove duplicate asm/apic.h inclusions Impact: cleanup Signed-off-by: Ingo Molnar --- arch/x86/include/asm/ipi.h | 2 -- arch/x86/kernel/acpi/boot.c | 1 - arch/x86/kernel/apic.c | 1 - arch/x86/kernel/cpu/amd.c | 2 -- arch/x86/kernel/cpu/common.c | 6 ++---- arch/x86/kernel/cpu/intel.c | 1 - arch/x86/kernel/crash.c | 2 -- arch/x86/kernel/es7000_32.c | 1 - arch/x86/kernel/irq_32.c | 1 - arch/x86/kernel/numaq_32.c | 1 - arch/x86/kernel/probe_32.c | 4 ---- arch/x86/kernel/reboot.c | 2 -- arch/x86/kernel/setup.c | 1 - arch/x86/kernel/smpboot.c | 1 - arch/x86/kernel/summit_32.c | 1 - arch/x86/kernel/tlb_uv.c | 2 -- arch/x86/kernel/visws_quirks.c | 7 +------ arch/x86/mm/tlb.c | 1 - 18 files changed, 3 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 3395c680a97..0b7228268a6 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -123,8 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int 
vector); extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); -#include - extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector); extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 42814152c94..a18eb7ce223 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 7db03a9b61d..c12823eb55b 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -36,7 +36,6 @@ #include #include -#include #include #include #include diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c94ba9311e6..25423a5b80e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -12,8 +12,6 @@ # include #endif -#include - #include "cpu.h" #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 41f3788ec9b..826d5c87627 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -23,11 +23,9 @@ #include #include #include -#ifdef CONFIG_X86_LOCAL_APIC -#include -#include -#include #include + +#ifdef CONFIG_X86_LOCAL_APIC #include #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 290f92e2b7c..7aeef1d327b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -24,7 +24,6 @@ #ifdef CONFIG_X86_LOCAL_APIC #include #include -#include #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 3340cc0f244..ff958248e61 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -28,8 +28,6 @@ #include #include -#include - #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 6cdfb3e4dc3..352d870e5d5 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -387,7 +387,6 @@ void __init es7000_enable_apic_mode(void) #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index e4ac7c80e2d..9dc6b2b2427 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -212,7 +212,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) } #ifdef CONFIG_HOTPLUG_CPU -#include /* A cpu has been removed from cpu_online_mask. Reset irq affinities. 
*/ void fixup_irqs(void) diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 5a2d75d1fd4..40400a58845 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -301,7 +301,6 @@ int __init get_memcfg_numaq(void) #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index be0d554984a..fd1352ac909 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -23,14 +23,12 @@ #include #include #include -#include #include #include #include #include #include #include -#include #include #include @@ -41,8 +39,6 @@ #include #include -#include - #ifdef CONFIG_HOTPLUG_CPU #define DEFAULT_SEND_IPI (1) #else diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 7c8cd447d5e..1cc18d439bb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -24,8 +24,6 @@ # include #endif -#include - /* * Power off function, if any */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index deaafd2693e..b2da0b1d15e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -97,7 +97,6 @@ #include #include -#include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 562a9fc3bc3..09e73876a44 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -65,7 +65,6 @@ #include #include -#include #include #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index eb31ba276bb..577b0bd8e53 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 1a7dfa7cb52..f04549afcfe 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -20,8 +20,6 @@ #include #include -#include - static struct bau_control **uv_bau_table_bases __read_mostly; static int uv_bau_retry_limit __read_mostly; diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 5264fea6c28..34199d30ff4 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -29,13 +29,10 @@ #include #include #include +#include #include #include -#include - -#include - #include #include @@ -49,8 +46,6 @@ extern int no_broadcast; -#include - char visws_board_type = -1; char visws_board_rev = -1; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index b641349fe07..a654d59e448 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -14,7 +14,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, }; -#include /* * Smarter SMP flushing macros. * c/o Linus Torvalds. -- cgit v1.2.3 From 5c615feb90ce63f2293a7ac7809f320f46c75e0a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:04:24 +0100 Subject: x86, apic: remove stale references to APIC_DEFINITION Impact: cleanup APIC_DEFINITION was a hack from the x86 subarch times, it has no meaning anymore - remove it. Signed-off-by: Ingo Molnar --- arch/x86/kernel/bigsmp_32.c | 1 - arch/x86/kernel/es7000_32.c | 1 - arch/x86/kernel/numaq_32.c | 1 - arch/x86/kernel/summit_32.c | 1 - 4 files changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 72f4e534051..f49af36351f 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -2,7 +2,6 @@ * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. 
* Drives the local APIC in "clustered mode". */ -#define APIC_DEFINITION 1 #include #include #include diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 352d870e5d5..cf53a98dbf1 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -383,7 +383,6 @@ void __init es7000_enable_apic_mode(void) /* * APIC driver for the Unisys ES7000 chipset. */ -#define APIC_DEFINITION 1 #include #include #include diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 40400a58845..dcf22f50827 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -297,7 +297,6 @@ int __init get_memcfg_numaq(void) /* * APIC driver for the IBM NUMAQ chipset. */ -#define APIC_DEFINITION 1 #include #include #include diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 577b0bd8e53..30597778914 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -34,7 +34,6 @@ /* * APIC driver for the IBM "Summit" chipset. */ -#define APIC_DEFINITION 1 #include #include #include -- cgit v1.2.3 From 77313190d121dd1fffa965aff6e9f0782a307bb8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:09:20 +0100 Subject: x86, apic: clean up arch/x86/kernel/bigsmp_32.c Impact: cleanup - remove unnecessary indirections that were artifacts of the subarch code - clean up include file section - clean up various small details Signed-off-by: Ingo Molnar --- arch/x86/kernel/bigsmp_32.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index f49af36351f..41732abd700 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -1,27 +1,26 @@ /* - * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. + * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. + * * Drives the local APIC in "clustered mode". */ #include #include -#include -#include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include +#include static inline unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } -#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) - static inline int bigsmp_apic_id_registered(void) { return 1; @@ -36,8 +35,6 @@ static inline const cpumask_t *bigsmp_target_cpus(void) #endif } -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - static inline unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) { @@ -52,9 +49,11 @@ static inline unsigned long bigsmp_check_apicid_present(int bit) static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = xapic_phys_to_log_apicid(cpu); + id = per_cpu(x86_bios_cpu_apicid, cpu); val |= SET_APIC_LOGICAL_ID(id); + return val; } @@ -70,15 +69,16 @@ static inline void bigsmp_init_apic_ldr(void) unsigned long val; int cpu = smp_processor_id(); - apic_write(APIC_DFR, APIC_DFR_VALUE); + apic_write(APIC_DFR, APIC_DFR_FLAT); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } static inline void bigsmp_setup_apic_routing(void) { - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "Physflat", nr_ioapics); + printk(KERN_INFO + "Enabling APIC mode: Physflat. 
Using %d I/O APICs\n", + nr_ioapics); } static inline int bigsmp_apicid_to_node(int logical_apicid) @@ -100,6 +100,7 @@ static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) } extern u8 cpu_2_logical_apicid[]; + /* Mapping from cpu number to logical apicid */ static inline int bigsmp_cpu_to_logical_apicid(int cpu) { @@ -175,21 +176,24 @@ static int hp_ht_bigsmp(const struct dmi_system_id *d) { printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); dmi_bigsmp = 1; + return 0; } static const struct dmi_system_id bigsmp_dmi_table[] = { { hp_ht_bigsmp, "HP ProLiant DL760 G2", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P44-"), + } }, { hp_ht_bigsmp, "HP ProLiant DL740", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P47-"), + } }, - { } + { } /* NULL entry stops DMI scanning */ }; static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) @@ -204,6 +208,7 @@ static int probe_bigsmp(void) dmi_bigsmp = 1; else dmi_check_system(bigsmp_dmi_table); + return dmi_bigsmp; } -- cgit v1.2.3 From 2f205bc47f615b7bd0c7aba817d67ce25760eaf1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:45:30 +0100 Subject: x86, apic: clean up the cpu_2_logical_apiciddeclaration extern declarations were scattered in 4 files - consolidate them into apic.h. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 4 ++++ arch/x86/kernel/bigsmp_32.c | 2 -- arch/x86/kernel/es7000_32.c | 3 +-- arch/x86/kernel/numaq_32.c | 5 +---- arch/x86/kernel/summit_32.c | 4 +--- 5 files changed, 7 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c07f5fbf43c..2cdd19e4536 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -541,4 +541,8 @@ static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) #endif /* CONFIG_X86_LOCAL_APIC */ +#ifdef CONFIG_X86_32 +extern u8 cpu_2_logical_apicid[NR_CPUS]; +#endif + #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 41732abd700..0de9eed7c60 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -99,8 +99,6 @@ static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -extern u8 cpu_2_logical_apicid[]; - /* Mapping from cpu number to logical apicid */ static inline int bigsmp_cpu_to_logical_apicid(int cpu) { diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index cf53a98dbf1..3dc48831eb9 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -403,7 +403,6 @@ void __init es7000_enable_apic_mode(void) extern void es7000_enable_apic_mode(void); extern int apic_version [MAX_APICS]; -extern u8 cpu_2_logical_apicid[]; extern unsigned int boot_cpu_physical_apicid; extern int parse_unisys_oem (char *oemptr); @@ -570,7 +569,7 @@ static int es7000_cpu_to_logical_apicid(int cpu) #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; + return cpu_2_logical_apicid[cpu]; #else return logical_smp_processor_id(); #endif diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index dcf22f50827..9abaacde72e 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -408,14 
+408,11 @@ static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) return physids_promote(0xFUL); } -/* Mapping from cpu number to logical apicid */ -extern u8 cpu_2_logical_apicid[]; - static inline int numaq_cpu_to_logical_apicid(int cpu) { if (cpu >= nr_cpu_ids) return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; + return cpu_2_logical_apicid[cpu]; } /* diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 30597778914..7a1db1f2356 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -209,8 +209,6 @@ static inline unsigned long summit_check_apicid_present(int bit) #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) -extern u8 cpu_2_logical_apicid[]; - static inline void summit_init_apic_ldr(void) { unsigned long val, id; @@ -264,7 +262,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu) #ifdef CONFIG_SMP if (cpu >= nr_cpu_ids) return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; + return cpu_2_logical_apicid[cpu]; #else return logical_smp_processor_id(); #endif -- cgit v1.2.3 From 2c4ce18c95d632c9227ebcc6d45da11a9ef1ec70 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 14:57:16 +0100 Subject: x86, es7000: clean up No code changed: arch/x86/kernel/es7000_32.o: text data bss dec hex filename 2813 192 44 3049 be9 es7000_32.o.before 2813 192 44 3049 be9 es7000_32.o.after md5: a593d98a882bf534622c70d9568497ac es7000_32.o.before.asm a593d98a882bf534622c70d9568497ac es7000_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 241 +++++++++++++++++++++----------------------- 1 file changed, 113 insertions(+), 128 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 3dc48831eb9..c7cc9776cca 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -24,90 +24,96 @@ * http://www.unisys.com */ -#include -#include +#include +#include +#include +#include #include -#include +#include +#include #include -#include +#include #include -#include -#include -#include #include -#include -#include -#include -#include +#include +#include + #include -#include +#include +#include +#include #include +#include +#include +#include +#include +#include /* * ES7000 chipsets */ -#define NON_UNISYS 0 -#define ES7000_CLASSIC 1 -#define ES7000_ZORRO 2 +#define NON_UNISYS 0 +#define ES7000_CLASSIC 1 +#define ES7000_ZORRO 2 +#define MIP_REG 1 +#define MIP_PSAI_REG 4 -#define MIP_REG 1 -#define MIP_PSAI_REG 4 +#define MIP_BUSY 1 +#define MIP_SPIN 0xf0000 +#define MIP_VALID 0x0100000000000000ULL -#define MIP_BUSY 1 -#define MIP_SPIN 0xf0000 -#define MIP_VALID 0x0100000000000000ULL -#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff) +#define MIP_PORT(val) ((val >> 32) & 0xffff) -#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff) +#define MIP_RD_LO(val) (val & 0xffffffff) struct mip_reg_info { - unsigned long long mip_info; - unsigned long long delivery_info; - unsigned long long host_reg; - unsigned long long mip_reg; + unsigned long long mip_info; + unsigned long long delivery_info; + unsigned long long host_reg; + unsigned long long mip_reg; }; struct part_info { - unsigned char type; - unsigned char length; - unsigned char part_id; - unsigned char apic_mode; - unsigned long snum; - char ptype[16]; - char sname[64]; - char pname[64]; + unsigned char type; + unsigned char length; + unsigned char part_id; + unsigned char apic_mode; + unsigned long snum; + char ptype[16]; + char sname[64]; + char pname[64]; }; 
struct psai { - unsigned long long entry_type; - unsigned long long addr; - unsigned long long bep_addr; + unsigned long long entry_type; + unsigned long long addr; + unsigned long long bep_addr; }; struct es7000_mem_info { - unsigned char type; - unsigned char length; - unsigned char resv[6]; - unsigned long long start; - unsigned long long size; + unsigned char type; + unsigned char length; + unsigned char resv[6]; + unsigned long long start; + unsigned long long size; }; struct es7000_oem_table { - unsigned long long hdr; - struct mip_reg_info mip; - struct part_info pif; - struct es7000_mem_info shm; - struct psai psai; + unsigned long long hdr; + struct mip_reg_info mip; + struct part_info pif; + struct es7000_mem_info shm; + struct psai psai; }; #ifdef CONFIG_ACPI struct oem_table { - struct acpi_table_header Header; - u32 OEMTableAddr; - u32 OEMTableSize; + struct acpi_table_header Header; + u32 OEMTableAddr; + u32 OEMTableSize; }; extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); @@ -115,36 +121,50 @@ extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); #endif struct mip_reg { - unsigned long long off_0; - unsigned long long off_8; - unsigned long long off_10; - unsigned long long off_18; - unsigned long long off_20; - unsigned long long off_28; - unsigned long long off_30; - unsigned long long off_38; + unsigned long long off_0x00; + unsigned long long off_0x08; + unsigned long long off_0x10; + unsigned long long off_0x18; + unsigned long long off_0x20; + unsigned long long off_0x28; + unsigned long long off_0x30; + unsigned long long off_0x38; }; -#define MIP_SW_APIC 0x1020b -#define MIP_FUNC(VALUE) (VALUE & 0xff) +#define MIP_SW_APIC 0x1020b +#define MIP_FUNC(VALUE) (VALUE & 0xff) + +#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) +#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) +#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +extern void es7000_enable_apic_mode(void); +extern int parse_unisys_oem (char *oemptr); +extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); +extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); +extern void setup_unisys(void); + +#define apicid_cluster(apicid) (apicid & 0xF0) /* * ES7000 Globals */ -static volatile unsigned long *psai = NULL; -static struct mip_reg *mip_reg; -static struct mip_reg *host_reg; -static int mip_port; -static unsigned long mip_addr, host_addr; +static volatile unsigned long *psai = NULL; +static struct mip_reg *mip_reg; +static struct mip_reg *host_reg; +static int mip_port; +static unsigned long mip_addr, host_addr; -int es7000_plat; +int es7000_plat; /* * GSI override for ES7000 platforms. */ -static unsigned int base; +static unsigned int base; static int es7000_rename_gsi(int ioapic, int gsi) @@ -160,6 +180,7 @@ es7000_rename_gsi(int ioapic, int gsi) if (!ioapic && (gsi < 16)) gsi += base; + return gsi; } @@ -196,8 +217,7 @@ static int __init es7000_update_genapic(void) return 0; } -void __init -setup_unisys(void) +void __init setup_unisys(void) { /* * Determine the generation of the ES7000 currently running. 
@@ -219,8 +239,7 @@ setup_unisys(void) * Parse the OEM Table */ -int __init -parse_unisys_oem (char *oemptr) +int __init parse_unisys_oem (char *oemptr) { int i; int success = 0; @@ -277,12 +296,15 @@ parse_unisys_oem (char *oemptr) es7000_plat = NON_UNISYS; } else setup_unisys(); + return es7000_plat; } #ifdef CONFIG_ACPI -static unsigned long oem_addrX; -static unsigned long oem_size; + +static unsigned long oem_addrX; +static unsigned long oem_size; + int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; @@ -315,8 +337,7 @@ void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) } #endif -static void -es7000_spin(int n) +static void es7000_spin(int n) { int i = 0; @@ -327,16 +348,15 @@ es7000_spin(int n) static int __init es7000_mip_write(struct mip_reg *mip_reg) { - int status = 0; - int spin; + int status = 0; + int spin; spin = MIP_SPIN; - while (((unsigned long long)host_reg->off_38 & - (unsigned long long)MIP_VALID) != 0) { - if (--spin <= 0) { - printk("es7000_mip_write: Timeout waiting for Host Valid Flag"); - return -1; - } + while ((host_reg->off_0x38 & MIP_VALID) != 0) { + if (--spin <= 0) { + printk("es7000_mip_write: Timeout waiting for Host Valid Flag"); + return -1; + } es7000_spin(MIP_SPIN); } @@ -345,8 +365,7 @@ es7000_mip_write(struct mip_reg *mip_reg) spin = MIP_SPIN; - while (((unsigned long long)mip_reg->off_38 & - (unsigned long long)MIP_VALID) == 0) { + while ((mip_reg->off_0x38 & MIP_VALID) == 0) { if (--spin <= 0) { printk("es7000_mip_write: Timeout waiting for MIP Valid Flag"); return -1; @@ -354,10 +373,9 @@ es7000_mip_write(struct mip_reg *mip_reg) es7000_spin(MIP_SPIN); } - status = ((unsigned long long)mip_reg->off_0 & - (unsigned long long)0xffff0000000000ULL) >> 48; - mip_reg->off_38 = ((unsigned long long)mip_reg->off_38 & - (unsigned long long)~MIP_VALID); + status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48; + mip_reg->off_0x38 &= ~MIP_VALID; + return status; } @@ -371,8 +389,8 @@ void __init es7000_enable_apic_mode(void) printk("ES7000: Enabling APIC mode.\n"); memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); - es7000_mip_reg.off_0 = MIP_SW_APIC; - es7000_mip_reg.off_38 = MIP_VALID; + es7000_mip_reg.off_0x00 = MIP_SW_APIC; + es7000_mip_reg.off_0x38 = MIP_VALID; while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { printk("es7000_enable_apic_mode: command failed, status = %x\n", @@ -380,39 +398,6 @@ void __init es7000_enable_apic_mode(void) } } -/* - * APIC driver for the Unisys ES7000 chipset. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) -#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -extern void es7000_enable_apic_mode(void); -extern int apic_version [MAX_APICS]; -extern unsigned int boot_cpu_physical_apicid; - -extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); -extern void setup_unisys(void); - -#define apicid_cluster(apicid) (apicid & 0xF0) -#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) - static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus @@ -495,9 +480,9 @@ static unsigned long es7000_check_apicid_present(int bit) static unsigned long calculate_ldr(int cpu) { - unsigned long id = xapic_phys_to_log_apicid(cpu); + unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); - return (SET_APIC_LOGICAL_ID(id)); + return SET_APIC_LOGICAL_ID(id); } /* @@ -547,7 +532,7 @@ static int es7000_cpu_present_to_apicid(int mps_cpu) if (!mps_cpu) return boot_cpu_physical_apicid; else if (mps_cpu < nr_cpu_ids) - return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); + return per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } @@ -584,7 +569,7 @@ static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) static int es7000_check_phys_apicid_present(int cpu_physical_apicid) { boot_cpu_physical_apicid = read_apic_id(); - return (1); + return 1; } static unsigned int -- cgit v1.2.3 From b9e0d1aa9767707cad24db32d8ce0409df16d491 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 15:09:08 +0100 Subject: x86, apic: remove apicid_cluster() There were multiple definitions of apicid_cluster() scattered around in APIC drivers - but the definitions are equivalent to the already existing generic APIC_CLUSTER() method. So remove apicid_cluster() and change all users to APIC_CLUSTER(). No code changed: md5: 1b8244ba8d3d6a454593ce10f09dfa58 summit_32.o.before.asm 1b8244ba8d3d6a454593ce10f09dfa58 summit_32.o.after.asm md5: a593d98a882bf534622c70d9568497ac es7000_32.o.before.asm a593d98a882bf534622c70d9568497ac es7000_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 8 ++------ arch/x86/kernel/numaq_32.c | 2 -- arch/x86/kernel/summit_32.c | 9 +++------ 3 files changed, 5 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index c7cc9776cca..8a20866b2a3 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -146,8 +146,6 @@ extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); extern void setup_unisys(void); -#define apicid_cluster(apicid) (apicid & 0xF0) - /* * ES7000 Globals */ @@ -595,8 +593,7 @@ es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = es7000_cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); return 0xFF; @@ -630,8 +627,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) if (cpu_isset(cpu, *cpumask)) { int new_apicid = es7000_cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); return es7000_cpu_to_logical_apicid(0); diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 9abaacde72e..15328500de6 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -375,8 +375,6 @@ static inline unsigned long numaq_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } -#define apicid_cluster(apicid) (apicid & 0xF0) - static inline int numaq_apic_id_registered(void) { return 1; diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 7a1db1f2356..c4690349a54 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ 
-207,14 +207,12 @@ static inline unsigned long summit_check_apicid_present(int bit) return 1; } -#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) - static inline void summit_init_apic_ldr(void) { unsigned long val, id; int count = 0; u8 my_id = (u8)hard_smp_processor_id(); - u8 my_cluster = (u8)apicid_cluster(my_id); + u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; int i; @@ -222,7 +220,7 @@ static inline void summit_init_apic_ldr(void) /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = nr_cpu_ids; --i >= 0; ) { lid = cpu_2_logical_apicid[i]; - if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) + if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } #endif @@ -319,8 +317,7 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) if (cpu_isset(cpu, *cpumask)) { int new_apicid = summit_cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { printk ("%s: Not a valid mask!\n", __func__); return 0xFF; -- cgit v1.2.3 From d3185b37df05e9ad7ce987a9a0419ffe1af9d23f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 15:13:05 +0100 Subject: x86, es7000: remove externs Impact: cleanup In the subarch times there were a number of externs between various bits of the ES7000 code. Now that there's a single es7000-platform support file, the externs can be removed and the functions can be changed the statics. Beyond the cleanup factor, this also shrinks the size of the kernel image a bit: arch/x86/kernel/es7000_32.o: text data bss dec hex filename 2813 192 44 3049 be9 es7000_32.o.before 2693 192 44 2929 b71 es7000_32.o.after Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 8a20866b2a3..d5c3894a983 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -116,8 +116,6 @@ struct oem_table { u32 OEMTableSize; }; -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); #endif struct mip_reg { @@ -140,12 +138,6 @@ struct mip_reg { #define APIC_DFR_VALUE (APIC_DFR_FLAT) -extern void es7000_enable_apic_mode(void); -extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr); -extern void setup_unisys(void); - /* * ES7000 Globals */ @@ -215,7 +207,7 @@ static int __init es7000_update_genapic(void) return 0; } -void __init setup_unisys(void) +static void __init setup_unisys(void) { /* * Determine the generation of the ES7000 currently running. 
@@ -234,10 +226,9 @@ void __init setup_unisys(void) } /* - * Parse the OEM Table + * Parse the OEM Table: */ - -int __init parse_unisys_oem (char *oemptr) +static int __init parse_unisys_oem (char *oemptr) { int i; int success = 0; @@ -290,9 +281,9 @@ int __init parse_unisys_oem (char *oemptr) tp += size; } - if (success < 2) { + if (success < 2) es7000_plat = NON_UNISYS; - } else + else setup_unisys(); return es7000_plat; @@ -303,7 +294,7 @@ int __init parse_unisys_oem (char *oemptr) static unsigned long oem_addrX; static unsigned long oem_size; -int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) +static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; int i = 0; @@ -326,7 +317,7 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) return -1; } -void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) +static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) { if (!oem_addr) return; @@ -377,7 +368,7 @@ es7000_mip_write(struct mip_reg *mip_reg) return status; } -void __init es7000_enable_apic_mode(void) +static void __init es7000_enable_apic_mode(void) { struct mip_reg es7000_mip_reg; int mip_status; @@ -706,9 +697,9 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) check_dsdt = es7000_check_dsdt(); if (!find_unisys_acpi_oem_table(&oem_addr)) { - if (check_dsdt) + if (check_dsdt) { ret = parse_unisys_oem((char *)oem_addr); - else { + } else { setup_unisys(); ret = 1; } -- cgit v1.2.3 From 352887d1c9d99d4c2f0fbac6176ef0cd4fe7a820 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 15:17:55 +0100 Subject: x86, es7000: remove dead code, clean up Impact: cleanup - a number of structure definitions were stale - remove needless wrappers around apic definitions - fix details noticed by checkpatch No code changed: md5: 029d8fde0aaf6e934ea63bd8b36430fd es7000_32.o.before.asm 029d8fde0aaf6e934ea63bd8b36430fd es7000_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 103 ++++++++++++++------------------------------ 1 file changed, 33 insertions(+), 70 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index d5c3894a983..03acbe95d2b 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -23,7 +23,6 @@ * * http://www.unisys.com */ - #include #include #include @@ -63,11 +62,23 @@ #define MIP_BUSY 1 #define MIP_SPIN 0xf0000 #define MIP_VALID 0x0100000000000000ULL +#define MIP_SW_APIC 0x1020b #define MIP_PORT(val) ((val >> 32) & 0xffff) #define MIP_RD_LO(val) (val & 0xffffffff) +struct mip_reg { + unsigned long long off_0x00; + unsigned long long off_0x08; + unsigned long long off_0x10; + unsigned long long off_0x18; + unsigned long long off_0x20; + unsigned long long off_0x28; + unsigned long long off_0x30; + unsigned long long off_0x38; +}; + struct mip_reg_info { unsigned long long mip_info; unsigned long long delivery_info; @@ -75,69 +86,20 @@ struct mip_reg_info { unsigned long long mip_reg; }; -struct part_info { - unsigned char type; - unsigned char length; - unsigned char part_id; - unsigned char apic_mode; - unsigned long snum; - char ptype[16]; - char sname[64]; - char pname[64]; -}; - struct psai { unsigned long long entry_type; unsigned long long addr; unsigned long long bep_addr; }; -struct es7000_mem_info { - unsigned char type; - unsigned char length; - unsigned char resv[6]; - unsigned long long start; - unsigned long long size; -}; - -struct 
es7000_oem_table { - unsigned long long hdr; - struct mip_reg_info mip; - struct part_info pif; - struct es7000_mem_info shm; - struct psai psai; -}; - #ifdef CONFIG_ACPI - -struct oem_table { +struct es7000_oem_table { struct acpi_table_header Header; u32 OEMTableAddr; u32 OEMTableSize; }; - #endif -struct mip_reg { - unsigned long long off_0x00; - unsigned long long off_0x08; - unsigned long long off_0x10; - unsigned long long off_0x18; - unsigned long long off_0x20; - unsigned long long off_0x28; - unsigned long long off_0x30; - unsigned long long off_0x38; -}; - -#define MIP_SW_APIC 0x1020b -#define MIP_FUNC(VALUE) (VALUE & 0xff) - -#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) -#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - /* * ES7000 Globals */ @@ -228,14 +190,14 @@ static void __init setup_unisys(void) /* * Parse the OEM Table: */ -static int __init parse_unisys_oem (char *oemptr) +static int __init parse_unisys_oem(char *oemptr) { - int i; + int i; int success = 0; - unsigned char type, size; - unsigned long val; - char *tp = NULL; - struct psai *psaip = NULL; + unsigned char type, size; + unsigned long val; + char *tp = NULL; + struct psai *psaip = NULL; struct mip_reg_info *mi; struct mip_reg *host, *mip; @@ -243,7 +205,7 @@ static int __init parse_unisys_oem (char *oemptr) tp += 8; - for (i=0; i <= 6; i++) { + for (i = 0; i <= 6; i++) { type = *tp++; size = *tp++; tp -= 2; @@ -302,7 +264,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { - struct oem_table *t = (struct oem_table *)header; + struct es7000_oem_table *t = (void *)header; oem_addrX = t->OEMTableAddr; oem_size = t->OEMTableSize; @@ -377,11 +339,11 @@ static void __init es7000_enable_apic_mode(void) return; printk("ES7000: Enabling APIC mode.\n"); - memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); - es7000_mip_reg.off_0x00 = MIP_SW_APIC; - es7000_mip_reg.off_0x38 = MIP_VALID; + memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); + es7000_mip_reg.off_0x00 = MIP_SW_APIC; + es7000_mip_reg.off_0x38 = MIP_VALID; - while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { + while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { printk("es7000_enable_apic_mode: command failed, status = %x\n", mip_status); } @@ -444,7 +406,7 @@ static void es7000_send_IPI_all(int vector) static int es7000_apic_id_registered(void) { - return 1; + return 1; } static const cpumask_t *target_cpus_cluster(void) @@ -486,7 +448,7 @@ static void es7000_init_apic_ldr_cluster(void) unsigned long val; int cpu = smp_processor_id(); - apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); + apic_write(APIC_DFR, APIC_DFR_CLUSTER); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } @@ -496,7 +458,7 @@ static void es7000_init_apic_ldr(void) unsigned long val; int cpu = smp_processor_id(); - apic_write(APIC_DFR, APIC_DFR_VALUE); + apic_write(APIC_DFR, APIC_DFR_FLAT); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } @@ -585,7 +547,7 @@ es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) int new_apicid = es7000_cpu_to_logical_apicid(cpu); if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); + printk("%s: Not a valid mask!\n", __func__); return 0xFF; } @@ -619,7 +581,7 @@ static unsigned int 
es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) int new_apicid = es7000_cpu_to_logical_apicid(cpu); if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); + printk("%s: Not a valid mask!\n", __func__); return es7000_cpu_to_logical_apicid(0); } @@ -658,8 +620,9 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) void __init es7000_update_genapic_to_cluster(void) { apic->target_cpus = target_cpus_cluster; - apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER; - apic->irq_dest_mode = INT_DEST_MODE_CLUSTER; + apic->irq_delivery_mode = dest_LowestPrio; + /* logical delivery broadcast to all procs: */ + apic->irq_dest_mode = 1; apic->init_apic_ldr = es7000_init_apic_ldr_cluster; -- cgit v1.2.3 From 7da18ed924b182f8174de243c55a323c56398675 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 15:29:30 +0100 Subject: x86, es7000: misc cleanups These are cleanups that change the md5 signature: - asm/ => linux/ include conversion - simplify the code flow of find_unisys_acpi_oem_table() - move ACPI methods into one #ifdef block - remove 0/NULL initialization of statics - simplify/standardize printouts - update copyrights - more cleanups, pointed out by checkpatch arch/x86/kernel/es7000_32.o: text data bss dec hex filename 2693 192 44 2929 b71 es7000_32.o.before 2688 192 44 2924 b6c es7000_32.o.after Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 170 +++++++++++++++++++++++--------------------- 1 file changed, 90 insertions(+), 80 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 03acbe95d2b..3519f8cab70 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -1,10 +1,14 @@ /* * Written by: Garry Forsgren, Unisys Corporation * Natalie Protasevich, Unisys Corporation + * * This file contains the code to configure and interface * with Unisys ES7000 series hardware system manager. * - * Copyright (c) 2003 Unisys Corporation. All Rights Reserved. + * Copyright (c) 2003 Unisys Corporation. + * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar + * + * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -35,7 +39,9 @@ #include #include #include +#include #include +#include #include #include @@ -44,9 +50,6 @@ #include #include #include -#include -#include -#include /* * ES7000 chipsets @@ -93,22 +96,28 @@ struct psai { }; #ifdef CONFIG_ACPI + struct es7000_oem_table { struct acpi_table_header Header; u32 OEMTableAddr; u32 OEMTableSize; }; + +static unsigned long oem_addrX; +static unsigned long oem_size; + #endif /* * ES7000 Globals */ -static volatile unsigned long *psai = NULL; +static volatile unsigned long *psai; static struct mip_reg *mip_reg; static struct mip_reg *host_reg; static int mip_port; -static unsigned long mip_addr, host_addr; +static unsigned long mip_addr; +static unsigned long host_addr; int es7000_plat; @@ -252,31 +261,35 @@ static int __init parse_unisys_oem(char *oemptr) } #ifdef CONFIG_ACPI - -static unsigned long oem_addrX; -static unsigned long oem_size; - static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; - int i = 0; + struct es7000_oem_table *table; acpi_size tbl_size; + acpi_status ret; + int i = 0; - while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { - if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { - struct es7000_oem_table *t = (void *)header; + for (;;) { + ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size); + if (!ACPI_SUCCESS(ret)) + return -1; - oem_addrX = t->OEMTableAddr; - oem_size = t->OEMTableSize; - early_acpi_os_unmap_memory(header, tbl_size); + if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) + break; - *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, - oem_size); - return 0; - } early_acpi_os_unmap_memory(header, tbl_size); } - return -1; + + table = (void *)header; + + oem_addrX = table->OEMTableAddr; + oem_size = table->OEMTableSize; + + early_acpi_os_unmap_memory(header, tbl_size); + + *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); + + return 0; } static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) @@ -286,7 +299,47 @@ static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) __acpi_unmap_table((char *)oem_addr, oem_size); } -#endif + +static int es7000_check_dsdt(void) +{ + struct acpi_table_header header; + + if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && + !strncmp(header.oem_id, "UNISYS", 6)) + return 1; + return 0; +} + +/* Hook from generic ACPI tables.c */ +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + unsigned long oem_addr = 0; + int check_dsdt; + int ret = 0; + + /* check dsdt at first to avoid clear fix_map for oem_addr */ + check_dsdt = es7000_check_dsdt(); + + if (!find_unisys_acpi_oem_table(&oem_addr)) { + if (check_dsdt) { + ret = parse_unisys_oem((char *)oem_addr); + } else { + setup_unisys(); + ret = 1; + } + /* + * we need to unmap it + */ + unmap_unisys_acpi_oem_table(oem_addr); + } + return ret; +} +#else /* !CONFIG_ACPI: */ +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 0; +} +#endif /* !CONFIG_ACPI */ static void es7000_spin(int n) { @@ -305,7 +358,7 @@ es7000_mip_write(struct mip_reg *mip_reg) spin = MIP_SPIN; while ((host_reg->off_0x38 & MIP_VALID) != 0) { if (--spin <= 0) { - printk("es7000_mip_write: Timeout waiting for Host Valid Flag"); + WARN(1, "Timeout waiting for Host Valid Flag\n"); return 
-1; } es7000_spin(MIP_SPIN); @@ -318,7 +371,7 @@ es7000_mip_write(struct mip_reg *mip_reg) while ((mip_reg->off_0x38 & MIP_VALID) == 0) { if (--spin <= 0) { - printk("es7000_mip_write: Timeout waiting for MIP Valid Flag"); + WARN(1, "Timeout waiting for MIP Valid Flag\n"); return -1; } es7000_spin(MIP_SPIN); @@ -338,15 +391,13 @@ static void __init es7000_enable_apic_mode(void) if (!es7000_plat) return; - printk("ES7000: Enabling APIC mode.\n"); + printk(KERN_INFO "ES7000: Enabling APIC mode.\n"); memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); es7000_mip_reg.off_0x00 = MIP_SW_APIC; es7000_mip_reg.off_0x38 = MIP_VALID; - while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { - printk("es7000_enable_apic_mode: command failed, status = %x\n", - mip_status); - } + while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) + WARN(1, "Command failed, status = %x\n", mip_status); } static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) @@ -377,18 +428,6 @@ static unsigned int es7000_get_apic_id(unsigned long x) return (x >> 24) & 0xFF; } -#ifdef CONFIG_ACPI -static int es7000_check_dsdt(void) -{ - struct acpi_table_header header; - - if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && - !strncmp(header.oem_id, "UNISYS", 6)) - return 1; - return 0; -} -#endif - static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) { default_send_IPI_mask_sequence_phys(mask, vector); @@ -466,10 +505,12 @@ static void es7000_init_apic_ldr(void) static void es7000_setup_apic_routing(void) { int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", + + printk(KERN_INFO + "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? 
"Physical Cluster" : "Logical Cluster", - nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); + nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); } static int es7000_apicid_to_node(int logical_apicid) @@ -488,13 +529,14 @@ static int es7000_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } +static int cpu_id; + static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) { - static int id = 0; physid_mask_t mask; - mask = physid_mask_of_physid(id); - ++id; + mask = physid_mask_of_physid(cpu_id); + ++cpu_id; return mask; } @@ -547,7 +589,7 @@ es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) int new_apicid = es7000_cpu_to_logical_apicid(cpu); if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk("%s: Not a valid mask!\n", __func__); + WARN(1, "Not a valid mask!"); return 0xFF; } @@ -648,38 +690,6 @@ es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) return 0; } -#ifdef CONFIG_ACPI -/* Hook from generic ACPI tables.c */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - unsigned long oem_addr = 0; - int check_dsdt; - int ret = 0; - - /* check dsdt at first to avoid clear fix_map for oem_addr */ - check_dsdt = es7000_check_dsdt(); - - if (!find_unisys_acpi_oem_table(&oem_addr)) { - if (check_dsdt) { - ret = parse_unisys_oem((char *)oem_addr); - } else { - setup_unisys(); - ret = 1; - } - /* - * we need to unmap it - */ - unmap_unisys_acpi_oem_table(oem_addr); - } - return ret; -} -#else -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} -#endif - struct genapic apic_es7000 = { -- cgit v1.2.3 From 36afc3af04f4d3dfa709f7659fcadb3336d66ed2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 15:50:25 +0100 Subject: x86, numaq_32: clean up Impact: cleanup - refactor smp_dump_qct() - tidy up include files, remove duplicates - misc other cleanups, pointed out by checkpatch No code changed: md5: 9c0bc01a53558c77df0f2ebcda7e11a9 numaq_32.o.before.asm 9c0bc01a53558c77df0f2ebcda7e11a9 numaq_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/numaq_32.c | 195 +++++++++++++++++++++++---------------------- 1 file changed, 100 insertions(+), 95 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 15328500de6..a4ea08a9ac9 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -22,21 +22,54 @@ * * Send feedback to */ - #include #include +#include +#include +#include #include #include +#include +#include +#include +#include #include #include #include -#include +#include +#include #include #include +#include #include +#include +#include -#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) +#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) + +static inline void numaq_register_node(int node, struct sys_cfg_data *scd) +{ + struct eachquadmem *eq = scd->eq + node; + + node_set_online(node); + + /* Convert to pages */ + node_start_pfn[node] = + MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size); + + node_end_pfn[node] = + MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); + + e820_register_active_regions(node, node_start_pfn[node], + node_end_pfn[node]); + + memory_present(node, node_start_pfn[node], node_end_pfn[node]); + + node_remap_size[node] = node_memmap_size_bytes(node, + node_start_pfn[node], + node_end_pfn[node]); +} /* * Function: smp_dump_qct() @@ -46,34 +79,18 @@ */ static void __init smp_dump_qct(void) { + struct sys_cfg_data *scd; int node; - 
struct eachquadmem *eq; - struct sys_cfg_data *scd = - (struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR); + + scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); nodes_clear(node_online_map); for_each_node(node) { - if (scd->quads_present31_0 & (1 << node)) { - node_set_online(node); - eq = &scd->eq[node]; - /* Convert to pages */ - node_start_pfn[node] = MB_TO_PAGES( - eq->hi_shrd_mem_start - eq->priv_mem_size); - node_end_pfn[node] = MB_TO_PAGES( - eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); - - e820_register_active_regions(node, node_start_pfn[node], - node_end_pfn[node]); - memory_present(node, - node_start_pfn[node], node_end_pfn[node]); - node_remap_size[node] = node_memmap_size_bytes(node, - node_start_pfn[node], - node_end_pfn[node]); - } + if (scd->quads_present31_0 & (1 << node)) + numaq_register_node(node, scd); } } - void __cpuinit numaq_tsc_disable(void) { if (!found_numaq) @@ -98,20 +115,20 @@ int found_numaq; * hence the mpc_record variable .... can't see a less disgusting way of * doing this .... */ -struct mpc_config_translation { - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; +struct mpc_trans { + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; }; /* x86_quirks member */ -static int mpc_record; -static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] - __cpuinitdata; +static int mpc_record; + +static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; static inline int generate_logical_apicid(int quad, int phys_apicid) { @@ -124,10 +141,12 @@ static int mpc_apic_id(struct mpc_cpu *m) int quad = translation_table[mpc_record]->trans_quad; int logical_apicid = generate_logical_apicid(quad, m->apicid); - printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", - m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, - (m->cpufeature & CPU_MODEL_MASK) >> 4, - m->apicver, quad, logical_apicid); + printk(KERN_DEBUG + "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", + m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, + (m->cpufeature & CPU_MODEL_MASK) >> 4, + m->apicver, quad, logical_apicid); + return logical_apicid; } @@ -143,11 +162,11 @@ static void mpc_oem_bus_info(struct mpc_bus *m, char *name) mp_bus_id_to_node[m->busid] = quad; mp_bus_id_to_local[m->busid] = local; - printk(KERN_INFO "Bus #%d is %s (node %d)\n", - m->busid, name, quad); + + printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad); } -int quad_local_to_mp_bus_id [NR_CPUS/4][4]; +int quad_local_to_mp_bus_id[NR_CPUS/4][4]; /* x86_quirks member */ static void mpc_oem_pci_bus(struct mpc_bus *m) @@ -158,7 +177,7 @@ static void mpc_oem_pci_bus(struct mpc_bus *m) quad_local_to_mp_bus_id[quad][local] = m->busid; } -static void __init MP_translation_info(struct mpc_config_translation *m) +static void __init MP_translation_info(struct mpc_trans *m) { printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", @@ -168,7 +187,8 @@ static void __init MP_translation_info(struct mpc_config_translation *m) if (mpc_record >= MAX_MPC_ENTRY) printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); else - translation_table[mpc_record] = m; /* stash this for later */ + translation_table[mpc_record] = m; /* stash this for later */ + if (m->trans_quad 
< MAX_NUMNODES && !node_online(m->trans_quad)) node_set_online(m->trans_quad); } @@ -186,16 +206,16 @@ static int __init mpf_checksum(unsigned char *mp, int len) /* * Read/parse the MPC oem tables */ - -static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, - unsigned short oemsize) +static void __init + smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize) { int count = sizeof(*oemtable); /* the header size */ unsigned char *oemptr = ((unsigned char *)oemtable) + count; mpc_record = 0; - printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", - oemtable); + printk(KERN_INFO + "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); + if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) { printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", @@ -203,16 +223,18 @@ static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, oemtable->signature[2], oemtable->signature[3]); return; } + if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) { printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); return; } + while (count < oemtable->length) { switch (*oemptr) { case MP_TRANSLATION: { - struct mpc_config_translation *m = - (struct mpc_config_translation *)oemptr; + struct mpc_trans *m = (void *)oemptr; + MP_translation_info(m); oemptr += sizeof(*m); count += sizeof(*m); @@ -220,12 +242,10 @@ static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, break; } default: - { - printk(KERN_WARNING - "Unrecognised OEM table entry type! - %d\n", - (int)*oemptr); - return; - } + printk(KERN_WARNING + "Unrecognised OEM table entry type! - %d\n", + (int)*oemptr); + return; } } } @@ -244,21 +264,21 @@ static int __init numaq_update_genapic(void) } static struct x86_quirks numaq_x86_quirks __initdata = { - .arch_pre_time_init = numaq_pre_time_init, - .arch_time_init = NULL, - .arch_pre_intr_init = NULL, - .arch_memory_setup = NULL, - .arch_intr_init = NULL, - .arch_trap_init = NULL, - .mach_get_smp_config = NULL, - .mach_find_smp_config = NULL, - .mpc_record = &mpc_record, - .mpc_apic_id = mpc_apic_id, - .mpc_oem_bus_info = mpc_oem_bus_info, - .mpc_oem_pci_bus = mpc_oem_pci_bus, - .smp_read_mpc_oem = smp_read_mpc_oem, - .setup_ioapic_ids = numaq_setup_ioapic_ids, - .update_genapic = numaq_update_genapic, + .arch_pre_time_init = numaq_pre_time_init, + .arch_time_init = NULL, + .arch_pre_intr_init = NULL, + .arch_memory_setup = NULL, + .arch_intr_init = NULL, + .arch_trap_init = NULL, + .mach_get_smp_config = NULL, + .mach_find_smp_config = NULL, + .mpc_record = &mpc_record, + .mpc_apic_id = mpc_apic_id, + .mpc_oem_bus_info = mpc_oem_bus_info, + .mpc_oem_pci_bus = mpc_oem_pci_bus, + .smp_read_mpc_oem = smp_read_mpc_oem, + .setup_ioapic_ids = numaq_setup_ioapic_ids, + .update_genapic = numaq_update_genapic, }; void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) @@ -275,6 +295,7 @@ static __init void early_check_numaq(void) * Find possible boot-time SMP configuration: */ early_find_smp_config(); + /* * get boot-time SMP configuration: */ @@ -291,28 +312,10 @@ int __init get_memcfg_numaq(void) if (!found_numaq) return 0; smp_dump_qct(); + return 1; } -/* - * APIC driver for the IBM NUMAQ chipset. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) static inline unsigned int numaq_get_apic_id(unsigned long x) @@ -337,8 +340,8 @@ static inline void numaq_send_IPI_all(int vector) extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); -#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) -#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) +#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) +#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) /* * Because we use NMIs rather than the INIT-STARTUP sequence to @@ -426,7 +429,7 @@ static inline int numaq_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline int numaq_apicid_to_node(int logical_apicid) +static inline int numaq_apicid_to_node(int logical_apicid) { return logical_apicid >> 4; } @@ -468,7 +471,9 @@ static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } -static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) + +static int +__numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { numaq_mps_oem_check(mpc, oem, productid); return found_numaq; -- cgit v1.2.3 From cb81eaedf12d3e5e6e3dcf3320089660f7fb7464 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 17:53:54 +0100 Subject: x86, numaq_32: clean up, misc Impact: cleanup - misc other cleanups that change the md5 signature - consolidate global variables - remove unnecessary __numaq_mps_oem_check() wrapper - make numaq_mps_oem_check static - update copyrights - misc other cleanups pointed out by checkpatch Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec.h | 2 - arch/x86/kernel/numaq_32.c | 97 +++++++++++++++++++++---------------------- 2 files changed, 48 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 5916c8df09d..642fc7fc8cd 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -167,6 +167,4 @@ extern int generic_mps_oem_check(struct mpc_table *, char *, char *); extern int default_acpi_madt_oem_check(char *, char *); -extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); - #endif /* _ASM_X86_MPSPEC_H */ diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index a4ea08a9ac9..62f9274a2ed 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -2,6 +2,7 @@ * Written by: Patricia Gaughen, IBM Corporation * * Copyright (C) 2002, IBM Corp. + * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar * * All rights reserved. * @@ -23,6 +24,7 @@ * Send feedback to */ #include +#include #include #include #include @@ -33,10 +35,10 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -44,10 +46,36 @@ #include #include #include -#include #define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) +int found_numaq; + +/* + * Have to match translation table entries to main table entries by counter + * hence the mpc_record variable .... can't see a less disgusting way of + * doing this .... 
+ */ +struct mpc_trans { + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; +}; + +/* x86_quirks member */ +static int mpc_record; + +static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; + +int mp_bus_id_to_node[MAX_MP_BUSSES]; +int mp_bus_id_to_local[MAX_MP_BUSSES]; +int quad_local_to_mp_bus_id[NR_CPUS/4][4]; + + static inline void numaq_register_node(int node, struct sys_cfg_data *scd) { struct eachquadmem *eq = scd->eq + node; @@ -108,28 +136,6 @@ static int __init numaq_pre_time_init(void) return 0; } -int found_numaq; - -/* - * Have to match translation table entries to main table entries by counter - * hence the mpc_record variable .... can't see a less disgusting way of - * doing this .... - */ -struct mpc_trans { - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - -/* x86_quirks member */ -static int mpc_record; - -static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; - static inline int generate_logical_apicid(int quad, int phys_apicid) { return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1); @@ -150,10 +156,6 @@ static int mpc_apic_id(struct mpc_cpu *m) return logical_apicid; } -int mp_bus_id_to_node[MAX_MP_BUSSES]; - -int mp_bus_id_to_local[MAX_MP_BUSSES]; - /* x86_quirks member */ static void mpc_oem_bus_info(struct mpc_bus *m, char *name) { @@ -166,8 +168,6 @@ static void mpc_oem_bus_info(struct mpc_bus *m, char *name) printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad); } -int quad_local_to_mp_bus_id[NR_CPUS/4][4]; - /* x86_quirks member */ static void mpc_oem_pci_bus(struct mpc_bus *m) { @@ -180,7 +180,7 @@ static void mpc_oem_pci_bus(struct mpc_bus *m) static void __init MP_translation_info(struct mpc_trans *m) { printk(KERN_INFO - "Translation: record %d, type %d, quad %d, global %d, local %d\n", + "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); @@ -281,14 +281,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = { .update_genapic = numaq_update_genapic, }; -void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (strncmp(oem, "IBM NUMA", 8)) - printk("Warning! Not a NUMA-Q system!\n"); - else - found_numaq = 1; -} - static __init void early_check_numaq(void) { /* @@ -338,8 +330,6 @@ static inline void numaq_send_IPI_all(int vector) numaq_send_IPI_mask(cpu_online_mask, vector); } -extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); - #define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) #define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) @@ -355,7 +345,7 @@ static inline void numaq_smp_callin_clear_local_apic(void) static inline void numaq_store_NMI_vector(unsigned short *high, unsigned short *low) { - printk("Storing NMI vector\n"); + printk(KERN_ERR "Storing NMI vector\n"); *high = *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); *low = @@ -390,8 +380,9 @@ static inline void numaq_init_apic_ldr(void) static inline void numaq_setup_apic_routing(void) { - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "NUMA-Q", nr_ioapics); + printk(KERN_INFO + "Enabling APIC mode: NUMA-Q. 
Using %d I/O APICs\n", + nr_ioapics); } /* @@ -473,9 +464,13 @@ static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) } static int -__numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { - numaq_mps_oem_check(mpc, oem, productid); + if (strncmp(oem, "IBM NUMA", 8)) + printk(KERN_ERR "Warning! Not a NUMA-Q system!\n"); + else + found_numaq = 1; + return found_numaq; } @@ -505,9 +500,13 @@ static void numaq_setup_portio_remap(void) if (num_quads <= 1) return; - printk("Remapping cross-quad port I/O for %d quads\n", num_quads); + printk(KERN_INFO + "Remapping cross-quad port I/O for %d quads\n", num_quads); + xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); - printk("xquad_portio vaddr 0x%08lx, len %08lx\n", + + printk(KERN_INFO + "xquad_portio vaddr 0x%08lx, len %08lx\n", (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); } @@ -542,7 +541,7 @@ struct genapic apic_numaq = { .check_phys_apicid_present = numaq_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = numaq_phys_pkg_id, - .mps_oem_check = __numaq_mps_oem_check, + .mps_oem_check = numaq_mps_oem_check, .get_apic_id = numaq_get_apic_id, .set_apic_id = NULL, -- cgit v1.2.3 From ab6fb7c0b03e2c3286f316c840347be8b9ee3d9f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 16:22:09 +0100 Subject: x86, apic: remove ->store_NMI_vector() Impact: cleanup It's not used by anything anymore. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 1 - arch/x86/kernel/bigsmp_32.c | 1 - arch/x86/kernel/es7000_32.c | 1 - arch/x86/kernel/genapic_flat_64.c | 2 -- arch/x86/kernel/genx2apic_cluster.c | 1 - arch/x86/kernel/genx2apic_phys.c | 1 - arch/x86/kernel/genx2apic_uv_x.c | 1 - arch/x86/kernel/numaq_32.c | 11 ----------- arch/x86/kernel/probe_32.c | 1 - arch/x86/kernel/smpboot.c | 13 +++++-------- arch/x86/kernel/summit_32.c | 1 - 11 files changed, 5 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 2cdd19e4536..122d8eda275 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -313,7 +313,6 @@ struct genapic { void (*wait_for_init_deassert)(atomic_t *deassert); void (*smp_callin_clear_local_apic)(void); - void (*store_NMI_vector)(unsigned short *high, unsigned short *low); void (*inquire_remote_apic)(int apicid); /* apic ops */ diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 0de9eed7c60..17c25bc26a5 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -263,7 +263,6 @@ struct genapic apic_bigsmp = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 3519f8cab70..1d6e99a8edc 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -746,7 +746,6 @@ struct genapic apic_es7000 = { /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index a7d84763648..27b81208009 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ 
b/arch/x86/kernel/genapic_flat_64.c @@ -227,7 +227,6 @@ struct genapic apic_flat = { .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = NULL, .read = native_apic_mem_read, @@ -379,7 +378,6 @@ struct genapic apic_physflat = { .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = NULL, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index f5e02cffa26..b9ef0091c4d 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -232,7 +232,6 @@ struct genapic apic_x2apic_cluster = { .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 11eb4cb7ca3..bb752015776 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -218,7 +218,6 @@ struct genapic apic_x2apic_phys = { .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index c1746a198bd..dcb8c14287d 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -290,7 +290,6 @@ struct genapic apic_x2apic_uv_x = { .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 62f9274a2ed..a709de87819 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -342,16 +342,6 @@ static inline void numaq_smp_callin_clear_local_apic(void) clear_local_APIC(); } -static inline void -numaq_store_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk(KERN_ERR "Storing NMI vector\n"); - *high = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH)); - *low = - *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW)); -} - static inline const cpumask_t *numaq_target_cpus(void) { return &CPU_MASK_ALL; @@ -564,7 +554,6 @@ struct genapic apic_numaq = { .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, - .store_NMI_vector = numaq_store_NMI_vector, .inquire_remote_apic = NULL, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index fd1352ac909..5914ffb6e40 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -131,7 +131,6 @@ struct genapic apic_default = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 09e73876a44..9ce666387f3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -745,21 +745,21 @@ static void __cpuinit do_fork_idle(struct work_struct *work) 
complete(&c_idle->done); } -static int __cpuinit do_boot_cpu(int apicid, int cpu) /* * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad * (ie clustered apic addressing mode), this is a LOGICAL apic ID. * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. */ +static int __cpuinit do_boot_cpu(int apicid, int cpu) { unsigned long boot_error = 0; - int timeout; unsigned long start_ip; - unsigned short nmi_high = 0, nmi_low = 0; + int timeout; struct create_idle c_idle = { - .cpu = cpu, - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), }; + INIT_WORK(&c_idle.work, do_fork_idle); alternatives_smp_switch(1); @@ -824,9 +824,6 @@ do_rest: pr_debug("Setting warm reset code and vector.\n"); - if (apic->store_NMI_vector) - apic->store_NMI_vector(&nmi_high, &nmi_low); - smpboot_setup_warm_reset_vector(start_ip); /* * Be paranoid about clearing APIC errors. diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index c4690349a54..8f1a11b072a 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -590,7 +590,6 @@ struct genapic apic_summit = { .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, - .store_NMI_vector = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, -- cgit v1.2.3 From be163a159b223e94b3180afdd47a8d468eb9a492 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 16:28:46 +0100 Subject: x86, apic: rename 'genapic' to 'apic' Impact: cleanup Now that all APIC code is consolidated there's nothing 'gen' about apics anymore - so rename 'struct genapic' to 'struct apic'. This shortens the code and is nicer to read as well. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 18 +++++++++--------- arch/x86/include/asm/setup.h | 2 +- arch/x86/kernel/bigsmp_32.c | 2 +- arch/x86/kernel/es7000_32.c | 10 +++++----- arch/x86/kernel/genapic_64.c | 18 +++++++++--------- arch/x86/kernel/genapic_flat_64.c | 4 ++-- arch/x86/kernel/genx2apic_cluster.c | 2 +- arch/x86/kernel/genx2apic_phys.c | 2 +- arch/x86/kernel/genx2apic_uv_x.c | 2 +- arch/x86/kernel/numaq_32.c | 6 +++--- arch/x86/kernel/probe_32.c | 36 ++++++++++++++++++------------------ arch/x86/kernel/setup.c | 4 ++-- arch/x86/kernel/summit_32.c | 2 +- 13 files changed, 54 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 122d8eda275..dce1bf696cc 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -248,7 +248,7 @@ static inline void disable_local_APIC(void) { } * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ -struct genapic { +struct apic { char *name; int (*probe)(void); @@ -283,7 +283,7 @@ struct genapic { int (*phys_pkg_id)(int cpuid_apic, int index_msb); /* - * When one of the next two hooks returns 1 the genapic + * When one of the next two hooks returns 1 the apic * is switched to this. 
Essentially they are additional * probe functions: */ @@ -324,7 +324,7 @@ struct genapic { u32 (*safe_wait_icr_idle)(void); }; -extern struct genapic *apic; +extern struct apic *apic; static inline u32 apic_read(u32 reg) { @@ -385,17 +385,17 @@ static inline unsigned default_get_apic_id(unsigned long x) #define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 #ifdef CONFIG_X86_32 -extern void es7000_update_genapic_to_cluster(void); +extern void es7000_update_apic_to_cluster(void); #else -extern struct genapic apic_flat; -extern struct genapic apic_physflat; -extern struct genapic apic_x2apic_cluster; -extern struct genapic apic_x2apic_phys; +extern struct apic apic_flat; +extern struct apic apic_physflat; +extern struct apic apic_x2apic_cluster; +extern struct apic apic_x2apic_phys; extern int default_acpi_madt_oem_check(char *, char *); extern void apic_send_IPI_self(int vector); -extern struct genapic apic_x2apic_uv_x; +extern struct apic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); extern int default_cpu_present_to_apicid(int mps_cpu); diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index c230189462a..8029369cd6f 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -30,7 +30,7 @@ struct x86_quirks { void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, unsigned short oemsize); int (*setup_ioapic_ids)(void); - int (*update_genapic)(void); + int (*update_apic)(void); }; #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c index 17c25bc26a5..0b1093394fd 100644 --- a/arch/x86/kernel/bigsmp_32.c +++ b/arch/x86/kernel/bigsmp_32.c @@ -210,7 +210,7 @@ static int probe_bigsmp(void) return dmi_bigsmp; } -struct genapic apic_bigsmp = { +struct apic apic_bigsmp = { .name = "bigsmp", .probe = probe_bigsmp, diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 1d6e99a8edc..320f2d2e4e5 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -163,14 +163,14 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) return 0; } -static int __init es7000_update_genapic(void) +static int __init es7000_update_apic(void) { apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; /* MPENTIUMIII */ if (boot_cpu_data.x86 == 6 && (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { - es7000_update_genapic_to_cluster(); + es7000_update_apic_to_cluster(); apic->wait_for_init_deassert = NULL; apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; } @@ -193,7 +193,7 @@ static void __init setup_unisys(void) es7000_plat = ES7000_CLASSIC; ioapic_renumber_irq = es7000_rename_gsi; - x86_quirks->update_genapic = es7000_update_genapic; + x86_quirks->update_apic = es7000_update_apic; } /* @@ -659,7 +659,7 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -void __init es7000_update_genapic_to_cluster(void) +void __init es7000_update_apic_to_cluster(void) { apic->target_cpus = target_cpus_cluster; apic->irq_delivery_mode = dest_LowestPrio; @@ -691,7 +691,7 @@ es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) } -struct genapic apic_es7000 = { +struct apic apic_es7000 = { .name = "es7000", .probe = probe_es7000, diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 91cae6f6e73..70935dd904d 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -23,16 +23,16 @@ #include #include -extern struct genapic apic_flat; -extern struct genapic apic_physflat; -extern 
struct genapic apic_x2xpic_uv_x; -extern struct genapic apic_x2apic_phys; -extern struct genapic apic_x2apic_cluster; +extern struct apic apic_flat; +extern struct apic apic_physflat; +extern struct apic apic_x2xpic_uv_x; +extern struct apic apic_x2apic_phys; +extern struct apic apic_x2apic_cluster; -struct genapic __read_mostly *apic = &apic_flat; +struct apic __read_mostly *apic = &apic_flat; EXPORT_SYMBOL_GPL(apic); -static struct genapic *apic_probe[] __initdata = { +static struct apic *apic_probe[] __initdata = { #ifdef CONFIG_X86_UV &apic_x2apic_uv_x, #endif @@ -62,8 +62,8 @@ void __init default_setup_apic_routing(void) printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); } - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); } /* Same for both flat and physical. */ diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 27b81208009..3b002995e14 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -178,7 +178,7 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb) return hard_smp_processor_id() >> index_msb; } -struct genapic apic_flat = { +struct apic apic_flat = { .name = "flat", .probe = NULL, .acpi_madt_oem_check = flat_acpi_madt_oem_check, @@ -327,7 +327,7 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -struct genapic apic_physflat = { +struct apic apic_physflat = { .name = "physical flat", .probe = NULL, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index b9ef0091c4d..4e39d9ad4d5 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -182,7 +182,7 @@ static void init_x2apic_ldr(void) per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); } -struct genapic apic_x2apic_cluster = { +struct apic apic_x2apic_cluster = { .name = "cluster x2apic", .probe = NULL, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index bb752015776..d2d52eb9f7e 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -168,7 +168,7 @@ static void init_x2apic_ldr(void) { } -struct genapic apic_x2apic_phys = { +struct apic apic_x2apic_phys = { .name = "physical x2apic", .probe = NULL, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index dcb8c14287d..20b4ad07c3a 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -240,7 +240,7 @@ static void uv_send_IPI_self(int vector) apic_write(APIC_SELF_IPI, vector); } -struct genapic apic_x2apic_uv_x = { +struct apic apic_x2apic_uv_x = { .name = "UV large system", .probe = NULL, diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index a709de87819..d9d6d61eed8 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -256,7 +256,7 @@ static int __init numaq_setup_ioapic_ids(void) return 1; } -static int __init numaq_update_genapic(void) +static int __init numaq_update_apic(void) { apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; @@ -278,7 +278,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = { .mpc_oem_pci_bus = mpc_oem_pci_bus, .smp_read_mpc_oem = smp_read_mpc_oem, .setup_ioapic_ids = numaq_setup_ioapic_ids, - .update_genapic = numaq_update_genapic, + .update_apic = numaq_update_apic, }; static __init void early_check_numaq(void) @@ -500,7 +500,7 @@ static void numaq_setup_portio_remap(void) (u_long) 
xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); } -struct genapic apic_numaq = { +struct apic apic_numaq = { .name = "NUMAQ", .probe = probe_numaq, diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c index 5914ffb6e40..5fa48332c5c 100644 --- a/arch/x86/kernel/probe_32.c +++ b/arch/x86/kernel/probe_32.c @@ -78,7 +78,7 @@ static int probe_default(void) return 1; } -struct genapic apic_default = { +struct apic apic_default = { .name = "default", .probe = probe_default, @@ -141,16 +141,16 @@ struct genapic apic_default = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; -extern struct genapic apic_numaq; -extern struct genapic apic_summit; -extern struct genapic apic_bigsmp; -extern struct genapic apic_es7000; -extern struct genapic apic_default; +extern struct apic apic_numaq; +extern struct apic apic_summit; +extern struct apic apic_bigsmp; +extern struct apic apic_es7000; +extern struct apic apic_default; -struct genapic *apic = &apic_default; +struct apic *apic = &apic_default; EXPORT_SYMBOL_GPL(apic); -static struct genapic *apic_probe[] __initdata = { +static struct apic *apic_probe[] __initdata = { #ifdef CONFIG_X86_NUMAQ &apic_numaq, #endif @@ -183,8 +183,8 @@ static int __init parse_apic(char *arg) } } - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); /* Parsed again by __setup for debug/verbose */ return 0; @@ -204,8 +204,8 @@ void __init generic_bigsmp_probe(void) if (!cmdline_apic && apic == &apic_default) { if (apic_bigsmp.probe()) { apic = &apic_bigsmp; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); printk(KERN_INFO "Overriding APIC driver with %s\n", apic->name); } @@ -227,8 +227,8 @@ void __init generic_apic_probe(void) if (!apic_probe[i]) panic("Didn't find an APIC driver"); - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); } printk(KERN_INFO "Using APIC driver %s\n", apic->name); } @@ -248,8 +248,8 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) if (!cmdline_apic) { apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", apic->name); } @@ -270,8 +270,8 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) if (!cmdline_apic) { apic = apic_probe[i]; - if (x86_quirks->update_genapic) - x86_quirks->update_genapic(); + if (x86_quirks->update_apic) + x86_quirks->update_apic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", apic->name); } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b2da0b1d15e..6b588d6b388 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -599,7 +599,7 @@ static int __init setup_elfcorehdr(char *arg) early_param("elfcorehdr", setup_elfcorehdr); #endif -static int __init default_update_genapic(void) +static int __init default_update_apic(void) { #ifdef CONFIG_SMP if (!apic->wakeup_cpu) @@ -610,7 +610,7 @@ static int __init default_update_genapic(void) } static struct x86_quirks default_x86_quirks __initdata = { - .update_genapic = default_update_genapic, + .update_apic = default_update_apic, }; struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 8f1a11b072a..cfe7b09015d 100644 --- 
a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -537,7 +537,7 @@ void __init setup_summit(void) } #endif -struct genapic apic_summit = { +struct apic apic_summit = { .name = "summit", .probe = probe_summit, -- cgit v1.2.3 From f62bae5009c1ba596cd475cafbc83e0570a36e26 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 18:09:24 +0100 Subject: x86, apic: move APIC drivers to arch/x86/kernel/apic/* arch/x86/kernel/ is getting a bit crowded, and the APIC drivers are scattered into various different files. Move them to arch/x86/kernel/apic/*, and also remove the 'gen' prefix from those which had it. Also move APIC related functionality: the IO-APIC driver, the NMI and the IPI code. Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 27 +- arch/x86/kernel/apic.c | 2212 ------------------ arch/x86/kernel/apic/Makefile | 15 + arch/x86/kernel/apic/apic.c | 2212 ++++++++++++++++++ arch/x86/kernel/apic/apic_64.c | 89 + arch/x86/kernel/apic/apic_flat_64.c | 389 +++ arch/x86/kernel/apic/io_apic.c | 4160 +++++++++++++++++++++++++++++++++ arch/x86/kernel/apic/ipi.c | 164 ++ arch/x86/kernel/apic/nmi.c | 564 +++++ arch/x86/kernel/apic/x2apic_cluster.c | 243 ++ arch/x86/kernel/apic/x2apic_phys.c | 229 ++ arch/x86/kernel/apic/x2apic_uv_x.c | 643 +++++ arch/x86/kernel/genapic_64.c | 89 - arch/x86/kernel/genapic_flat_64.c | 389 --- arch/x86/kernel/genx2apic_cluster.c | 243 -- arch/x86/kernel/genx2apic_phys.c | 229 -- arch/x86/kernel/genx2apic_uv_x.c | 643 ----- arch/x86/kernel/io_apic.c | 4160 --------------------------------- arch/x86/kernel/ipi.c | 164 -- arch/x86/kernel/nmi.c | 564 ----- 20 files changed, 8719 insertions(+), 8709 deletions(-) delete mode 100644 arch/x86/kernel/apic.c create mode 100644 arch/x86/kernel/apic/Makefile create mode 100644 arch/x86/kernel/apic/apic.c create mode 100644 arch/x86/kernel/apic/apic_64.c create mode 100644 arch/x86/kernel/apic/apic_flat_64.c create mode 100644 arch/x86/kernel/apic/io_apic.c create mode 100644 arch/x86/kernel/apic/ipi.c create mode 100644 arch/x86/kernel/apic/nmi.c create mode 100644 arch/x86/kernel/apic/x2apic_cluster.c create mode 100644 arch/x86/kernel/apic/x2apic_phys.c create mode 100644 arch/x86/kernel/apic/x2apic_uv_x.c delete mode 100644 arch/x86/kernel/genapic_64.c delete mode 100644 arch/x86/kernel/genapic_flat_64.c delete mode 100644 arch/x86/kernel/genx2apic_cluster.c delete mode 100644 arch/x86/kernel/genx2apic_phys.c delete mode 100644 arch/x86/kernel/genx2apic_uv_x.c delete mode 100644 arch/x86/kernel/io_apic.c delete mode 100644 arch/x86/kernel/ipi.c delete mode 100644 arch/x86/kernel/nmi.c (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 1cefd218f76..9139ff69471 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -58,13 +58,12 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o +obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse.o -obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o -obj-$(CONFIG_X86_IO_APIC) += io_apic.o +obj-$(CONFIG_X86_LOCAL_APIC) += apic/ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o @@ -116,17 +115,13 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename 
without _64 ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-y += genapic_64.o genapic_flat_64.o - obj-$(CONFIG_X86_X2APIC) += genx2apic_cluster.o - obj-$(CONFIG_X86_X2APIC) += genx2apic_phys.o - obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o - obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o - obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o - obj-$(CONFIG_AUDIT) += audit_64.o - - obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o - obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o - - obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o + obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o + obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o + obj-$(CONFIG_AUDIT) += audit_64.o + + obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o + obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o + obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o + + obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o endif diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c deleted file mode 100644 index c12823eb55b..00000000000 --- a/arch/x86/kernel/apic.c +++ /dev/null @@ -1,2212 +0,0 @@ -/* - * Local APIC handling, local APIC timers - * - * (c) 1999, 2000, 2009 Ingo Molnar - * - * Fixes - * Maciej W. Rozycki : Bits for genuine 82489DX APICs; - * thanks to Eric Gilmore - * and Rolf G. Tews - * for testing these extensively. - * Maciej W. Rozycki : Various updates and fixes. - * Mikael Pettersson : Power Management for UP-APIC. - * Pavel Machek and - * Mikael Pettersson : PM converted to driver model. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -unsigned int num_processors; - -unsigned disabled_cpus __cpuinitdata; - -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; - -/* - * The highest APIC ID seen during enumeration. - * - * This determines the messaging protocol we can use: if all APIC IDs - * are in the 0 ... 7 range, then we can use logical addressing which - * has some performance advantages (better broadcasting). - * - * If there's an APIC ID above 8, we use physical addressing. - */ -unsigned int max_physical_apicid; - -/* - * Bitmask of physically existing CPUs: - */ -physid_mask_t phys_cpu_present_map; - -/* - * Map cpu index to physical APIC ID - */ -DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); -DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); -EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); - -#ifdef CONFIG_X86_32 -/* - * Knob to control our willingness to enable the local APIC. 
- * - * +1=force-enable - */ -static int force_enable_local_apic; -/* - * APIC command line parameters - */ -static int __init parse_lapic(char *arg) -{ - force_enable_local_apic = 1; - return 0; -} -early_param("lapic", parse_lapic); -/* Local APIC was disabled by the BIOS and enabled by the kernel */ -static int enabled_via_apicbase; - -#endif - -#ifdef CONFIG_X86_64 -static int apic_calibrate_pmtmr __initdata; -static __init int setup_apicpmtimer(char *s) -{ - apic_calibrate_pmtmr = 1; - notsc_setup(NULL); - return 0; -} -__setup("apicpmtimer", setup_apicpmtimer); -#endif - -#ifdef CONFIG_X86_X2APIC -int x2apic; -/* x2apic enabled before OS handover */ -static int x2apic_preenabled; -static int disable_x2apic; -static __init int setup_nox2apic(char *str) -{ - disable_x2apic = 1; - setup_clear_cpu_cap(X86_FEATURE_X2APIC); - return 0; -} -early_param("nox2apic", setup_nox2apic); -#endif - -unsigned long mp_lapic_addr; -int disable_apic; -/* Disable local APIC timer from the kernel commandline or via dmi quirk */ -static int disable_apic_timer __cpuinitdata; -/* Local APIC timer works in C2 */ -int local_apic_timer_c2_ok; -EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); - -int first_system_vector = 0xfe; - -/* - * Debug level, exported for io_apic.c - */ -unsigned int apic_verbosity; - -int pic_mode; - -/* Have we found an MP table */ -int smp_found_config; - -static struct resource lapic_resource = { - .name = "Local APIC", - .flags = IORESOURCE_MEM | IORESOURCE_BUSY, -}; - -static unsigned int calibration_result; - -static int lapic_next_event(unsigned long delta, - struct clock_event_device *evt); -static void lapic_timer_setup(enum clock_event_mode mode, - struct clock_event_device *evt); -static void lapic_timer_broadcast(const struct cpumask *mask); -static void apic_pm_activate(void); - -/* - * The local apic timer can be used for any function which is CPU local. 
- */ -static struct clock_event_device lapic_clockevent = { - .name = "lapic", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT - | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, - .shift = 32, - .set_mode = lapic_timer_setup, - .set_next_event = lapic_next_event, - .broadcast = lapic_timer_broadcast, - .rating = 100, - .irq = -1, -}; -static DEFINE_PER_CPU(struct clock_event_device, lapic_events); - -static unsigned long apic_phys; - -/* - * Get the LAPIC version - */ -static inline int lapic_get_version(void) -{ - return GET_APIC_VERSION(apic_read(APIC_LVR)); -} - -/* - * Check, if the APIC is integrated or a separate chip - */ -static inline int lapic_is_integrated(void) -{ -#ifdef CONFIG_X86_64 - return 1; -#else - return APIC_INTEGRATED(lapic_get_version()); -#endif -} - -/* - * Check, whether this is a modern or a first generation APIC - */ -static int modern_apic(void) -{ - /* AMD systems use old APIC versions, so check the CPU */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 >= 0xf) - return 1; - return lapic_get_version() >= 0x14; -} - -void native_apic_wait_icr_idle(void) -{ - while (apic_read(APIC_ICR) & APIC_ICR_BUSY) - cpu_relax(); -} - -u32 native_safe_apic_wait_icr_idle(void) -{ - u32 send_status; - int timeout; - - timeout = 0; - do { - send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; - if (!send_status) - break; - udelay(100); - } while (timeout++ < 1000); - - return send_status; -} - -void native_apic_icr_write(u32 low, u32 id) -{ - apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); - apic_write(APIC_ICR, low); -} - -u64 native_apic_icr_read(void) -{ - u32 icr1, icr2; - - icr2 = apic_read(APIC_ICR2); - icr1 = apic_read(APIC_ICR); - - return icr1 | ((u64)icr2 << 32); -} - -/** - * enable_NMI_through_LVT0 - enable NMI through local vector table 0 - */ -void __cpuinit enable_NMI_through_LVT0(void) -{ - unsigned int v; - - /* unmask and set to NMI */ - v = APIC_DM_NMI; - - /* Level triggered for 82489DX (32bit mode) */ - if (!lapic_is_integrated()) - v |= APIC_LVT_LEVEL_TRIGGER; - - apic_write(APIC_LVT0, v); -} - -#ifdef CONFIG_X86_32 -/** - * get_physical_broadcast - Get number of physical broadcast IDs - */ -int get_physical_broadcast(void) -{ - return modern_apic() ? 0xff : 0xf; -} -#endif - -/** - * lapic_get_maxlvt - get the maximum number of local vector table entries - */ -int lapic_get_maxlvt(void) -{ - unsigned int v; - - v = apic_read(APIC_LVR); - /* - * - we always have APIC integrated on 64bit mode - * - 82489DXs do not report # of LVT entries - */ - return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; -} - -/* - * Local APIC timer - */ - -/* Clock divisor */ -#define APIC_DIVISOR 16 - -/* - * This function sets up the local APIC timer, with a timeout of - * 'clocks' APIC bus clock. During calibration we actually call - * this function twice on the boot CPU, once with a bogus timeout - * value, second time for real. The other (noncalibrating) CPUs - * call this function only once, with the real, calibrated value. - * - * We do reads before writes even if unnecessary, to get around the - * P5 APIC double write bug. 
- */ -static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) -{ - unsigned int lvtt_value, tmp_value; - - lvtt_value = LOCAL_TIMER_VECTOR; - if (!oneshot) - lvtt_value |= APIC_LVT_TIMER_PERIODIC; - if (!lapic_is_integrated()) - lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); - - if (!irqen) - lvtt_value |= APIC_LVT_MASKED; - - apic_write(APIC_LVTT, lvtt_value); - - /* - * Divide PICLK by 16 - */ - tmp_value = apic_read(APIC_TDCR); - apic_write(APIC_TDCR, - (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | - APIC_TDR_DIV_16); - - if (!oneshot) - apic_write(APIC_TMICT, clocks / APIC_DIVISOR); -} - -/* - * Setup extended LVT, AMD specific (K8, family 10h) - * - * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and - * MCE interrupts are supported. Thus MCE offset must be set to 0. - * - * If mask=1, the LVT entry does not generate interrupts while mask=0 - * enables the vector. See also the BKDGs. - */ - -#define APIC_EILVT_LVTOFF_MCE 0 -#define APIC_EILVT_LVTOFF_IBS 1 - -static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) -{ - unsigned long reg = (lvt_off << 4) + APIC_EILVT0; - unsigned int v = (mask << 16) | (msg_type << 8) | vector; - - apic_write(reg, v); -} - -u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) -{ - setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); - return APIC_EILVT_LVTOFF_MCE; -} - -u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) -{ - setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); - return APIC_EILVT_LVTOFF_IBS; -} -EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); - -/* - * Program the next event, relative to now - */ -static int lapic_next_event(unsigned long delta, - struct clock_event_device *evt) -{ - apic_write(APIC_TMICT, delta); - return 0; -} - -/* - * Setup the lapic timer in periodic or oneshot mode - */ -static void lapic_timer_setup(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - unsigned long flags; - unsigned int v; - - /* Lapic used as dummy for broadcast ? */ - if (evt->features & CLOCK_EVT_FEAT_DUMMY) - return; - - local_irq_save(flags); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_ONESHOT: - __setup_APIC_LVTT(calibration_result, - mode != CLOCK_EVT_MODE_PERIODIC, 1); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - v = apic_read(APIC_LVTT); - v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); - apic_write(APIC_LVTT, v); - apic_write(APIC_TMICT, 0xffffffff); - break; - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do here */ - break; - } - - local_irq_restore(flags); -} - -/* - * Local APIC timer broadcast function - */ -static void lapic_timer_broadcast(const struct cpumask *mask) -{ -#ifdef CONFIG_SMP - apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR); -#endif -} - -/* - * Setup the local APIC timer for this CPU. Copy the initilized values - * of the boot CPU and register the clock event in the framework. - */ -static void __cpuinit setup_APIC_timer(void) -{ - struct clock_event_device *levt = &__get_cpu_var(lapic_events); - - memcpy(levt, &lapic_clockevent, sizeof(*levt)); - levt->cpumask = cpumask_of(smp_processor_id()); - - clockevents_register_device(levt); -} - -/* - * In this functions we calibrate APIC bus clocks to the external timer. - * - * We want to do the calibration only once since we want to have local timer - * irqs syncron. CPUs connected by the same APIC bus have the very same bus - * frequency. 
- * - * This was previously done by reading the PIT/HPET and waiting for a wrap - * around to find out, that a tick has elapsed. I have a box, where the PIT - * readout is broken, so it never gets out of the wait loop again. This was - * also reported by others. - * - * Monitoring the jiffies value is inaccurate and the clockevents - * infrastructure allows us to do a simple substitution of the interrupt - * handler. - * - * The calibration routine also uses the pm_timer when possible, as the PIT - * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes - * back to normal later in the boot process). - */ - -#define LAPIC_CAL_LOOPS (HZ/10) - -static __initdata int lapic_cal_loops = -1; -static __initdata long lapic_cal_t1, lapic_cal_t2; -static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; -static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; -static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; - -/* - * Temporary interrupt handler. - */ -static void __init lapic_cal_handler(struct clock_event_device *dev) -{ - unsigned long long tsc = 0; - long tapic = apic_read(APIC_TMCCT); - unsigned long pm = acpi_pm_read_early(); - - if (cpu_has_tsc) - rdtscll(tsc); - - switch (lapic_cal_loops++) { - case 0: - lapic_cal_t1 = tapic; - lapic_cal_tsc1 = tsc; - lapic_cal_pm1 = pm; - lapic_cal_j1 = jiffies; - break; - - case LAPIC_CAL_LOOPS: - lapic_cal_t2 = tapic; - lapic_cal_tsc2 = tsc; - if (pm < lapic_cal_pm1) - pm += ACPI_PM_OVRRUN; - lapic_cal_pm2 = pm; - lapic_cal_j2 = jiffies; - break; - } -} - -static int __init -calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) -{ - const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; - const long pm_thresh = pm_100ms / 100; - unsigned long mult; - u64 res; - -#ifndef CONFIG_X86_PM_TIMER - return -1; -#endif - - apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm); - - /* Check, if the PM timer is available */ - if (!deltapm) - return -1; - - mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); - - if (deltapm > (pm_100ms - pm_thresh) && - deltapm < (pm_100ms + pm_thresh)) { - apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n"); - return 0; - } - - res = (((u64)deltapm) * mult) >> 22; - do_div(res, 1000000); - pr_warning("APIC calibration not consistent " - "with PM-Timer: %ldms instead of 100ms\n",(long)res); - - /* Correct the lapic counter value */ - res = (((u64)(*delta)) * pm_100ms); - do_div(res, deltapm); - pr_info("APIC delta adjusted to PM-Timer: " - "%lu (%ld)\n", (unsigned long)res, *delta); - *delta = (long)res; - - /* Correct the tsc counter value */ - if (cpu_has_tsc) { - res = (((u64)(*deltatsc)) * pm_100ms); - do_div(res, deltapm); - apic_printk(APIC_VERBOSE, "TSC delta adjusted to " - "PM-Timer: %lu (%ld) \n", - (unsigned long)res, *deltatsc); - *deltatsc = (long)res; - } - - return 0; -} - -static int __init calibrate_APIC_clock(void) -{ - struct clock_event_device *levt = &__get_cpu_var(lapic_events); - void (*real_handler)(struct clock_event_device *dev); - unsigned long deltaj; - long delta, deltatsc; - int pm_referenced = 0; - - local_irq_disable(); - - /* Replace the global interrupt handler */ - real_handler = global_clock_event->event_handler; - global_clock_event->event_handler = lapic_cal_handler; - - /* - * Setup the APIC counter to maximum. 
There is no way the lapic - * can underflow in the 100ms detection time frame - */ - __setup_APIC_LVTT(0xffffffff, 0, 0); - - /* Let the interrupts run */ - local_irq_enable(); - - while (lapic_cal_loops <= LAPIC_CAL_LOOPS) - cpu_relax(); - - local_irq_disable(); - - /* Restore the real event handler */ - global_clock_event->event_handler = real_handler; - - /* Build delta t1-t2 as apic timer counts down */ - delta = lapic_cal_t1 - lapic_cal_t2; - apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); - - deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); - - /* we trust the PM based calibration if possible */ - pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, - &delta, &deltatsc); - - /* Calculate the scaled math multiplication factor */ - lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, - lapic_clockevent.shift); - lapic_clockevent.max_delta_ns = - clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); - lapic_clockevent.min_delta_ns = - clockevent_delta2ns(0xF, &lapic_clockevent); - - calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; - - apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); - apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult); - apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", - calibration_result); - - if (cpu_has_tsc) { - apic_printk(APIC_VERBOSE, "..... CPU clock speed is " - "%ld.%04ld MHz.\n", - (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), - (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); - } - - apic_printk(APIC_VERBOSE, "..... host bus clock speed is " - "%u.%04u MHz.\n", - calibration_result / (1000000 / HZ), - calibration_result % (1000000 / HZ)); - - /* - * Do a sanity check on the APIC calibration result - */ - if (calibration_result < (1000000 / HZ)) { - local_irq_enable(); - pr_warning("APIC frequency too slow, disabling apic timer\n"); - return -1; - } - - levt->features &= ~CLOCK_EVT_FEAT_DUMMY; - - /* - * PM timer calibration failed or not turned on - * so lets try APIC timer based calibration - */ - if (!pm_referenced) { - apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); - - /* - * Setup the apic timer manually - */ - levt->event_handler = lapic_cal_handler; - lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt); - lapic_cal_loops = -1; - - /* Let the interrupts run */ - local_irq_enable(); - - while (lapic_cal_loops <= LAPIC_CAL_LOOPS) - cpu_relax(); - - /* Stop the lapic timer */ - lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt); - - /* Jiffies delta */ - deltaj = lapic_cal_j2 - lapic_cal_j1; - apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); - - /* Check, if the jiffies result is consistent */ - if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) - apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); - else - levt->features |= CLOCK_EVT_FEAT_DUMMY; - } else - local_irq_enable(); - - if (levt->features & CLOCK_EVT_FEAT_DUMMY) { - pr_warning("APIC timer disabled due to verification failure\n"); - return -1; - } - - return 0; -} - -/* - * Setup the boot APIC - * - * Calibrate and verify the result. - */ -void __init setup_boot_APIC_clock(void) -{ - /* - * The local apic timer can be disabled via the kernel - * commandline or from the CPU detection code. Register the lapic - * timer as a dummy clock event source on SMP systems, so the - * broadcast mechanism is used. On UP systems simply ignore it. - */ - if (disable_apic_timer) { - pr_info("Disabling APIC timer\n"); - /* No broadcast on UP ! 
*/ - if (num_possible_cpus() > 1) { - lapic_clockevent.mult = 1; - setup_APIC_timer(); - } - return; - } - - apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" - "calibrating APIC timer ...\n"); - - if (calibrate_APIC_clock()) { - /* No broadcast on UP ! */ - if (num_possible_cpus() > 1) - setup_APIC_timer(); - return; - } - - /* - * If nmi_watchdog is set to IO_APIC, we need the - * PIT/HPET going. Otherwise register lapic as a dummy - * device. - */ - if (nmi_watchdog != NMI_IO_APIC) - lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; - else - pr_warning("APIC timer registered as dummy," - " due to nmi_watchdog=%d!\n", nmi_watchdog); - - /* Setup the lapic or request the broadcast */ - setup_APIC_timer(); -} - -void __cpuinit setup_secondary_APIC_clock(void) -{ - setup_APIC_timer(); -} - -/* - * The guts of the apic timer interrupt - */ -static void local_apic_timer_interrupt(void) -{ - int cpu = smp_processor_id(); - struct clock_event_device *evt = &per_cpu(lapic_events, cpu); - - /* - * Normally we should not be here till LAPIC has been initialized but - * in some cases like kdump, its possible that there is a pending LAPIC - * timer interrupt from previous kernel's context and is delivered in - * new kernel the moment interrupts are enabled. - * - * Interrupts are enabled early and LAPIC is setup much later, hence - * its possible that when we get here evt->event_handler is NULL. - * Check for event_handler being NULL and discard the interrupt as - * spurious. - */ - if (!evt->event_handler) { - pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu); - /* Switch it off */ - lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); - return; - } - - /* - * the NMI deadlock-detector uses this. - */ - inc_irq_stat(apic_timer_irqs); - - evt->event_handler(evt); -} - -/* - * Local APIC timer interrupt. This is the most natural way for doing - * local interrupts, but local timer interrupts can be emulated by - * broadcast interrupts too. [in case the hw doesn't support APIC timers] - * - * [ if a single-CPU system runs an SMP kernel then we call the local - * interrupt as well. Thus we cannot inline the local irq ... ] - */ -void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - - /* - * NOTE! We'd better ACK the irq immediately, - * because timer handling can be slow. - */ - ack_APIC_irq(); - /* - * update_process_times() expects us to have done irq_enter(). - * Besides, if we don't timer interrupts ignore the global - * interrupt lock, which is the WrongThing (tm) to do. - */ - exit_idle(); - irq_enter(); - local_apic_timer_interrupt(); - irq_exit(); - - set_irq_regs(old_regs); -} - -int setup_profiling_timer(unsigned int multiplier) -{ - return -EINVAL; -} - -/* - * Local APIC start and shutdown - */ - -/** - * clear_local_APIC - shutdown the local APIC - * - * This is called, when a CPU is disabled and before rebooting, so the state of - * the local APIC has no dangling leftovers. Also used to cleanout any BIOS - * leftovers during boot. - */ -void clear_local_APIC(void) -{ - int maxlvt; - u32 v; - - /* APIC hasn't been mapped yet */ - if (!apic_phys) - return; - - maxlvt = lapic_get_maxlvt(); - /* - * Masking an LVT entry can trigger a local APIC error - * if the vector is zero. Mask LVTERR first to prevent this. 
- */ - if (maxlvt >= 3) { - v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ - apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); - } - /* - * Careful: we have to set masks only first to deassert - * any level-triggered sources. - */ - v = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, v | APIC_LVT_MASKED); - v = apic_read(APIC_LVT0); - apic_write(APIC_LVT0, v | APIC_LVT_MASKED); - v = apic_read(APIC_LVT1); - apic_write(APIC_LVT1, v | APIC_LVT_MASKED); - if (maxlvt >= 4) { - v = apic_read(APIC_LVTPC); - apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); - } - - /* lets not touch this if we didn't frob it */ -#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL) - if (maxlvt >= 5) { - v = apic_read(APIC_LVTTHMR); - apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); - } -#endif - /* - * Clean APIC state for other OSs: - */ - apic_write(APIC_LVTT, APIC_LVT_MASKED); - apic_write(APIC_LVT0, APIC_LVT_MASKED); - apic_write(APIC_LVT1, APIC_LVT_MASKED); - if (maxlvt >= 3) - apic_write(APIC_LVTERR, APIC_LVT_MASKED); - if (maxlvt >= 4) - apic_write(APIC_LVTPC, APIC_LVT_MASKED); - - /* Integrated APIC (!82489DX) ? */ - if (lapic_is_integrated()) { - if (maxlvt > 3) - /* Clear ESR due to Pentium errata 3AP and 11AP */ - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - } -} - -/** - * disable_local_APIC - clear and disable the local APIC - */ -void disable_local_APIC(void) -{ - unsigned int value; - - /* APIC hasn't been mapped yet */ - if (!apic_phys) - return; - - clear_local_APIC(); - - /* - * Disable APIC (implies clearing of registers - * for 82489DX!). - */ - value = apic_read(APIC_SPIV); - value &= ~APIC_SPIV_APIC_ENABLED; - apic_write(APIC_SPIV, value); - -#ifdef CONFIG_X86_32 - /* - * When LAPIC was disabled by the BIOS and enabled by the kernel, - * restore the disabled state. - */ - if (enabled_via_apicbase) { - unsigned int l, h; - - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_ENABLE; - wrmsr(MSR_IA32_APICBASE, l, h); - } -#endif -} - -/* - * If Linux enabled the LAPIC against the BIOS default disable it down before - * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and - * not power-off. Additionally clear all LVT entries before disable_local_APIC - * for the case where Linux didn't enable the LAPIC. - */ -void lapic_shutdown(void) -{ - unsigned long flags; - - if (!cpu_has_apic) - return; - - local_irq_save(flags); - -#ifdef CONFIG_X86_32 - if (!enabled_via_apicbase) - clear_local_APIC(); - else -#endif - disable_local_APIC(); - - - local_irq_restore(flags); -} - -/* - * This is to verify that we're looking at a real local APIC. - * Check these against your board if the CPUs aren't getting - * started for no apparent reason. - */ -int __init verify_local_APIC(void) -{ - unsigned int reg0, reg1; - - /* - * The version register is read-only in a real APIC. - */ - reg0 = apic_read(APIC_LVR); - apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0); - apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK); - reg1 = apic_read(APIC_LVR); - apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1); - - /* - * The two version reads above should print the same - * numbers. If the second one is different, then we - * poke at a non-APIC. - */ - if (reg1 != reg0) - return 0; - - /* - * Check if the version looks reasonably. - */ - reg1 = GET_APIC_VERSION(reg0); - if (reg1 == 0x00 || reg1 == 0xff) - return 0; - reg1 = lapic_get_maxlvt(); - if (reg1 < 0x02 || reg1 == 0xff) - return 0; - - /* - * The ID register is read/write in a real APIC. 
- */ - reg0 = apic_read(APIC_ID); - apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); - apic_write(APIC_ID, reg0 ^ apic->apic_id_mask); - reg1 = apic_read(APIC_ID); - apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); - apic_write(APIC_ID, reg0); - if (reg1 != (reg0 ^ apic->apic_id_mask)) - return 0; - - /* - * The next two are just to see if we have sane values. - * They're only really relevant if we're in Virtual Wire - * compatibility mode, but most boxes are anymore. - */ - reg0 = apic_read(APIC_LVT0); - apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0); - reg1 = apic_read(APIC_LVT1); - apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1); - - return 1; -} - -/** - * sync_Arb_IDs - synchronize APIC bus arbitration IDs - */ -void __init sync_Arb_IDs(void) -{ - /* - * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not - * needed on AMD. - */ - if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - return; - - /* - * Wait for idle. - */ - apic_wait_icr_idle(); - - apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); - apic_write(APIC_ICR, APIC_DEST_ALLINC | - APIC_INT_LEVELTRIG | APIC_DM_INIT); -} - -/* - * An initial setup of the virtual wire mode. - */ -void __init init_bsp_APIC(void) -{ - unsigned int value; - - /* - * Don't do the setup now if we have a SMP BIOS as the - * through-I/O-APIC virtual wire mode might be active. - */ - if (smp_found_config || !cpu_has_apic) - return; - - /* - * Do not trust the local APIC being empty at bootup. - */ - clear_local_APIC(); - - /* - * Enable APIC. - */ - value = apic_read(APIC_SPIV); - value &= ~APIC_VECTOR_MASK; - value |= APIC_SPIV_APIC_ENABLED; - -#ifdef CONFIG_X86_32 - /* This bit is reserved on P4/Xeon and should be cleared */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - (boot_cpu_data.x86 == 15)) - value &= ~APIC_SPIV_FOCUS_DISABLED; - else -#endif - value |= APIC_SPIV_FOCUS_DISABLED; - value |= SPURIOUS_APIC_VECTOR; - apic_write(APIC_SPIV, value); - - /* - * Set up the virtual wire mode. - */ - apic_write(APIC_LVT0, APIC_DM_EXTINT); - value = APIC_DM_NMI; - if (!lapic_is_integrated()) /* 82489DX */ - value |= APIC_LVT_LEVEL_TRIGGER; - apic_write(APIC_LVT1, value); -} - -static void __cpuinit lapic_setup_esr(void) -{ - unsigned int oldvalue, value, maxlvt; - - if (!lapic_is_integrated()) { - pr_info("No ESR for 82489DX.\n"); - return; - } - - if (apic->disable_esr) { - /* - * Something untraceable is creating bad interrupts on - * secondary quads ... for the moment, just leave the - * ESR disabled - we can't do anything useful with the - * errors anyway - mbligh - */ - pr_info("Leaving ESR disabled.\n"); - return; - } - - maxlvt = lapic_get_maxlvt(); - if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ - apic_write(APIC_ESR, 0); - oldvalue = apic_read(APIC_ESR); - - /* enables sending errors */ - value = ERROR_APIC_VECTOR; - apic_write(APIC_LVTERR, value); - - /* - * spec says clear errors after enabling vector. 
- */ - if (maxlvt > 3) - apic_write(APIC_ESR, 0); - value = apic_read(APIC_ESR); - if (value != oldvalue) - apic_printk(APIC_VERBOSE, "ESR value before enabling " - "vector: 0x%08x after: 0x%08x\n", - oldvalue, value); -} - - -/** - * setup_local_APIC - setup the local APIC - */ -void __cpuinit setup_local_APIC(void) -{ - unsigned int value; - int i, j; - - if (disable_apic) { - arch_disable_smp_support(); - return; - } - -#ifdef CONFIG_X86_32 - /* Pound the ESR really hard over the head with a big hammer - mbligh */ - if (lapic_is_integrated() && apic->disable_esr) { - apic_write(APIC_ESR, 0); - apic_write(APIC_ESR, 0); - apic_write(APIC_ESR, 0); - apic_write(APIC_ESR, 0); - } -#endif - - preempt_disable(); - - /* - * Double-check whether this APIC is really registered. - * This is meaningless in clustered apic mode, so we skip it. - */ - if (!apic->apic_id_registered()) - BUG(); - - /* - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ - apic->init_apic_ldr(); - - /* - * Set Task Priority to 'accept all'. We never change this - * later on. - */ - value = apic_read(APIC_TASKPRI); - value &= ~APIC_TPRI_MASK; - apic_write(APIC_TASKPRI, value); - - /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. - * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. - */ - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1< 1) || - (boot_cpu_data.x86 >= 15)) - break; - goto no_apic; - case X86_VENDOR_INTEL: - if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 || - (boot_cpu_data.x86 == 5 && cpu_has_apic)) - break; - goto no_apic; - default: - goto no_apic; - } - - if (!cpu_has_apic) { - /* - * Over-ride BIOS and try to enable the local APIC only if - * "lapic" specified. - */ - if (!force_enable_local_apic) { - pr_info("Local APIC disabled by BIOS -- " - "you can enable it with \"lapic\"\n"); - return -1; - } - /* - * Some BIOSes disable the local APIC in the APIC_BASE - * MSR. This can only be done in software for Intel P6 or later - * and AMD K7 (Model > 1) or later. 
- */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (!(l & MSR_IA32_APICBASE_ENABLE)) { - pr_info("Local APIC disabled by BIOS -- reenabling.\n"); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; - wrmsr(MSR_IA32_APICBASE, l, h); - enabled_via_apicbase = 1; - } - } - /* - * The APIC feature bit should now be enabled - * in `cpuid' - */ - features = cpuid_edx(1); - if (!(features & (1 << X86_FEATURE_APIC))) { - pr_warning("Could not enable APIC!\n"); - return -1; - } - set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - - /* The BIOS may have set up the APIC at some other address */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (l & MSR_IA32_APICBASE_ENABLE) - mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; - - pr_info("Found and enabled local APIC!\n"); - - apic_pm_activate(); - - return 0; - -no_apic: - pr_info("No local APIC present or hardware disabled\n"); - return -1; -} -#endif - -#ifdef CONFIG_X86_64 -void __init early_init_lapic_mapping(void) -{ - unsigned long phys_addr; - - /* - * If no local APIC can be found then go out - * : it means there is no mpatable and MADT - */ - if (!smp_found_config) - return; - - phys_addr = mp_lapic_addr; - - set_fixmap_nocache(FIX_APIC_BASE, phys_addr); - apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", - APIC_BASE, phys_addr); - - /* - * Fetch the APIC ID of the BSP in case we have a - * default configuration (or the MP table is broken). - */ - boot_cpu_physical_apicid = read_apic_id(); -} -#endif - -/** - * init_apic_mappings - initialize APIC mappings - */ -void __init init_apic_mappings(void) -{ -#ifdef CONFIG_X86_X2APIC - if (x2apic) { - boot_cpu_physical_apicid = read_apic_id(); - return; - } -#endif - - /* - * If no local APIC can be found then set up a fake all - * zeroes page to simulate the local APIC and another - * one for the IO-APIC. - */ - if (!smp_found_config && detect_init_APIC()) { - apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); - apic_phys = __pa(apic_phys); - } else - apic_phys = mp_lapic_addr; - - set_fixmap_nocache(FIX_APIC_BASE, apic_phys); - apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", - APIC_BASE, apic_phys); - - /* - * Fetch the APIC ID of the BSP in case we have a - * default configuration (or the MP table is broken). - */ - if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = read_apic_id(); -} - -/* - * This initializes the IO-APIC and APIC hardware if this is - * a UP kernel. - */ -int apic_version[MAX_APICS]; - -int __init APIC_init_uniprocessor(void) -{ - if (disable_apic) { - pr_info("Apic disabled\n"); - return -1; - } -#ifdef CONFIG_X86_64 - if (!cpu_has_apic) { - disable_apic = 1; - pr_info("Apic disabled by BIOS\n"); - return -1; - } -#else - if (!smp_found_config && !cpu_has_apic) - return -1; - - /* - * Complain if the BIOS pretends there is one. - */ - if (!cpu_has_apic && - APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { - pr_err("BIOS bug, local APIC 0x%x not detected!...\n", - boot_cpu_physical_apicid); - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); - return -1; - } -#endif - - enable_IR_x2apic(); -#ifdef CONFIG_X86_64 - default_setup_apic_routing(); -#endif - - verify_local_APIC(); - connect_bsp_APIC(); - -#ifdef CONFIG_X86_64 - apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); -#else - /* - * Hack: In case of kdump, after a crash, kernel might be booting - * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid - * might be zero if read from MP tables. 
Get it from LAPIC. - */ -# ifdef CONFIG_CRASH_DUMP - boot_cpu_physical_apicid = read_apic_id(); -# endif -#endif - physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); - setup_local_APIC(); - -#ifdef CONFIG_X86_IO_APIC - /* - * Now enable IO-APICs, actually call clear_IO_APIC - * We need clear_IO_APIC before enabling error vector - */ - if (!skip_ioapic_setup && nr_ioapics) - enable_IO_APIC(); -#endif - - end_local_APIC_setup(); - -#ifdef CONFIG_X86_IO_APIC - if (smp_found_config && !skip_ioapic_setup && nr_ioapics) - setup_IO_APIC(); - else { - nr_ioapics = 0; - localise_nmi_watchdog(); - } -#else - localise_nmi_watchdog(); -#endif - - setup_boot_clock(); -#ifdef CONFIG_X86_64 - check_nmi_watchdog(); -#endif - - return 0; -} - -/* - * Local APIC interrupts - */ - -/* - * This interrupt should _never_ happen with our APIC/SMP architecture - */ -void smp_spurious_interrupt(struct pt_regs *regs) -{ - u32 v; - - exit_idle(); - irq_enter(); - /* - * Check if this really is a spurious interrupt and ACK it - * if it is a vectored one. Just in case... - * Spurious interrupts should not be ACKed. - */ - v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); - if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) - ack_APIC_irq(); - - inc_irq_stat(irq_spurious_count); - - /* see sw-dev-man vol 3, chapter 7.4.13.5 */ - pr_info("spurious APIC interrupt on CPU#%d, " - "should never happen.\n", smp_processor_id()); - irq_exit(); -} - -/* - * This interrupt should never happen with our APIC/SMP architecture - */ -void smp_error_interrupt(struct pt_regs *regs) -{ - u32 v, v1; - - exit_idle(); - irq_enter(); - /* First tickle the hardware, only then report what went on. -- REW */ - v = apic_read(APIC_ESR); - apic_write(APIC_ESR, 0); - v1 = apic_read(APIC_ESR); - ack_APIC_irq(); - atomic_inc(&irq_err_count); - - /* - * Here is what the APIC error bits mean: - * 0: Send CS error - * 1: Receive CS error - * 2: Send accept error - * 3: Receive accept error - * 4: Reserved - * 5: Send illegal vector - * 6: Received illegal vector - * 7: Illegal register address - */ - pr_debug("APIC error on CPU%d: %02x(%02x)\n", - smp_processor_id(), v , v1); - irq_exit(); -} - -/** - * connect_bsp_APIC - attach the APIC to the interrupt system - */ -void __init connect_bsp_APIC(void) -{ -#ifdef CONFIG_X86_32 - if (pic_mode) { - /* - * Do not trust the local APIC being empty at bootup. - */ - clear_local_APIC(); - /* - * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's - * local APIC to INT and NMI lines. - */ - apic_printk(APIC_VERBOSE, "leaving PIC mode, " - "enabling APIC mode.\n"); - outb(0x70, 0x22); - outb(0x01, 0x23); - } -#endif - if (apic->enable_apic_mode) - apic->enable_apic_mode(); -} - -/** - * disconnect_bsp_APIC - detach the APIC from the interrupt system - * @virt_wire_setup: indicates, whether virtual wire mode is selected - * - * Virtual wire mode is necessary to deliver legacy interrupts even when the - * APIC is disabled. - */ -void disconnect_bsp_APIC(int virt_wire_setup) -{ - unsigned int value; - -#ifdef CONFIG_X86_32 - if (pic_mode) { - /* - * Put the board back into PIC mode (has an effect only on - * certain older boards). Note that APIC interrupts, including - * IPIs, won't work beyond this point! The only exception are - * INIT IPIs. 
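/*
 * Illustrative sketch (not part of the patch): decoding the two ESR values
 * that smp_error_interrupt() above prints, using the bit meanings listed in
 * its comment.  Plain userspace C, for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

static const char * const esr_bit_names[8] = {
        "Send CS error",           "Receive CS error",
        "Send accept error",       "Receive accept error",
        "Reserved",                "Send illegal vector",
        "Received illegal vector", "Illegal register address",
};

static void decode_esr(uint32_t esr)
{
        int i;

        for (i = 0; i < 8; i++)
                if (esr & (1u << i))
                        printf("  ESR bit %d: %s\n", i, esr_bit_names[i]);
}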
- */ - apic_printk(APIC_VERBOSE, "disabling APIC mode, " - "entering PIC mode.\n"); - outb(0x70, 0x22); - outb(0x00, 0x23); - return; - } -#endif - - /* Go back to Virtual Wire compatibility mode */ - - /* For the spurious interrupt use vector F, and enable it */ - value = apic_read(APIC_SPIV); - value &= ~APIC_VECTOR_MASK; - value |= APIC_SPIV_APIC_ENABLED; - value |= 0xf; - apic_write(APIC_SPIV, value); - - if (!virt_wire_setup) { - /* - * For LVT0 make it edge triggered, active high, - * external and enabled - */ - value = apic_read(APIC_LVT0); - value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | - APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | - APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); - value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; - value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); - apic_write(APIC_LVT0, value); - } else { - /* Disable LVT0 */ - apic_write(APIC_LVT0, APIC_LVT_MASKED); - } - - /* - * For LVT1 make it edge triggered, active high, - * nmi and enabled - */ - value = apic_read(APIC_LVT1); - value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | - APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | - APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); - value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; - value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); - apic_write(APIC_LVT1, value); -} - -void __cpuinit generic_processor_info(int apicid, int version) -{ - int cpu; - - /* - * Validate version - */ - if (version == 0x0) { - pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - version); - version = 0x10; - } - apic_version[apicid] = version; - - if (num_processors >= nr_cpu_ids) { - int max = nr_cpu_ids; - int thiscpu = max + disabled_cpus; - - pr_warning( - "ACPI: NR_CPUS/possible_cpus limit of %i reached." - " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); - - disabled_cpus++; - return; - } - - num_processors++; - cpu = cpumask_next_zero(-1, cpu_present_mask); - - if (version != apic_version[boot_cpu_physical_apicid]) - WARN_ONCE(1, - "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", - apic_version[boot_cpu_physical_apicid], cpu, version); - - physid_set(apicid, phys_cpu_present_map); - if (apicid == boot_cpu_physical_apicid) { - /* - * x86_bios_cpu_apicid is required to have processors listed - * in same order as logical cpu numbers. Hence the first - * entry is BSP, and so on. - */ - cpu = 0; - } - if (apicid > max_physical_apicid) - max_physical_apicid = apicid; - -#ifdef CONFIG_X86_32 - /* - * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y - * but we need to work other dependencies like SMP_SUSPEND etc - * before this can be done without some confusion. 
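/*
 * Illustrative sketch (not part of the patch): the IMCR accesses performed
 * by connect_bsp_APIC()/disconnect_bsp_APIC() above.  The port numbers come
 * straight from the hunk; outb() is replaced by a trace stub so the example
 * stays self-contained.
 */
#include <stdio.h>
#include <stdint.h>

static void outb(uint8_t val, uint16_t port)
{
        printf("outb(0x%02x, 0x%02x)\n", val, port);    /* stand-in for port I/O */
}

static void imcr_route_to_apic(int to_apic)
{
        outb(0x70, 0x22);                       /* select the IMCR register    */
        outb(to_apic ? 0x01 : 0x00, 0x23);      /* 1: INT/NMI go to the APIC,  */
                                                /* 0: back to PIC mode         */
}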
- * if (CPU_HOTPLUG_ENABLED || num_processors > 8) - * - Ashok Raj - */ - if (max_physical_apicid >= 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(version)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } - } -#endif - -#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) - early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; - early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; -#endif - - set_cpu_possible(cpu, true); - set_cpu_present(cpu, true); -} - -int hard_smp_processor_id(void) -{ - return read_apic_id(); -} - -void default_init_apic_ldr(void) -{ - unsigned long val; - - apic_write(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write(APIC_LDR, val); -} - -#ifdef CONFIG_X86_32 -int default_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} -#endif - -/* - * Power management - */ -#ifdef CONFIG_PM - -static struct { - /* - * 'active' is true if the local APIC was enabled by us and - * not the BIOS; this signifies that we are also responsible - * for disabling it before entering apm/acpi suspend - */ - int active; - /* r/w apic fields */ - unsigned int apic_id; - unsigned int apic_taskpri; - unsigned int apic_ldr; - unsigned int apic_dfr; - unsigned int apic_spiv; - unsigned int apic_lvtt; - unsigned int apic_lvtpc; - unsigned int apic_lvt0; - unsigned int apic_lvt1; - unsigned int apic_lvterr; - unsigned int apic_tmict; - unsigned int apic_tdcr; - unsigned int apic_thmr; -} apic_pm_state; - -static int lapic_suspend(struct sys_device *dev, pm_message_t state) -{ - unsigned long flags; - int maxlvt; - - if (!apic_pm_state.active) - return 0; - - maxlvt = lapic_get_maxlvt(); - - apic_pm_state.apic_id = apic_read(APIC_ID); - apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); - apic_pm_state.apic_ldr = apic_read(APIC_LDR); - apic_pm_state.apic_dfr = apic_read(APIC_DFR); - apic_pm_state.apic_spiv = apic_read(APIC_SPIV); - apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); - if (maxlvt >= 4) - apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); - apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); - apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); - apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); - apic_pm_state.apic_tmict = apic_read(APIC_TMICT); - apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); -#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) - if (maxlvt >= 5) - apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); -#endif - - local_irq_save(flags); - disable_local_APIC(); - local_irq_restore(flags); - return 0; -} - -static int lapic_resume(struct sys_device *dev) -{ - unsigned int l, h; - unsigned long flags; - int maxlvt; - - if (!apic_pm_state.active) - return 0; - - maxlvt = lapic_get_maxlvt(); - - local_irq_save(flags); - -#ifdef CONFIG_X86_X2APIC - if (x2apic) - enable_x2apic(); - else -#endif - { - /* - * Make sure the APICBASE points to the right address - * - * FIXME! This will be wrong if we ever support suspend on - * SMP! We'll need to do this as part of the CPU restore! 
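/*
 * Illustrative sketch (not part of the patch): the flat logical-destination
 * value computed by default_init_apic_ldr() above.  In flat mode each of up
 * to eight CPUs owns one bit of LDR[31:24]; the field position is the xAPIC
 * layout, the helper name is invented for the example.
 */
#include <stdint.h>

#define APIC_LDR_MASK   (0xffu << 24)

static uint32_t flat_ldr_value(uint32_t old_ldr, unsigned int cpu)
{
        uint32_t val = old_ldr & ~APIC_LDR_MASK;

        val |= (1u << cpu) << 24;       /* logical ID: one bit per CPU */
        return val;
}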
- */ - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; - wrmsr(MSR_IA32_APICBASE, l, h); - } - - apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); - apic_write(APIC_ID, apic_pm_state.apic_id); - apic_write(APIC_DFR, apic_pm_state.apic_dfr); - apic_write(APIC_LDR, apic_pm_state.apic_ldr); - apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); - apic_write(APIC_SPIV, apic_pm_state.apic_spiv); - apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); - apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); -#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) - if (maxlvt >= 5) - apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); -#endif - if (maxlvt >= 4) - apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); - apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); - apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); - apic_write(APIC_TMICT, apic_pm_state.apic_tmict); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - - local_irq_restore(flags); - - return 0; -} - -/* - * This device has no shutdown method - fully functioning local APICs - * are needed on every CPU up until machine_halt/restart/poweroff. - */ - -static struct sysdev_class lapic_sysclass = { - .name = "lapic", - .resume = lapic_resume, - .suspend = lapic_suspend, -}; - -static struct sys_device device_lapic = { - .id = 0, - .cls = &lapic_sysclass, -}; - -static void __cpuinit apic_pm_activate(void) -{ - apic_pm_state.active = 1; -} - -static int __init init_lapic_sysfs(void) -{ - int error; - - if (!cpu_has_apic) - return 0; - /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ - - error = sysdev_class_register(&lapic_sysclass); - if (!error) - error = sysdev_register(&device_lapic); - return error; -} -device_initcall(init_lapic_sysfs); - -#else /* CONFIG_PM */ - -static void apic_pm_activate(void) { } - -#endif /* CONFIG_PM */ - -#ifdef CONFIG_X86_64 -/* - * apic_is_clustered_box() -- Check if we can expect good TSC - * - * Thus far, the major user of this is IBM's Summit2 series: - * - * Clustered boxes may have unsynced TSC problems if they are - * multi-chassis. Use available data to take a good guess. - * If in doubt, go HPET. - */ -__cpuinit int apic_is_clustered_box(void) -{ - int i, clusters, zeros; - unsigned id; - u16 *bios_cpu_apicid; - DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); - - /* - * there is not this kind of box with AMD CPU yet. - * Some AMD box with quadcore cpu and 8 sockets apicid - * will be [4, 0x23] or [8, 0x27] could be thought to - * vsmp box still need checking... - */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) - return 0; - - bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); - bitmap_zero(clustermap, NUM_APIC_CLUSTERS); - - for (i = 0; i < nr_cpu_ids; i++) { - /* are we being called early in kernel startup? */ - if (bios_cpu_apicid) { - id = bios_cpu_apicid[i]; - } else if (i < nr_cpu_ids) { - if (cpu_present(i)) - id = per_cpu(x86_bios_cpu_apicid, i); - else - continue; - } else - break; - - if (id != BAD_APICID) - __set_bit(APIC_CLUSTERID(id), clustermap); - } - - /* Problem: Partially populated chassis may not have CPUs in some of - * the APIC clusters they have been allocated. Only present CPUs have - * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. - * Since clusters are allocated sequentially, count zeros only if - * they are bounded by ones. 
- */ - clusters = 0; - zeros = 0; - for (i = 0; i < NUM_APIC_CLUSTERS; i++) { - if (test_bit(i, clustermap)) { - clusters += 1 + zeros; - zeros = 0; - } else - ++zeros; - } - - /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are - * not guaranteed to be synced between boards - */ - if (is_vsmp_box() && clusters > 1) - return 1; - - /* - * If clusters > 2, then should be multi-chassis. - * May have to revisit this when multi-core + hyperthreaded CPUs come - * out, but AFAIK this will work even for them. - */ - return (clusters > 2); -} -#endif - -/* - * APIC command line parameters - */ -static int __init setup_disableapic(char *arg) -{ - disable_apic = 1; - setup_clear_cpu_cap(X86_FEATURE_APIC); - return 0; -} -early_param("disableapic", setup_disableapic); - -/* same as disableapic, for compatibility */ -static int __init setup_nolapic(char *arg) -{ - return setup_disableapic(arg); -} -early_param("nolapic", setup_nolapic); - -static int __init parse_lapic_timer_c2_ok(char *arg) -{ - local_apic_timer_c2_ok = 1; - return 0; -} -early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); - -static int __init parse_disable_apic_timer(char *arg) -{ - disable_apic_timer = 1; - return 0; -} -early_param("noapictimer", parse_disable_apic_timer); - -static int __init parse_nolapic_timer(char *arg) -{ - disable_apic_timer = 1; - return 0; -} -early_param("nolapic_timer", parse_nolapic_timer); - -static int __init apic_set_verbosity(char *arg) -{ - if (!arg) { -#ifdef CONFIG_X86_64 - skip_ioapic_setup = 0; - return 0; -#endif - return -EINVAL; - } - - if (strcmp("debug", arg) == 0) - apic_verbosity = APIC_DEBUG; - else if (strcmp("verbose", arg) == 0) - apic_verbosity = APIC_VERBOSE; - else { - pr_warning("APIC Verbosity level %s not recognised" - " use apic=verbose or apic=debug\n", arg); - return -EINVAL; - } - - return 0; -} -early_param("apic", apic_set_verbosity); - -static int __init lapic_insert_resource(void) -{ - if (!apic_phys) - return -1; - - /* Put local APIC into the resource map. */ - lapic_resource.start = apic_phys; - lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; - insert_resource(&iomem_resource, &lapic_resource); - - return 0; -} - -/* - * need call insert after e820_reserve_resources() - * that is using request_resource - */ -late_initcall(lapic_insert_resource); diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile new file mode 100644 index 00000000000..da20b70c400 --- /dev/null +++ b/arch/x86/kernel/apic/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for local APIC drivers and for the IO-APIC code +# + +obj-y := apic.o ipi.o nmi.o +obj-$(CONFIG_X86_IO_APIC) += io_apic.o +obj-$(CONFIG_SMP) += ipi.o + +ifeq ($(CONFIG_X86_64),y) +obj-y += apic_64.o apic_flat_64.o +obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o +obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o +obj-$(CONFIG_X86_UV) += x2apic_uv_x.o +endif + diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c new file mode 100644 index 00000000000..c12823eb55b --- /dev/null +++ b/arch/x86/kernel/apic/apic.c @@ -0,0 +1,2212 @@ +/* + * Local APIC handling, local APIC timers + * + * (c) 1999, 2000, 2009 Ingo Molnar + * + * Fixes + * Maciej W. Rozycki : Bits for genuine 82489DX APICs; + * thanks to Eric Gilmore + * and Rolf G. Tews + * for testing these extensively. + * Maciej W. Rozycki : Various updates and fixes. + * Mikael Pettersson : Power Management for UP-APIC. + * Pavel Machek and + * Mikael Pettersson : PM converted to driver model. 
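/*
 * Illustrative sketch (not part of the patch): the "count zeros only if they
 * are bounded by ones" logic used by apic_is_clustered_box() above, pulled
 * out so the loop is easier to follow.  NUM_CLUSTERS is an arbitrary size
 * chosen for the example.
 */
#define NUM_CLUSTERS 16

static int count_clusters(const int clustermap[NUM_CLUSTERS])
{
        int i, clusters = 0, zeros = 0;

        for (i = 0; i < NUM_CLUSTERS; i++) {
                if (clustermap[i]) {
                        /* a populated cluster also accounts for the gap before it */
                        clusters += 1 + zeros;
                        zeros = 0;
                } else {
                        ++zeros;        /* trailing zeros are never counted */
                }
        }
        return clusters;
}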
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int num_processors; + +unsigned disabled_cpus __cpuinitdata; + +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; + +/* + * The highest APIC ID seen during enumeration. + * + * This determines the messaging protocol we can use: if all APIC IDs + * are in the 0 ... 7 range, then we can use logical addressing which + * has some performance advantages (better broadcasting). + * + * If there's an APIC ID above 8, we use physical addressing. + */ +unsigned int max_physical_apicid; + +/* + * Bitmask of physically existing CPUs: + */ +physid_mask_t phys_cpu_present_map; + +/* + * Map cpu index to physical APIC ID + */ +DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID); +DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); +EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + +#ifdef CONFIG_X86_32 +/* + * Knob to control our willingness to enable the local APIC. + * + * +1=force-enable + */ +static int force_enable_local_apic; +/* + * APIC command line parameters + */ +static int __init parse_lapic(char *arg) +{ + force_enable_local_apic = 1; + return 0; +} +early_param("lapic", parse_lapic); +/* Local APIC was disabled by the BIOS and enabled by the kernel */ +static int enabled_via_apicbase; + +#endif + +#ifdef CONFIG_X86_64 +static int apic_calibrate_pmtmr __initdata; +static __init int setup_apicpmtimer(char *s) +{ + apic_calibrate_pmtmr = 1; + notsc_setup(NULL); + return 0; +} +__setup("apicpmtimer", setup_apicpmtimer); +#endif + +#ifdef CONFIG_X86_X2APIC +int x2apic; +/* x2apic enabled before OS handover */ +static int x2apic_preenabled; +static int disable_x2apic; +static __init int setup_nox2apic(char *str) +{ + disable_x2apic = 1; + setup_clear_cpu_cap(X86_FEATURE_X2APIC); + return 0; +} +early_param("nox2apic", setup_nox2apic); +#endif + +unsigned long mp_lapic_addr; +int disable_apic; +/* Disable local APIC timer from the kernel commandline or via dmi quirk */ +static int disable_apic_timer __cpuinitdata; +/* Local APIC timer works in C2 */ +int local_apic_timer_c2_ok; +EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); + +int first_system_vector = 0xfe; + +/* + * Debug level, exported for io_apic.c + */ +unsigned int apic_verbosity; + +int pic_mode; + +/* Have we found an MP table */ +int smp_found_config; + +static struct resource lapic_resource = { + .name = "Local APIC", + .flags = IORESOURCE_MEM | IORESOURCE_BUSY, +}; + +static unsigned int calibration_result; + +static int lapic_next_event(unsigned long delta, + struct clock_event_device *evt); +static void lapic_timer_setup(enum clock_event_mode mode, + struct clock_event_device *evt); +static void lapic_timer_broadcast(const struct cpumask *mask); +static void apic_pm_activate(void); + +/* + * The local apic timer can be used for any function which is CPU local. 
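/*
 * Illustrative sketch (not part of the patch): the addressing-mode rule
 * described in the max_physical_apicid comment above.  The threshold of
 * eight follows from flat logical mode having only eight LDR bits; the
 * helper name is invented for the example.
 */
static int can_use_flat_logical_mode(unsigned int max_physical_apicid)
{
        /* APIC IDs 0..7 fit into the 8-bit flat logical destination map */
        return max_physical_apicid < 8;
}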
+ */ +static struct clock_event_device lapic_clockevent = { + .name = "lapic", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT + | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, + .shift = 32, + .set_mode = lapic_timer_setup, + .set_next_event = lapic_next_event, + .broadcast = lapic_timer_broadcast, + .rating = 100, + .irq = -1, +}; +static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + +static unsigned long apic_phys; + +/* + * Get the LAPIC version + */ +static inline int lapic_get_version(void) +{ + return GET_APIC_VERSION(apic_read(APIC_LVR)); +} + +/* + * Check, if the APIC is integrated or a separate chip + */ +static inline int lapic_is_integrated(void) +{ +#ifdef CONFIG_X86_64 + return 1; +#else + return APIC_INTEGRATED(lapic_get_version()); +#endif +} + +/* + * Check, whether this is a modern or a first generation APIC + */ +static int modern_apic(void) +{ + /* AMD systems use old APIC versions, so check the CPU */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0xf) + return 1; + return lapic_get_version() >= 0x14; +} + +void native_apic_wait_icr_idle(void) +{ + while (apic_read(APIC_ICR) & APIC_ICR_BUSY) + cpu_relax(); +} + +u32 native_safe_apic_wait_icr_idle(void) +{ + u32 send_status; + int timeout; + + timeout = 0; + do { + send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; + if (!send_status) + break; + udelay(100); + } while (timeout++ < 1000); + + return send_status; +} + +void native_apic_icr_write(u32 low, u32 id) +{ + apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); + apic_write(APIC_ICR, low); +} + +u64 native_apic_icr_read(void) +{ + u32 icr1, icr2; + + icr2 = apic_read(APIC_ICR2); + icr1 = apic_read(APIC_ICR); + + return icr1 | ((u64)icr2 << 32); +} + +/** + * enable_NMI_through_LVT0 - enable NMI through local vector table 0 + */ +void __cpuinit enable_NMI_through_LVT0(void) +{ + unsigned int v; + + /* unmask and set to NMI */ + v = APIC_DM_NMI; + + /* Level triggered for 82489DX (32bit mode) */ + if (!lapic_is_integrated()) + v |= APIC_LVT_LEVEL_TRIGGER; + + apic_write(APIC_LVT0, v); +} + +#ifdef CONFIG_X86_32 +/** + * get_physical_broadcast - Get number of physical broadcast IDs + */ +int get_physical_broadcast(void) +{ + return modern_apic() ? 0xff : 0xf; +} +#endif + +/** + * lapic_get_maxlvt - get the maximum number of local vector table entries + */ +int lapic_get_maxlvt(void) +{ + unsigned int v; + + v = apic_read(APIC_LVR); + /* + * - we always have APIC integrated on 64bit mode + * - 82489DXs do not report # of LVT entries + */ + return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; +} + +/* + * Local APIC timer + */ + +/* Clock divisor */ +#define APIC_DIVISOR 16 + +/* + * This function sets up the local APIC timer, with a timeout of + * 'clocks' APIC bus clock. During calibration we actually call + * this function twice on the boot CPU, once with a bogus timeout + * value, second time for real. The other (noncalibrating) CPUs + * call this function only once, with the real, calibrated value. + * + * We do reads before writes even if unnecessary, to get around the + * P5 APIC double write bug. 
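/*
 * Illustrative sketch (not part of the patch): the 64-bit ICR view used by
 * native_apic_icr_read()/native_apic_icr_write() above.  In xAPIC mode the
 * destination lives in the high dword (APIC_ICR2) and is written before the
 * low dword that actually sends the IPI.
 */
#include <stdint.h>

#define SET_APIC_DEST_FIELD(x)  ((uint32_t)(x) << 24)   /* xAPIC destination field */

static uint64_t icr_compose(uint32_t low, uint8_t dest_id)
{
        return ((uint64_t)SET_APIC_DEST_FIELD(dest_id) << 32) | low;
}

static void icr_split(uint64_t icr, uint32_t *low, uint32_t *high)
{
        *low  = (uint32_t)icr;          /* written to APIC_ICR, triggers the send */
        *high = (uint32_t)(icr >> 32);  /* written to APIC_ICR2 first             */
}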
+ */ +static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) +{ + unsigned int lvtt_value, tmp_value; + + lvtt_value = LOCAL_TIMER_VECTOR; + if (!oneshot) + lvtt_value |= APIC_LVT_TIMER_PERIODIC; + if (!lapic_is_integrated()) + lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); + + if (!irqen) + lvtt_value |= APIC_LVT_MASKED; + + apic_write(APIC_LVTT, lvtt_value); + + /* + * Divide PICLK by 16 + */ + tmp_value = apic_read(APIC_TDCR); + apic_write(APIC_TDCR, + (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | + APIC_TDR_DIV_16); + + if (!oneshot) + apic_write(APIC_TMICT, clocks / APIC_DIVISOR); +} + +/* + * Setup extended LVT, AMD specific (K8, family 10h) + * + * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and + * MCE interrupts are supported. Thus MCE offset must be set to 0. + * + * If mask=1, the LVT entry does not generate interrupts while mask=0 + * enables the vector. See also the BKDGs. + */ + +#define APIC_EILVT_LVTOFF_MCE 0 +#define APIC_EILVT_LVTOFF_IBS 1 + +static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) +{ + unsigned long reg = (lvt_off << 4) + APIC_EILVT0; + unsigned int v = (mask << 16) | (msg_type << 8) | vector; + + apic_write(reg, v); +} + +u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) +{ + setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); + return APIC_EILVT_LVTOFF_MCE; +} + +u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) +{ + setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); + return APIC_EILVT_LVTOFF_IBS; +} +EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); + +/* + * Program the next event, relative to now + */ +static int lapic_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + apic_write(APIC_TMICT, delta); + return 0; +} + +/* + * Setup the lapic timer in periodic or oneshot mode + */ +static void lapic_timer_setup(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int v; + + /* Lapic used as dummy for broadcast ? */ + if (evt->features & CLOCK_EVT_FEAT_DUMMY) + return; + + local_irq_save(flags); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_ONESHOT: + __setup_APIC_LVTT(calibration_result, + mode != CLOCK_EVT_MODE_PERIODIC, 1); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + v = apic_read(APIC_LVTT); + v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write(APIC_LVTT, v); + apic_write(APIC_TMICT, 0xffffffff); + break; + case CLOCK_EVT_MODE_RESUME: + /* Nothing to do here */ + break; + } + + local_irq_restore(flags); +} + +/* + * Local APIC timer broadcast function + */ +static void lapic_timer_broadcast(const struct cpumask *mask) +{ +#ifdef CONFIG_SMP + apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR); +#endif +} + +/* + * Setup the local APIC timer for this CPU. Copy the initilized values + * of the boot CPU and register the clock event in the framework. + */ +static void __cpuinit setup_APIC_timer(void) +{ + struct clock_event_device *levt = &__get_cpu_var(lapic_events); + + memcpy(levt, &lapic_clockevent, sizeof(*levt)); + levt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_register_device(levt); +} + +/* + * In this functions we calibrate APIC bus clocks to the external timer. + * + * We want to do the calibration only once since we want to have local timer + * irqs syncron. CPUs connected by the same APIC bus have the very same bus + * frequency. 
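/*
 * Illustrative sketch (not part of the patch): the LVTT value that
 * __setup_APIC_LVTT() above programs.  Bit positions follow the
 * architectural LVT layout; the vector argument stands in for
 * LOCAL_TIMER_VECTOR.
 */
#include <stdint.h>

#define APIC_LVT_TIMER_PERIODIC (1u << 17)
#define APIC_LVT_MASKED         (1u << 16)

static uint32_t lvtt_value(uint8_t vector, int oneshot, int irqen)
{
        uint32_t v = vector;

        if (!oneshot)
                v |= APIC_LVT_TIMER_PERIODIC;   /* hardware reloads TMICT itself */
        if (!irqen)
                v |= APIC_LVT_MASKED;           /* programmed, but kept quiet    */
        return v;
}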
+ * + * This was previously done by reading the PIT/HPET and waiting for a wrap + * around to find out, that a tick has elapsed. I have a box, where the PIT + * readout is broken, so it never gets out of the wait loop again. This was + * also reported by others. + * + * Monitoring the jiffies value is inaccurate and the clockevents + * infrastructure allows us to do a simple substitution of the interrupt + * handler. + * + * The calibration routine also uses the pm_timer when possible, as the PIT + * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes + * back to normal later in the boot process). + */ + +#define LAPIC_CAL_LOOPS (HZ/10) + +static __initdata int lapic_cal_loops = -1; +static __initdata long lapic_cal_t1, lapic_cal_t2; +static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; +static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; +static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; + +/* + * Temporary interrupt handler. + */ +static void __init lapic_cal_handler(struct clock_event_device *dev) +{ + unsigned long long tsc = 0; + long tapic = apic_read(APIC_TMCCT); + unsigned long pm = acpi_pm_read_early(); + + if (cpu_has_tsc) + rdtscll(tsc); + + switch (lapic_cal_loops++) { + case 0: + lapic_cal_t1 = tapic; + lapic_cal_tsc1 = tsc; + lapic_cal_pm1 = pm; + lapic_cal_j1 = jiffies; + break; + + case LAPIC_CAL_LOOPS: + lapic_cal_t2 = tapic; + lapic_cal_tsc2 = tsc; + if (pm < lapic_cal_pm1) + pm += ACPI_PM_OVRRUN; + lapic_cal_pm2 = pm; + lapic_cal_j2 = jiffies; + break; + } +} + +static int __init +calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) +{ + const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; + const long pm_thresh = pm_100ms / 100; + unsigned long mult; + u64 res; + +#ifndef CONFIG_X86_PM_TIMER + return -1; +#endif + + apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm); + + /* Check, if the PM timer is available */ + if (!deltapm) + return -1; + + mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); + + if (deltapm > (pm_100ms - pm_thresh) && + deltapm < (pm_100ms + pm_thresh)) { + apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n"); + return 0; + } + + res = (((u64)deltapm) * mult) >> 22; + do_div(res, 1000000); + pr_warning("APIC calibration not consistent " + "with PM-Timer: %ldms instead of 100ms\n",(long)res); + + /* Correct the lapic counter value */ + res = (((u64)(*delta)) * pm_100ms); + do_div(res, deltapm); + pr_info("APIC delta adjusted to PM-Timer: " + "%lu (%ld)\n", (unsigned long)res, *delta); + *delta = (long)res; + + /* Correct the tsc counter value */ + if (cpu_has_tsc) { + res = (((u64)(*deltatsc)) * pm_100ms); + do_div(res, deltapm); + apic_printk(APIC_VERBOSE, "TSC delta adjusted to " + "PM-Timer: %lu (%ld) \n", + (unsigned long)res, *deltatsc); + *deltatsc = (long)res; + } + + return 0; +} + +static int __init calibrate_APIC_clock(void) +{ + struct clock_event_device *levt = &__get_cpu_var(lapic_events); + void (*real_handler)(struct clock_event_device *dev); + unsigned long deltaj; + long delta, deltatsc; + int pm_referenced = 0; + + local_irq_disable(); + + /* Replace the global interrupt handler */ + real_handler = global_clock_event->event_handler; + global_clock_event->event_handler = lapic_cal_handler; + + /* + * Setup the APIC counter to maximum. 
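/*
 * Illustrative sketch (not part of the patch): the correction applied by
 * calibrate_by_pmtimer() above when the measured ACPI PM-timer delta does
 * not match the expected 100ms worth of ticks.  3579545 Hz is the fixed
 * PM-timer frequency; the caller has already rejected deltapm == 0.
 */
#include <stdint.h>

#define PMTMR_TICKS_PER_SEC     3579545
#define PM_100MS                (PMTMR_TICKS_PER_SEC / 10)

/* Rescale a lapic (or TSC) delta so it corresponds to a true 100ms window. */
static long rescale_to_pm_timer(long delta, long deltapm)
{
        return (long)(((uint64_t)delta * PM_100MS) / deltapm);
}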
There is no way the lapic + * can underflow in the 100ms detection time frame + */ + __setup_APIC_LVTT(0xffffffff, 0, 0); + + /* Let the interrupts run */ + local_irq_enable(); + + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) + cpu_relax(); + + local_irq_disable(); + + /* Restore the real event handler */ + global_clock_event->event_handler = real_handler; + + /* Build delta t1-t2 as apic timer counts down */ + delta = lapic_cal_t1 - lapic_cal_t2; + apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); + + deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); + + /* we trust the PM based calibration if possible */ + pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, + &delta, &deltatsc); + + /* Calculate the scaled math multiplication factor */ + lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, + lapic_clockevent.shift); + lapic_clockevent.max_delta_ns = + clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); + lapic_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &lapic_clockevent); + + calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; + + apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); + apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult); + apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", + calibration_result); + + if (cpu_has_tsc) { + apic_printk(APIC_VERBOSE, "..... CPU clock speed is " + "%ld.%04ld MHz.\n", + (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), + (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); + } + + apic_printk(APIC_VERBOSE, "..... host bus clock speed is " + "%u.%04u MHz.\n", + calibration_result / (1000000 / HZ), + calibration_result % (1000000 / HZ)); + + /* + * Do a sanity check on the APIC calibration result + */ + if (calibration_result < (1000000 / HZ)) { + local_irq_enable(); + pr_warning("APIC frequency too slow, disabling apic timer\n"); + return -1; + } + + levt->features &= ~CLOCK_EVT_FEAT_DUMMY; + + /* + * PM timer calibration failed or not turned on + * so lets try APIC timer based calibration + */ + if (!pm_referenced) { + apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); + + /* + * Setup the apic timer manually + */ + levt->event_handler = lapic_cal_handler; + lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt); + lapic_cal_loops = -1; + + /* Let the interrupts run */ + local_irq_enable(); + + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) + cpu_relax(); + + /* Stop the lapic timer */ + lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt); + + /* Jiffies delta */ + deltaj = lapic_cal_j2 - lapic_cal_j1; + apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); + + /* Check, if the jiffies result is consistent */ + if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) + apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); + else + levt->features |= CLOCK_EVT_FEAT_DUMMY; + } else + local_irq_enable(); + + if (levt->features & CLOCK_EVT_FEAT_DUMMY) { + pr_warning("APIC timer disabled due to verification failure\n"); + return -1; + } + + return 0; +} + +/* + * Setup the boot APIC + * + * Calibrate and verify the result. + */ +void __init setup_boot_APIC_clock(void) +{ + /* + * The local apic timer can be disabled via the kernel + * commandline or from the CPU detection code. Register the lapic + * timer as a dummy clock event source on SMP systems, so the + * broadcast mechanism is used. On UP systems simply ignore it. + */ + if (disable_apic_timer) { + pr_info("Disabling APIC timer\n"); + /* No broadcast on UP ! 
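/*
 * Illustrative sketch (not part of the patch): the arithmetic at the end of
 * calibrate_APIC_clock() above.  HZ is shown with an example config value;
 * LAPIC_CAL_LOOPS = HZ/10 calibration ticks are observed, so the per-tick
 * count times the divider recovers the bus clock rate.
 */
#define HZ              250             /* example value, config dependent */
#define APIC_DIVISOR    16
#define LAPIC_CAL_LOOPS (HZ / 10)

/* delta = lapic counts consumed while LAPIC_CAL_LOOPS timer ticks elapsed */
static unsigned int lapic_counts_per_tick(long delta)
{
        return (unsigned int)((delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS);
}

/* ... and the host bus clock in Hz is simply counts-per-tick times HZ */
static unsigned long bus_clock_hz(unsigned int counts_per_tick)
{
        return (unsigned long)counts_per_tick * HZ;
}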
*/ + if (num_possible_cpus() > 1) { + lapic_clockevent.mult = 1; + setup_APIC_timer(); + } + return; + } + + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" + "calibrating APIC timer ...\n"); + + if (calibrate_APIC_clock()) { + /* No broadcast on UP ! */ + if (num_possible_cpus() > 1) + setup_APIC_timer(); + return; + } + + /* + * If nmi_watchdog is set to IO_APIC, we need the + * PIT/HPET going. Otherwise register lapic as a dummy + * device. + */ + if (nmi_watchdog != NMI_IO_APIC) + lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; + else + pr_warning("APIC timer registered as dummy," + " due to nmi_watchdog=%d!\n", nmi_watchdog); + + /* Setup the lapic or request the broadcast */ + setup_APIC_timer(); +} + +void __cpuinit setup_secondary_APIC_clock(void) +{ + setup_APIC_timer(); +} + +/* + * The guts of the apic timer interrupt + */ +static void local_apic_timer_interrupt(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(lapic_events, cpu); + + /* + * Normally we should not be here till LAPIC has been initialized but + * in some cases like kdump, its possible that there is a pending LAPIC + * timer interrupt from previous kernel's context and is delivered in + * new kernel the moment interrupts are enabled. + * + * Interrupts are enabled early and LAPIC is setup much later, hence + * its possible that when we get here evt->event_handler is NULL. + * Check for event_handler being NULL and discard the interrupt as + * spurious. + */ + if (!evt->event_handler) { + pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu); + /* Switch it off */ + lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); + return; + } + + /* + * the NMI deadlock-detector uses this. + */ + inc_irq_stat(apic_timer_irqs); + + evt->event_handler(evt); +} + +/* + * Local APIC timer interrupt. This is the most natural way for doing + * local interrupts, but local timer interrupts can be emulated by + * broadcast interrupts too. [in case the hw doesn't support APIC timers] + * + * [ if a single-CPU system runs an SMP kernel then we call the local + * interrupt as well. Thus we cannot inline the local irq ... ] + */ +void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + /* + * NOTE! We'd better ACK the irq immediately, + * because timer handling can be slow. + */ + ack_APIC_irq(); + /* + * update_process_times() expects us to have done irq_enter(). + * Besides, if we don't timer interrupts ignore the global + * interrupt lock, which is the WrongThing (tm) to do. + */ + exit_idle(); + irq_enter(); + local_apic_timer_interrupt(); + irq_exit(); + + set_irq_regs(old_regs); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +/* + * Local APIC start and shutdown + */ + +/** + * clear_local_APIC - shutdown the local APIC + * + * This is called, when a CPU is disabled and before rebooting, so the state of + * the local APIC has no dangling leftovers. Also used to cleanout any BIOS + * leftovers during boot. + */ +void clear_local_APIC(void) +{ + int maxlvt; + u32 v; + + /* APIC hasn't been mapped yet */ + if (!apic_phys) + return; + + maxlvt = lapic_get_maxlvt(); + /* + * Masking an LVT entry can trigger a local APIC error + * if the vector is zero. Mask LVTERR first to prevent this. 
+ */ + if (maxlvt >= 3) { + v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ + apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); + } + /* + * Careful: we have to set masks only first to deassert + * any level-triggered sources. + */ + v = apic_read(APIC_LVTT); + apic_write(APIC_LVTT, v | APIC_LVT_MASKED); + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v | APIC_LVT_MASKED); + v = apic_read(APIC_LVT1); + apic_write(APIC_LVT1, v | APIC_LVT_MASKED); + if (maxlvt >= 4) { + v = apic_read(APIC_LVTPC); + apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); + } + + /* lets not touch this if we didn't frob it */ +#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL) + if (maxlvt >= 5) { + v = apic_read(APIC_LVTTHMR); + apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); + } +#endif + /* + * Clean APIC state for other OSs: + */ + apic_write(APIC_LVTT, APIC_LVT_MASKED); + apic_write(APIC_LVT0, APIC_LVT_MASKED); + apic_write(APIC_LVT1, APIC_LVT_MASKED); + if (maxlvt >= 3) + apic_write(APIC_LVTERR, APIC_LVT_MASKED); + if (maxlvt >= 4) + apic_write(APIC_LVTPC, APIC_LVT_MASKED); + + /* Integrated APIC (!82489DX) ? */ + if (lapic_is_integrated()) { + if (maxlvt > 3) + /* Clear ESR due to Pentium errata 3AP and 11AP */ + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + } +} + +/** + * disable_local_APIC - clear and disable the local APIC + */ +void disable_local_APIC(void) +{ + unsigned int value; + + /* APIC hasn't been mapped yet */ + if (!apic_phys) + return; + + clear_local_APIC(); + + /* + * Disable APIC (implies clearing of registers + * for 82489DX!). + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + +#ifdef CONFIG_X86_32 + /* + * When LAPIC was disabled by the BIOS and enabled by the kernel, + * restore the disabled state. + */ + if (enabled_via_apicbase) { + unsigned int l, h; + + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_ENABLE; + wrmsr(MSR_IA32_APICBASE, l, h); + } +#endif +} + +/* + * If Linux enabled the LAPIC against the BIOS default disable it down before + * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and + * not power-off. Additionally clear all LVT entries before disable_local_APIC + * for the case where Linux didn't enable the LAPIC. + */ +void lapic_shutdown(void) +{ + unsigned long flags; + + if (!cpu_has_apic) + return; + + local_irq_save(flags); + +#ifdef CONFIG_X86_32 + if (!enabled_via_apicbase) + clear_local_APIC(); + else +#endif + disable_local_APIC(); + + + local_irq_restore(flags); +} + +/* + * This is to verify that we're looking at a real local APIC. + * Check these against your board if the CPUs aren't getting + * started for no apparent reason. + */ +int __init verify_local_APIC(void) +{ + unsigned int reg0, reg1; + + /* + * The version register is read-only in a real APIC. + */ + reg0 = apic_read(APIC_LVR); + apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0); + apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK); + reg1 = apic_read(APIC_LVR); + apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1); + + /* + * The two version reads above should print the same + * numbers. If the second one is different, then we + * poke at a non-APIC. + */ + if (reg1 != reg0) + return 0; + + /* + * Check if the version looks reasonably. + */ + reg1 = GET_APIC_VERSION(reg0); + if (reg1 == 0x00 || reg1 == 0xff) + return 0; + reg1 = lapic_get_maxlvt(); + if (reg1 < 0x02 || reg1 == 0xff) + return 0; + + /* + * The ID register is read/write in a real APIC. 
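/*
 * Illustrative sketch (not part of the patch): the two-step LVT teardown
 * used by clear_local_APIC() above: first OR in the mask bit so no
 * level-triggered source can still fire, then write a masked-only value to
 * leave a clean state for whatever runs next.
 */
#include <stdint.h>

#define APIC_LVT_MASKED (1u << 16)

/* step 1: mask in place, preserving the programmed vector and mode */
static uint32_t lvt_mask_in_place(uint32_t lvt)
{
        return lvt | APIC_LVT_MASKED;
}

/* step 2: the final value written once everything is quiesced */
static uint32_t lvt_clean_state(void)
{
        return APIC_LVT_MASKED;
}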
+ */ + reg0 = apic_read(APIC_ID); + apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); + apic_write(APIC_ID, reg0 ^ apic->apic_id_mask); + reg1 = apic_read(APIC_ID); + apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); + apic_write(APIC_ID, reg0); + if (reg1 != (reg0 ^ apic->apic_id_mask)) + return 0; + + /* + * The next two are just to see if we have sane values. + * They're only really relevant if we're in Virtual Wire + * compatibility mode, but most boxes are anymore. + */ + reg0 = apic_read(APIC_LVT0); + apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0); + reg1 = apic_read(APIC_LVT1); + apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1); + + return 1; +} + +/** + * sync_Arb_IDs - synchronize APIC bus arbitration IDs + */ +void __init sync_Arb_IDs(void) +{ + /* + * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not + * needed on AMD. + */ + if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; + + /* + * Wait for idle. + */ + apic_wait_icr_idle(); + + apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); + apic_write(APIC_ICR, APIC_DEST_ALLINC | + APIC_INT_LEVELTRIG | APIC_DM_INIT); +} + +/* + * An initial setup of the virtual wire mode. + */ +void __init init_bsp_APIC(void) +{ + unsigned int value; + + /* + * Don't do the setup now if we have a SMP BIOS as the + * through-I/O-APIC virtual wire mode might be active. + */ + if (smp_found_config || !cpu_has_apic) + return; + + /* + * Do not trust the local APIC being empty at bootup. + */ + clear_local_APIC(); + + /* + * Enable APIC. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + value |= APIC_SPIV_APIC_ENABLED; + +#ifdef CONFIG_X86_32 + /* This bit is reserved on P4/Xeon and should be cleared */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + (boot_cpu_data.x86 == 15)) + value &= ~APIC_SPIV_FOCUS_DISABLED; + else +#endif + value |= APIC_SPIV_FOCUS_DISABLED; + value |= SPURIOUS_APIC_VECTOR; + apic_write(APIC_SPIV, value); + + /* + * Set up the virtual wire mode. + */ + apic_write(APIC_LVT0, APIC_DM_EXTINT); + value = APIC_DM_NMI; + if (!lapic_is_integrated()) /* 82489DX */ + value |= APIC_LVT_LEVEL_TRIGGER; + apic_write(APIC_LVT1, value); +} + +static void __cpuinit lapic_setup_esr(void) +{ + unsigned int oldvalue, value, maxlvt; + + if (!lapic_is_integrated()) { + pr_info("No ESR for 82489DX.\n"); + return; + } + + if (apic->disable_esr) { + /* + * Something untraceable is creating bad interrupts on + * secondary quads ... for the moment, just leave the + * ESR disabled - we can't do anything useful with the + * errors anyway - mbligh + */ + pr_info("Leaving ESR disabled.\n"); + return; + } + + maxlvt = lapic_get_maxlvt(); + if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + oldvalue = apic_read(APIC_ESR); + + /* enables sending errors */ + value = ERROR_APIC_VECTOR; + apic_write(APIC_LVTERR, value); + + /* + * spec says clear errors after enabling vector. 
+ */ + if (maxlvt > 3) + apic_write(APIC_ESR, 0); + value = apic_read(APIC_ESR); + if (value != oldvalue) + apic_printk(APIC_VERBOSE, "ESR value before enabling " + "vector: 0x%08x after: 0x%08x\n", + oldvalue, value); +} + + +/** + * setup_local_APIC - setup the local APIC + */ +void __cpuinit setup_local_APIC(void) +{ + unsigned int value; + int i, j; + + if (disable_apic) { + arch_disable_smp_support(); + return; + } + +#ifdef CONFIG_X86_32 + /* Pound the ESR really hard over the head with a big hammer - mbligh */ + if (lapic_is_integrated() && apic->disable_esr) { + apic_write(APIC_ESR, 0); + apic_write(APIC_ESR, 0); + apic_write(APIC_ESR, 0); + apic_write(APIC_ESR, 0); + } +#endif + + preempt_disable(); + + /* + * Double-check whether this APIC is really registered. + * This is meaningless in clustered apic mode, so we skip it. + */ + if (!apic->apic_id_registered()) + BUG(); + + /* + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ + apic->init_apic_ldr(); + + /* + * Set Task Priority to 'accept all'. We never change this + * later on. + */ + value = apic_read(APIC_TASKPRI); + value &= ~APIC_TPRI_MASK; + apic_write(APIC_TASKPRI, value); + + /* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now CPU has serviced that pending interrupt and + * it might not have done the ack_APIC_irq() because it thought, + * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it + * does not clear the ISR bit and cpu thinks it has already serivced + * the interrupt. Hence a vector might get locked. It was noticed + * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + */ + for (i = APIC_ISR_NR - 1; i >= 0; i--) { + value = apic_read(APIC_ISR + i*0x10); + for (j = 31; j >= 0; j--) { + if (value & (1< 1) || + (boot_cpu_data.x86 >= 15)) + break; + goto no_apic; + case X86_VENDOR_INTEL: + if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 || + (boot_cpu_data.x86 == 5 && cpu_has_apic)) + break; + goto no_apic; + default: + goto no_apic; + } + + if (!cpu_has_apic) { + /* + * Over-ride BIOS and try to enable the local APIC only if + * "lapic" specified. + */ + if (!force_enable_local_apic) { + pr_info("Local APIC disabled by BIOS -- " + "you can enable it with \"lapic\"\n"); + return -1; + } + /* + * Some BIOSes disable the local APIC in the APIC_BASE + * MSR. This can only be done in software for Intel P6 or later + * and AMD K7 (Model > 1) or later. 
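/*
 * Note on the hunk above: the rendered text is damaged right after
 * "if (value & (1<"; the '<' appears to have swallowed a span of the diff
 * (apparently through the start of detect_init_APIC()).  From the
 * surrounding comment ("Issue an extra EOI to clear ISR") only the inner
 * bit test can be reconstructed with reasonable confidence; a hedged
 * sketch of that loop:
 *
 *      for (i = APIC_ISR_NR - 1; i >= 0; i--) {
 *              value = apic_read(APIC_ISR + i*0x10);
 *              for (j = 31; j >= 0; j--) {
 *                      if (value & (1u << j))
 *                              ack_APIC_irq();  // extra EOI for a stale ISR bit
 *              }
 *      }
 */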
+ */ + rdmsr(MSR_IA32_APICBASE, l, h); + if (!(l & MSR_IA32_APICBASE_ENABLE)) { + pr_info("Local APIC disabled by BIOS -- reenabling.\n"); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; + wrmsr(MSR_IA32_APICBASE, l, h); + enabled_via_apicbase = 1; + } + } + /* + * The APIC feature bit should now be enabled + * in `cpuid' + */ + features = cpuid_edx(1); + if (!(features & (1 << X86_FEATURE_APIC))) { + pr_warning("Could not enable APIC!\n"); + return -1; + } + set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + + /* The BIOS may have set up the APIC at some other address */ + rdmsr(MSR_IA32_APICBASE, l, h); + if (l & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + + pr_info("Found and enabled local APIC!\n"); + + apic_pm_activate(); + + return 0; + +no_apic: + pr_info("No local APIC present or hardware disabled\n"); + return -1; +} +#endif + +#ifdef CONFIG_X86_64 +void __init early_init_lapic_mapping(void) +{ + unsigned long phys_addr; + + /* + * If no local APIC can be found then go out + * : it means there is no mpatable and MADT + */ + if (!smp_found_config) + return; + + phys_addr = mp_lapic_addr; + + set_fixmap_nocache(FIX_APIC_BASE, phys_addr); + apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", + APIC_BASE, phys_addr); + + /* + * Fetch the APIC ID of the BSP in case we have a + * default configuration (or the MP table is broken). + */ + boot_cpu_physical_apicid = read_apic_id(); +} +#endif + +/** + * init_apic_mappings - initialize APIC mappings + */ +void __init init_apic_mappings(void) +{ +#ifdef CONFIG_X86_X2APIC + if (x2apic) { + boot_cpu_physical_apicid = read_apic_id(); + return; + } +#endif + + /* + * If no local APIC can be found then set up a fake all + * zeroes page to simulate the local APIC and another + * one for the IO-APIC. + */ + if (!smp_found_config && detect_init_APIC()) { + apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); + apic_phys = __pa(apic_phys); + } else + apic_phys = mp_lapic_addr; + + set_fixmap_nocache(FIX_APIC_BASE, apic_phys); + apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", + APIC_BASE, apic_phys); + + /* + * Fetch the APIC ID of the BSP in case we have a + * default configuration (or the MP table is broken). + */ + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = read_apic_id(); +} + +/* + * This initializes the IO-APIC and APIC hardware if this is + * a UP kernel. + */ +int apic_version[MAX_APICS]; + +int __init APIC_init_uniprocessor(void) +{ + if (disable_apic) { + pr_info("Apic disabled\n"); + return -1; + } +#ifdef CONFIG_X86_64 + if (!cpu_has_apic) { + disable_apic = 1; + pr_info("Apic disabled by BIOS\n"); + return -1; + } +#else + if (!smp_found_config && !cpu_has_apic) + return -1; + + /* + * Complain if the BIOS pretends there is one. + */ + if (!cpu_has_apic && + APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { + pr_err("BIOS bug, local APIC 0x%x not detected!...\n", + boot_cpu_physical_apicid); + clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); + return -1; + } +#endif + + enable_IR_x2apic(); +#ifdef CONFIG_X86_64 + default_setup_apic_routing(); +#endif + + verify_local_APIC(); + connect_bsp_APIC(); + +#ifdef CONFIG_X86_64 + apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); +#else + /* + * Hack: In case of kdump, after a crash, kernel might be booting + * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid + * might be zero if read from MP tables. 
Get it from LAPIC. + */ +# ifdef CONFIG_CRASH_DUMP + boot_cpu_physical_apicid = read_apic_id(); +# endif +#endif + physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); + setup_local_APIC(); + +#ifdef CONFIG_X86_IO_APIC + /* + * Now enable IO-APICs, actually call clear_IO_APIC + * We need clear_IO_APIC before enabling error vector + */ + if (!skip_ioapic_setup && nr_ioapics) + enable_IO_APIC(); +#endif + + end_local_APIC_setup(); + +#ifdef CONFIG_X86_IO_APIC + if (smp_found_config && !skip_ioapic_setup && nr_ioapics) + setup_IO_APIC(); + else { + nr_ioapics = 0; + localise_nmi_watchdog(); + } +#else + localise_nmi_watchdog(); +#endif + + setup_boot_clock(); +#ifdef CONFIG_X86_64 + check_nmi_watchdog(); +#endif + + return 0; +} + +/* + * Local APIC interrupts + */ + +/* + * This interrupt should _never_ happen with our APIC/SMP architecture + */ +void smp_spurious_interrupt(struct pt_regs *regs) +{ + u32 v; + + exit_idle(); + irq_enter(); + /* + * Check if this really is a spurious interrupt and ACK it + * if it is a vectored one. Just in case... + * Spurious interrupts should not be ACKed. + */ + v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); + if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) + ack_APIC_irq(); + + inc_irq_stat(irq_spurious_count); + + /* see sw-dev-man vol 3, chapter 7.4.13.5 */ + pr_info("spurious APIC interrupt on CPU#%d, " + "should never happen.\n", smp_processor_id()); + irq_exit(); +} + +/* + * This interrupt should never happen with our APIC/SMP architecture + */ +void smp_error_interrupt(struct pt_regs *regs) +{ + u32 v, v1; + + exit_idle(); + irq_enter(); + /* First tickle the hardware, only then report what went on. -- REW */ + v = apic_read(APIC_ESR); + apic_write(APIC_ESR, 0); + v1 = apic_read(APIC_ESR); + ack_APIC_irq(); + atomic_inc(&irq_err_count); + + /* + * Here is what the APIC error bits mean: + * 0: Send CS error + * 1: Receive CS error + * 2: Send accept error + * 3: Receive accept error + * 4: Reserved + * 5: Send illegal vector + * 6: Received illegal vector + * 7: Illegal register address + */ + pr_debug("APIC error on CPU%d: %02x(%02x)\n", + smp_processor_id(), v , v1); + irq_exit(); +} + +/** + * connect_bsp_APIC - attach the APIC to the interrupt system + */ +void __init connect_bsp_APIC(void) +{ +#ifdef CONFIG_X86_32 + if (pic_mode) { + /* + * Do not trust the local APIC being empty at bootup. + */ + clear_local_APIC(); + /* + * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's + * local APIC to INT and NMI lines. + */ + apic_printk(APIC_VERBOSE, "leaving PIC mode, " + "enabling APIC mode.\n"); + outb(0x70, 0x22); + outb(0x01, 0x23); + } +#endif + if (apic->enable_apic_mode) + apic->enable_apic_mode(); +} + +/** + * disconnect_bsp_APIC - detach the APIC from the interrupt system + * @virt_wire_setup: indicates, whether virtual wire mode is selected + * + * Virtual wire mode is necessary to deliver legacy interrupts even when the + * APIC is disabled. + */ +void disconnect_bsp_APIC(int virt_wire_setup) +{ + unsigned int value; + +#ifdef CONFIG_X86_32 + if (pic_mode) { + /* + * Put the board back into PIC mode (has an effect only on + * certain older boards). Note that APIC interrupts, including + * IPIs, won't work beyond this point! The only exception are + * INIT IPIs. 
+ */ + apic_printk(APIC_VERBOSE, "disabling APIC mode, " + "entering PIC mode.\n"); + outb(0x70, 0x22); + outb(0x00, 0x23); + return; + } +#endif + + /* Go back to Virtual Wire compatibility mode */ + + /* For the spurious interrupt use vector F, and enable it */ + value = apic_read(APIC_SPIV); + value &= ~APIC_VECTOR_MASK; + value |= APIC_SPIV_APIC_ENABLED; + value |= 0xf; + apic_write(APIC_SPIV, value); + + if (!virt_wire_setup) { + /* + * For LVT0 make it edge triggered, active high, + * external and enabled + */ + value = apic_read(APIC_LVT0); + value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); + apic_write(APIC_LVT0, value); + } else { + /* Disable LVT0 */ + apic_write(APIC_LVT0, APIC_LVT_MASKED); + } + + /* + * For LVT1 make it edge triggered, active high, + * nmi and enabled + */ + value = apic_read(APIC_LVT1); + value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | + APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; + value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); + apic_write(APIC_LVT1, value); +} + +void __cpuinit generic_processor_info(int apicid, int version) +{ + int cpu; + + /* + * Validate version + */ + if (version == 0x0) { + pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " + "fixing up to 0x10. (tell your hw vendor)\n", + version); + version = 0x10; + } + apic_version[apicid] = version; + + if (num_processors >= nr_cpu_ids) { + int max = nr_cpu_ids; + int thiscpu = max + disabled_cpus; + + pr_warning( + "ACPI: NR_CPUS/possible_cpus limit of %i reached." + " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); + + disabled_cpus++; + return; + } + + num_processors++; + cpu = cpumask_next_zero(-1, cpu_present_mask); + + if (version != apic_version[boot_cpu_physical_apicid]) + WARN_ONCE(1, + "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", + apic_version[boot_cpu_physical_apicid], cpu, version); + + physid_set(apicid, phys_cpu_present_map); + if (apicid == boot_cpu_physical_apicid) { + /* + * x86_bios_cpu_apicid is required to have processors listed + * in same order as logical cpu numbers. Hence the first + * entry is BSP, and so on. + */ + cpu = 0; + } + if (apicid > max_physical_apicid) + max_physical_apicid = apicid; + +#ifdef CONFIG_X86_32 + /* + * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y + * but we need to work other dependencies like SMP_SUSPEND etc + * before this can be done without some confusion. 
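/*
 * Illustrative sketch (not part of the patch): the two virtual-wire LVT
 * delivery modes that disconnect_bsp_APIC() above programs.  The delivery
 * mode field sits in LVT bits 10:8; both entries end up edge triggered and
 * unmasked, only the mode differs.
 */
#include <stdint.h>

#define APIC_DM_EXTINT  (0x7u << 8)     /* behave like a line from the 8259 */
#define APIC_DM_NMI     (0x4u << 8)     /* deliver as NMI                   */

static uint32_t virt_wire_lvt(int is_lint1)
{
        return is_lint1 ? APIC_DM_NMI : APIC_DM_EXTINT;
}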
+ * if (CPU_HOTPLUG_ENABLED || num_processors > 8) + * - Ashok Raj + */ + if (max_physical_apicid >= 8) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(version)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } +#endif + +#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) + early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; + early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; +#endif + + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); +} + +int hard_smp_processor_id(void) +{ + return read_apic_id(); +} + +void default_init_apic_ldr(void) +{ + unsigned long val; + + apic_write(APIC_DFR, APIC_DFR_VALUE); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); + apic_write(APIC_LDR, val); +} + +#ifdef CONFIG_X86_32 +int default_apicid_to_node(int logical_apicid) +{ +#ifdef CONFIG_SMP + return apicid_2_node[hard_smp_processor_id()]; +#else + return 0; +#endif +} +#endif + +/* + * Power management + */ +#ifdef CONFIG_PM + +static struct { + /* + * 'active' is true if the local APIC was enabled by us and + * not the BIOS; this signifies that we are also responsible + * for disabling it before entering apm/acpi suspend + */ + int active; + /* r/w apic fields */ + unsigned int apic_id; + unsigned int apic_taskpri; + unsigned int apic_ldr; + unsigned int apic_dfr; + unsigned int apic_spiv; + unsigned int apic_lvtt; + unsigned int apic_lvtpc; + unsigned int apic_lvt0; + unsigned int apic_lvt1; + unsigned int apic_lvterr; + unsigned int apic_tmict; + unsigned int apic_tdcr; + unsigned int apic_thmr; +} apic_pm_state; + +static int lapic_suspend(struct sys_device *dev, pm_message_t state) +{ + unsigned long flags; + int maxlvt; + + if (!apic_pm_state.active) + return 0; + + maxlvt = lapic_get_maxlvt(); + + apic_pm_state.apic_id = apic_read(APIC_ID); + apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); + apic_pm_state.apic_ldr = apic_read(APIC_LDR); + apic_pm_state.apic_dfr = apic_read(APIC_DFR); + apic_pm_state.apic_spiv = apic_read(APIC_SPIV); + apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); + if (maxlvt >= 4) + apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); + apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); + apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); + apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); + apic_pm_state.apic_tmict = apic_read(APIC_TMICT); + apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); +#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) + if (maxlvt >= 5) + apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); +#endif + + local_irq_save(flags); + disable_local_APIC(); + local_irq_restore(flags); + return 0; +} + +static int lapic_resume(struct sys_device *dev) +{ + unsigned int l, h; + unsigned long flags; + int maxlvt; + + if (!apic_pm_state.active) + return 0; + + maxlvt = lapic_get_maxlvt(); + + local_irq_save(flags); + +#ifdef CONFIG_X86_X2APIC + if (x2apic) + enable_x2apic(); + else +#endif + { + /* + * Make sure the APICBASE points to the right address + * + * FIXME! This will be wrong if we ever support suspend on + * SMP! We'll need to do this as part of the CPU restore! 
+ */ + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, l, h); + } + + apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); + apic_write(APIC_ID, apic_pm_state.apic_id); + apic_write(APIC_DFR, apic_pm_state.apic_dfr); + apic_write(APIC_LDR, apic_pm_state.apic_ldr); + apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); + apic_write(APIC_SPIV, apic_pm_state.apic_spiv); + apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); + apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); +#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) + if (maxlvt >= 5) + apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); +#endif + if (maxlvt >= 4) + apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); + apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); + apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); + apic_write(APIC_TMICT, apic_pm_state.apic_tmict); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + + local_irq_restore(flags); + + return 0; +} + +/* + * This device has no shutdown method - fully functioning local APICs + * are needed on every CPU up until machine_halt/restart/poweroff. + */ + +static struct sysdev_class lapic_sysclass = { + .name = "lapic", + .resume = lapic_resume, + .suspend = lapic_suspend, +}; + +static struct sys_device device_lapic = { + .id = 0, + .cls = &lapic_sysclass, +}; + +static void __cpuinit apic_pm_activate(void) +{ + apic_pm_state.active = 1; +} + +static int __init init_lapic_sysfs(void) +{ + int error; + + if (!cpu_has_apic) + return 0; + /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ + + error = sysdev_class_register(&lapic_sysclass); + if (!error) + error = sysdev_register(&device_lapic); + return error; +} +device_initcall(init_lapic_sysfs); + +#else /* CONFIG_PM */ + +static void apic_pm_activate(void) { } + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_X86_64 +/* + * apic_is_clustered_box() -- Check if we can expect good TSC + * + * Thus far, the major user of this is IBM's Summit2 series: + * + * Clustered boxes may have unsynced TSC problems if they are + * multi-chassis. Use available data to take a good guess. + * If in doubt, go HPET. + */ +__cpuinit int apic_is_clustered_box(void) +{ + int i, clusters, zeros; + unsigned id; + u16 *bios_cpu_apicid; + DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); + + /* + * there is not this kind of box with AMD CPU yet. + * Some AMD box with quadcore cpu and 8 sockets apicid + * will be [4, 0x23] or [8, 0x27] could be thought to + * vsmp box still need checking... + */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) + return 0; + + bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); + bitmap_zero(clustermap, NUM_APIC_CLUSTERS); + + for (i = 0; i < nr_cpu_ids; i++) { + /* are we being called early in kernel startup? */ + if (bios_cpu_apicid) { + id = bios_cpu_apicid[i]; + } else if (i < nr_cpu_ids) { + if (cpu_present(i)) + id = per_cpu(x86_bios_cpu_apicid, i); + else + continue; + } else + break; + + if (id != BAD_APICID) + __set_bit(APIC_CLUSTERID(id), clustermap); + } + + /* Problem: Partially populated chassis may not have CPUs in some of + * the APIC clusters they have been allocated. Only present CPUs have + * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. + * Since clusters are allocated sequentially, count zeros only if + * they are bounded by ones. 
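+	 *
+	 * E.g. a clustermap of 1 0 1 0 0 ... is counted as three
+	 * clusters: the embedded zero is assumed to be an unpopulated
+	 * cluster of a partially filled chassis, while the trailing
+	 * zeros after the last set bit are never counted.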
+ */ + clusters = 0; + zeros = 0; + for (i = 0; i < NUM_APIC_CLUSTERS; i++) { + if (test_bit(i, clustermap)) { + clusters += 1 + zeros; + zeros = 0; + } else + ++zeros; + } + + /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are + * not guaranteed to be synced between boards + */ + if (is_vsmp_box() && clusters > 1) + return 1; + + /* + * If clusters > 2, then should be multi-chassis. + * May have to revisit this when multi-core + hyperthreaded CPUs come + * out, but AFAIK this will work even for them. + */ + return (clusters > 2); +} +#endif + +/* + * APIC command line parameters + */ +static int __init setup_disableapic(char *arg) +{ + disable_apic = 1; + setup_clear_cpu_cap(X86_FEATURE_APIC); + return 0; +} +early_param("disableapic", setup_disableapic); + +/* same as disableapic, for compatibility */ +static int __init setup_nolapic(char *arg) +{ + return setup_disableapic(arg); +} +early_param("nolapic", setup_nolapic); + +static int __init parse_lapic_timer_c2_ok(char *arg) +{ + local_apic_timer_c2_ok = 1; + return 0; +} +early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); + +static int __init parse_disable_apic_timer(char *arg) +{ + disable_apic_timer = 1; + return 0; +} +early_param("noapictimer", parse_disable_apic_timer); + +static int __init parse_nolapic_timer(char *arg) +{ + disable_apic_timer = 1; + return 0; +} +early_param("nolapic_timer", parse_nolapic_timer); + +static int __init apic_set_verbosity(char *arg) +{ + if (!arg) { +#ifdef CONFIG_X86_64 + skip_ioapic_setup = 0; + return 0; +#endif + return -EINVAL; + } + + if (strcmp("debug", arg) == 0) + apic_verbosity = APIC_DEBUG; + else if (strcmp("verbose", arg) == 0) + apic_verbosity = APIC_VERBOSE; + else { + pr_warning("APIC Verbosity level %s not recognised" + " use apic=verbose or apic=debug\n", arg); + return -EINVAL; + } + + return 0; +} +early_param("apic", apic_set_verbosity); + +static int __init lapic_insert_resource(void) +{ + if (!apic_phys) + return -1; + + /* Put local APIC into the resource map. */ + lapic_resource.start = apic_phys; + lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; + insert_resource(&iomem_resource, &lapic_resource); + + return 0; +} + +/* + * need call insert after e820_reserve_resources() + * that is using request_resource + */ +late_initcall(lapic_insert_resource); diff --git a/arch/x86/kernel/apic/apic_64.c b/arch/x86/kernel/apic/apic_64.c new file mode 100644 index 00000000000..70935dd904d --- /dev/null +++ b/arch/x86/kernel/apic/apic_64.c @@ -0,0 +1,89 @@ +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch probe layer. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +extern struct apic apic_flat; +extern struct apic apic_physflat; +extern struct apic apic_x2xpic_uv_x; +extern struct apic apic_x2apic_phys; +extern struct apic apic_x2apic_cluster; + +struct apic __read_mostly *apic = &apic_flat; +EXPORT_SYMBOL_GPL(apic); + +static struct apic *apic_probe[] __initdata = { +#ifdef CONFIG_X86_UV + &apic_x2apic_uv_x, +#endif +#ifdef CONFIG_X86_X2APIC + &apic_x2apic_phys, + &apic_x2apic_cluster, +#endif + &apic_physflat, + NULL, +}; + +/* + * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 
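+ *
+ * Roughly: stay with the logical "flat" driver as long as every APIC ID
+ * fits into a single 8-bit logical destination (max_physical_apicid < 8),
+ * fall back to "physical flat" otherwise, and only keep an x2apic driver
+ * when interrupt remapping is enabled.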
+ */ +void __init default_setup_apic_routing(void) +{ +#ifdef CONFIG_X86_X2APIC + if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { + if (!intr_remapping_enabled) + apic = &apic_flat; + } +#endif + + if (apic == &apic_flat) { + if (max_physical_apicid >= 8) + apic = &apic_physflat; + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); + } + + if (x86_quirks->update_apic) + x86_quirks->update_apic(); +} + +/* Same for both flat and physical. */ + +void apic_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); +} + +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { + apic = apic_probe[i]; + printk(KERN_INFO "Setting APIC routing to %s.\n", + apic->name); + return 1; + } + } + return 0; +} diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c new file mode 100644 index 00000000000..3b002995e14 --- /dev/null +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -0,0 +1,389 @@ +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Flat APIC subarch code. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ACPI +#include +#endif + +static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 1; +} + +static const struct cpumask *flat_target_cpus(void) +{ + return cpu_online_mask; +} + +static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... 
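+ *
+ * In flat logical mode every CPU simply gets the bit (1UL << cpu) as its
+ * logical ID, which is also why this mode cannot address more than 8
+ * CPUs and physflat has to take over beyond that. (CPU 3, for example,
+ * ends up with logical ID 0x08 in the top byte of APIC_LDR.)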
+ */ +static void flat_init_apic_ldr(void) +{ + unsigned long val; + unsigned long num, id; + + num = smp_processor_id(); + id = 1UL << num; + apic_write(APIC_DFR, APIC_DFR_FLAT); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(id); + apic_write(APIC_LDR, val); +} + +static inline void _flat_send_IPI_mask(unsigned long mask, int vector) +{ + unsigned long flags; + + local_irq_save(flags); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + + _flat_send_IPI_mask(mask, vector); +} + +static void + flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + int cpu = smp_processor_id(); + + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); + + _flat_send_IPI_mask(mask, vector); +} + +static void flat_send_IPI_allbutself(int vector) +{ + int cpu = smp_processor_id(); +#ifdef CONFIG_HOTPLUG_CPU + int hotplug = 1; +#else + int hotplug = 0; +#endif + if (hotplug || vector == NMI_VECTOR) { + if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { + unsigned long mask = cpumask_bits(cpu_online_mask)[0]; + + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); + + _flat_send_IPI_mask(mask, vector); + } + } else if (num_online_cpus() > 1) { + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, + vector, apic->dest_logical); + } +} + +static void flat_send_IPI_all(int vector) +{ + if (vector == NMI_VECTOR) { + flat_send_IPI_mask(cpu_online_mask, vector); + } else { + __default_send_IPI_shortcut(APIC_DEST_ALLINC, + vector, apic->dest_logical); + } +} + +static unsigned int flat_get_apic_id(unsigned long x) +{ + unsigned int id; + + id = (((x)>>24) & 0xFFu); + + return id; +} + +static unsigned long set_apic_id(unsigned int id) +{ + unsigned long x; + + x = ((id & 0xFFu)<<24); + return x; +} + +static unsigned int read_xapic_id(void) +{ + unsigned int id; + + id = flat_get_apic_id(apic_read(APIC_ID)); + return id; +} + +static int flat_apic_id_registered(void) +{ + return physid_isset(read_xapic_id(), phys_cpu_present_map); +} + +static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; +} + +static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; + unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; + + return mask1 & mask2; +} + +static int flat_phys_pkg_id(int initial_apic_id, int index_msb) +{ + return hard_smp_processor_id() >> index_msb; +} + +struct apic apic_flat = { + .name = "flat", + .probe = NULL, + .acpi_madt_oem_check = flat_acpi_madt_oem_check, + .apic_id_registered = flat_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + .irq_dest_mode = 1, /* logical */ + + .target_cpus = flat_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .vector_allocation_domain = flat_vector_allocation_domain, + .init_apic_ldr = flat_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = 
default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = flat_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = flat_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu << 24, + + .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + + .send_IPI_mask = flat_send_IPI_mask, + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, + .send_IPI_allbutself = flat_send_IPI_allbutself, + .send_IPI_all = flat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; + +/* + * Physflat mode is used when there are more than 8 CPUs on a AMD system. + * We cannot use logical delivery in this case because the mask + * overflows, so use physical mode. + */ +static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ +#ifdef CONFIG_ACPI + /* + * Quirk: some x86_64 machines can only use physical APIC mode + * regardless of how many processors are present (x86_64 ES7000 + * is an example). + */ + if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { + printk(KERN_DEBUG "system APIC only can use physical flat"); + return 1; + } +#endif + + return 0; +} + +static const struct cpumask *physflat_target_cpus(void) +{ + return cpu_online_mask; +} + +static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + +static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) +{ + default_send_IPI_mask_sequence_phys(cpumask, vector); +} + +static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, + int vector) +{ + default_send_IPI_mask_allbutself_phys(cpumask, vector); +} + +static void physflat_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); +} + +static void physflat_send_IPI_all(int vector) +{ + physflat_send_IPI_mask(cpu_online_mask, vector); +} + +static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + cpu = cpumask_first(cpumask); + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
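+	 *
+	 * (i.e. pick the first online CPU that is in both masks and
+	 *  return its x86_cpu_to_apicid entry, or BAD_APICID if the
+	 *  intersection is empty.)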
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + + return BAD_APICID; +} + +struct apic apic_physflat = { + + .name = "physical flat", + .probe = NULL, + .acpi_madt_oem_check = physflat_acpi_madt_oem_check, + .apic_id_registered = flat_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = 0, /* physical */ + + .target_cpus = physflat_target_cpus, + .disable_esr = 0, + .dest_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .vector_allocation_domain = physflat_vector_allocation_domain, + /* not needed, but shouldn't hurt: */ + .init_apic_ldr = flat_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = flat_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = flat_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFu << 24, + + .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, + + .send_IPI_mask = physflat_send_IPI_mask, + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, + .send_IPI_allbutself = physflat_send_IPI_allbutself, + .send_IPI_all = physflat_send_IPI_all, + .send_IPI_self = apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c new file mode 100644 index 00000000000..00e6071cefc --- /dev/null +++ b/arch/x86/kernel/apic/io_apic.c @@ -0,0 +1,4160 @@ +/* + * Intel IO-APIC support for multi-Pentium hosts. + * + * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo + * + * Many thanks to Stig Venaas for trying out countless experimental + * patches and reporting/debugging problems patiently! + * + * (c) 1999, Multiple IO-APIC support, developed by + * Ken-ichi Yaku and + * Hidemi Kishimoto , + * further tested and cleaned up by Zach Brown + * and Ingo Molnar + * + * Fixes + * Maciej W. Rozycki : Bits for genuine 82489DX APICs; + * thanks to Eric Gilmore + * and Rolf G. Tews + * for testing these extensively + * Paul Diefenbaugh : Added full ACPI support + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* time_after() */ +#ifdef CONFIG_ACPI +#include +#endif +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define __apicdebuginit(type) static type __init + +/* + * Is the SiS APIC rmw bug present ? 
+ * -1 = don't know, 0 = no, 1 = yes + */ +int sis_apic_bug = -1; + +static DEFINE_SPINLOCK(ioapic_lock); +static DEFINE_SPINLOCK(vector_lock); + +/* + * # of IRQ routing registers + */ +int nr_ioapic_registers[MAX_IO_APICS]; + +/* I/O APIC entries */ +struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; +int nr_ioapics; + +/* MP IRQ source entries */ +struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +/* # of MP IRQ source entries */ +int mp_irq_entries; + +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) +int mp_bus_id_to_type[MAX_MP_BUSSES]; +#endif + +DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + +int skip_ioapic_setup; + +void arch_disable_smp_support(void) +{ +#ifdef CONFIG_PCI + noioapicquirk = 1; + noioapicreroute = -1; +#endif + skip_ioapic_setup = 1; +} + +static int __init parse_noapic(char *str) +{ + /* disable IO-APIC */ + arch_disable_smp_support(); + return 0; +} +early_param("noapic", parse_noapic); + +struct irq_pin_list; + +/* + * This is performance-critical, we want to do it O(1) + * + * the indexing order of this array favors 1:1 mappings + * between pins and IRQs. + */ + +struct irq_pin_list { + int apic, pin; + struct irq_pin_list *next; +}; + +static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) +{ + struct irq_pin_list *pin; + int node; + + node = cpu_to_node(cpu); + + pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); + + return pin; +} + +struct irq_cfg { + struct irq_pin_list *irq_2_pin; + cpumask_var_t domain; + cpumask_var_t old_domain; + unsigned move_cleanup_count; + u8 vector; + u8 move_in_progress : 1; +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + u8 move_desc_pending : 1; +#endif +}; + +/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ +#ifdef CONFIG_SPARSE_IRQ +static struct irq_cfg irq_cfgx[] = { +#else +static struct irq_cfg irq_cfgx[NR_IRQS] = { +#endif + [0] = { .vector = IRQ0_VECTOR, }, + [1] = { .vector = IRQ1_VECTOR, }, + [2] = { .vector = IRQ2_VECTOR, }, + [3] = { .vector = IRQ3_VECTOR, }, + [4] = { .vector = IRQ4_VECTOR, }, + [5] = { .vector = IRQ5_VECTOR, }, + [6] = { .vector = IRQ6_VECTOR, }, + [7] = { .vector = IRQ7_VECTOR, }, + [8] = { .vector = IRQ8_VECTOR, }, + [9] = { .vector = IRQ9_VECTOR, }, + [10] = { .vector = IRQ10_VECTOR, }, + [11] = { .vector = IRQ11_VECTOR, }, + [12] = { .vector = IRQ12_VECTOR, }, + [13] = { .vector = IRQ13_VECTOR, }, + [14] = { .vector = IRQ14_VECTOR, }, + [15] = { .vector = IRQ15_VECTOR, }, +}; + +int __init arch_early_irq_init(void) +{ + struct irq_cfg *cfg; + struct irq_desc *desc; + int count; + int i; + + cfg = irq_cfgx; + count = ARRAY_SIZE(irq_cfgx); + + for (i = 0; i < count; i++) { + desc = irq_to_desc(i); + desc->chip_data = &cfg[i]; + alloc_bootmem_cpumask_var(&cfg[i].domain); + alloc_bootmem_cpumask_var(&cfg[i].old_domain); + if (i < NR_IRQS_LEGACY) + cpumask_setall(cfg[i].domain); + } + + return 0; +} + +#ifdef CONFIG_SPARSE_IRQ +static struct irq_cfg *irq_cfg(unsigned int irq) +{ + struct irq_cfg *cfg = NULL; + struct irq_desc *desc; + + desc = irq_to_desc(irq); + if (desc) + cfg = desc->chip_data; + + return cfg; +} + +static struct irq_cfg *get_one_free_irq_cfg(int cpu) +{ + struct irq_cfg *cfg; + int node; + + node = cpu_to_node(cpu); + + cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); + if (cfg) { + if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { + kfree(cfg); + cfg = NULL; + } else if (!alloc_cpumask_var_node(&cfg->old_domain, + GFP_ATOMIC, node)) { + free_cpumask_var(cfg->domain); + kfree(cfg); + cfg = NULL; + } else { + cpumask_clear(cfg->domain); + 
cpumask_clear(cfg->old_domain); + } + } + + return cfg; +} + +int arch_init_chip_data(struct irq_desc *desc, int cpu) +{ + struct irq_cfg *cfg; + + cfg = desc->chip_data; + if (!cfg) { + desc->chip_data = get_one_free_irq_cfg(cpu); + if (!desc->chip_data) { + printk(KERN_ERR "can not alloc irq_cfg\n"); + BUG_ON(1); + } + } + + return 0; +} + +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + +static void +init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) +{ + struct irq_pin_list *old_entry, *head, *tail, *entry; + + cfg->irq_2_pin = NULL; + old_entry = old_cfg->irq_2_pin; + if (!old_entry) + return; + + entry = get_one_free_irq_2_pin(cpu); + if (!entry) + return; + + entry->apic = old_entry->apic; + entry->pin = old_entry->pin; + head = entry; + tail = entry; + old_entry = old_entry->next; + while (old_entry) { + entry = get_one_free_irq_2_pin(cpu); + if (!entry) { + entry = head; + while (entry) { + head = entry->next; + kfree(entry); + entry = head; + } + /* still use the old one */ + return; + } + entry->apic = old_entry->apic; + entry->pin = old_entry->pin; + tail->next = entry; + tail = entry; + old_entry = old_entry->next; + } + + tail->next = NULL; + cfg->irq_2_pin = head; +} + +static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) +{ + struct irq_pin_list *entry, *next; + + if (old_cfg->irq_2_pin == cfg->irq_2_pin) + return; + + entry = old_cfg->irq_2_pin; + + while (entry) { + next = entry->next; + kfree(entry); + entry = next; + } + old_cfg->irq_2_pin = NULL; +} + +void arch_init_copy_chip_data(struct irq_desc *old_desc, + struct irq_desc *desc, int cpu) +{ + struct irq_cfg *cfg; + struct irq_cfg *old_cfg; + + cfg = get_one_free_irq_cfg(cpu); + + if (!cfg) + return; + + desc->chip_data = cfg; + + old_cfg = old_desc->chip_data; + + memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); + + init_copy_irq_2_pin(old_cfg, cfg, cpu); +} + +static void free_irq_cfg(struct irq_cfg *old_cfg) +{ + kfree(old_cfg); +} + +void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) +{ + struct irq_cfg *old_cfg, *cfg; + + old_cfg = old_desc->chip_data; + cfg = desc->chip_data; + + if (old_cfg == cfg) + return; + + if (old_cfg) { + free_irq_2_pin(old_cfg, cfg); + free_irq_cfg(old_cfg); + old_desc->chip_data = NULL; + } +} + +static void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg = desc->chip_data; + + if (!cfg->move_in_progress) { + /* it means that domain is not changed */ + if (!cpumask_intersects(desc->affinity, mask)) + cfg->move_desc_pending = 1; + } +} +#endif + +#else +static struct irq_cfg *irq_cfg(unsigned int irq) +{ + return irq < nr_irqs ? 
irq_cfgx + irq : NULL; +} + +#endif + +#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC +static inline void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) +{ +} +#endif + +struct io_apic { + unsigned int index; + unsigned int unused[3]; + unsigned int data; +}; + +static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) +{ + return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) + + (mp_ioapics[idx].apicaddr & ~PAGE_MASK); +} + +static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + writel(reg, &io_apic->index); + return readl(&io_apic->data); +} + +static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + writel(reg, &io_apic->index); + writel(value, &io_apic->data); +} + +/* + * Re-write a value: to be used for read-modify-write + * cycles where the read already set up the index register. + * + * Older SiS APIC requires we rewrite the index register + */ +static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) +{ + struct io_apic __iomem *io_apic = io_apic_base(apic); + + if (sis_apic_bug) + writel(reg, &io_apic->index); + writel(value, &io_apic->data); +} + +static bool io_apic_level_ack_pending(struct irq_cfg *cfg) +{ + struct irq_pin_list *entry; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + entry = cfg->irq_2_pin; + for (;;) { + unsigned int reg; + int pin; + + if (!entry) + break; + pin = entry->pin; + reg = io_apic_read(entry->apic, 0x10 + pin*2); + /* Is the remote IRR bit set? */ + if (reg & IO_APIC_REDIR_REMOTE_IRR) { + spin_unlock_irqrestore(&ioapic_lock, flags); + return true; + } + if (!entry->next) + break; + entry = entry->next; + } + spin_unlock_irqrestore(&ioapic_lock, flags); + + return false; +} + +union entry_union { + struct { u32 w1, w2; }; + struct IO_APIC_route_entry entry; +}; + +static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) +{ + union entry_union eu; + unsigned long flags; + spin_lock_irqsave(&ioapic_lock, flags); + eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); + eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); + spin_unlock_irqrestore(&ioapic_lock, flags); + return eu.entry; +} + +/* + * When we write a new IO APIC routing entry, we need to write the high + * word first! If the mask bit in the low word is clear, we will enable + * the interrupt, and we need to make sure the entry is fully populated + * before that happens. + */ +static void +__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +{ + union entry_union eu; + eu.entry = e; + io_apic_write(apic, 0x11 + 2*pin, eu.w2); + io_apic_write(apic, 0x10 + 2*pin, eu.w1); +} + +void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +{ + unsigned long flags; + spin_lock_irqsave(&ioapic_lock, flags); + __ioapic_write_entry(apic, pin, e); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +/* + * When we mask an IO APIC routing entry, we need to write the low + * word first, in order to set the mask bit before we change the + * high bits! 
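+ *
+ * (The mask bit lives in the low dword of the redirection entry, so by
+ *  writing that dword first the entry can no longer fire while the
+ *  destination in the high dword is being rewritten.)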
+ */ +static void ioapic_mask_entry(int apic, int pin) +{ + unsigned long flags; + union entry_union eu = { .entry.mask = 1 }; + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(apic, 0x10 + 2*pin, eu.w1); + io_apic_write(apic, 0x11 + 2*pin, eu.w2); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +#ifdef CONFIG_SMP +static void send_cleanup_vector(struct irq_cfg *cfg) +{ + cpumask_var_t cleanup_mask; + + if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { + unsigned int i; + cfg->move_cleanup_count = 0; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + cfg->move_cleanup_count++; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); + } else { + cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); + cfg->move_cleanup_count = cpumask_weight(cleanup_mask); + apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + free_cpumask_var(cleanup_mask); + } + cfg->move_in_progress = 0; +} + +static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) +{ + int apic, pin; + struct irq_pin_list *entry; + u8 vector = cfg->vector; + + entry = cfg->irq_2_pin; + for (;;) { + unsigned int reg; + + if (!entry) + break; + + apic = entry->apic; + pin = entry->pin; +#ifdef CONFIG_INTR_REMAP + /* + * With interrupt-remapping, destination information comes + * from interrupt-remapping table entry. + */ + if (!irq_remapped(irq)) + io_apic_write(apic, 0x11 + pin*2, dest); +#else + io_apic_write(apic, 0x11 + pin*2, dest); +#endif + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; + reg |= vector; + io_apic_modify(apic, 0x10 + pin*2, reg); + if (!entry->next) + break; + entry = entry->next; + } +} + +static int +assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); + +/* + * Either sets desc->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that, or returns BAD_APICID and + * leaves desc->affinity untouched. + */ +static unsigned int +set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + unsigned int irq; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return BAD_APICID; + + irq = desc->irq; + cfg = desc->chip_data; + if (assign_irq_vector(irq, cfg, mask)) + return BAD_APICID; + + cpumask_and(desc->affinity, cfg->domain, mask); + set_extra_move_desc(desc, mask); + + return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask); +} + +static void +set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + unsigned long flags; + unsigned int dest; + unsigned int irq; + + irq = desc->irq; + cfg = desc->chip_data; + + spin_lock_irqsave(&ioapic_lock, flags); + dest = set_desc_affinity(desc, mask); + if (dest != BAD_APICID) { + /* Only the high 8 bits are valid. */ + dest = SET_APIC_LOGICAL_ID(dest); + __target_IO_APIC_irq(irq, dest, cfg); + } + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void +set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + set_ioapic_affinity_irq_desc(desc, mask); +} +#endif /* CONFIG_SMP */ + +/* + * The common case is 1:1 IRQ<->pin mappings. Sometimes there are + * shared ISA-space IRQs, so we have to support them. We are super + * fast in the common case, and fast for shared ISA-space IRQs. 
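+ *
+ * Each irq_cfg carries a singly linked irq_2_pin list: the common case
+ * is a single (apic, pin) entry, and further entries are only chained
+ * on for pins that share the same ISA-space IRQ.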
+ */ +static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) +{ + struct irq_pin_list *entry; + + entry = cfg->irq_2_pin; + if (!entry) { + entry = get_one_free_irq_2_pin(cpu); + if (!entry) { + printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", + apic, pin); + return; + } + cfg->irq_2_pin = entry; + entry->apic = apic; + entry->pin = pin; + return; + } + + while (entry->next) { + /* not again, please */ + if (entry->apic == apic && entry->pin == pin) + return; + + entry = entry->next; + } + + entry->next = get_one_free_irq_2_pin(cpu); + entry = entry->next; + entry->apic = apic; + entry->pin = pin; +} + +/* + * Reroute an IRQ to a different pin. + */ +static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu, + int oldapic, int oldpin, + int newapic, int newpin) +{ + struct irq_pin_list *entry = cfg->irq_2_pin; + int replaced = 0; + + while (entry) { + if (entry->apic == oldapic && entry->pin == oldpin) { + entry->apic = newapic; + entry->pin = newpin; + replaced = 1; + /* every one is different, right? */ + break; + } + entry = entry->next; + } + + /* why? call replace before add? */ + if (!replaced) + add_pin_to_irq_cpu(cfg, cpu, newapic, newpin); +} + +static inline void io_apic_modify_irq(struct irq_cfg *cfg, + int mask_and, int mask_or, + void (*final)(struct irq_pin_list *entry)) +{ + int pin; + struct irq_pin_list *entry; + + for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { + unsigned int reg; + pin = entry->pin; + reg = io_apic_read(entry->apic, 0x10 + pin * 2); + reg &= mask_and; + reg |= mask_or; + io_apic_modify(entry->apic, 0x10 + pin * 2, reg); + if (final) + final(entry); + } +} + +static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); +} + +#ifdef CONFIG_X86_64 +static void io_apic_sync(struct irq_pin_list *entry) +{ + /* + * Synchronize the IO-APIC and the CPU by doing + * a dummy read from the IO-APIC + */ + struct io_apic __iomem *io_apic; + io_apic = io_apic_base(entry->apic); + readl(&io_apic->data); +} + +static void __mask_IO_APIC_irq(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); +} +#else /* CONFIG_X86_32 */ +static void __mask_IO_APIC_irq(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL); +} + +static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER, + IO_APIC_REDIR_MASKED, NULL); +} + +static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, + IO_APIC_REDIR_LEVEL_TRIGGER, NULL); +} +#endif /* CONFIG_X86_32 */ + +static void mask_IO_APIC_irq_desc(struct irq_desc *desc) +{ + struct irq_cfg *cfg = desc->chip_data; + unsigned long flags; + + BUG_ON(!cfg); + + spin_lock_irqsave(&ioapic_lock, flags); + __mask_IO_APIC_irq(cfg); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) +{ + struct irq_cfg *cfg = desc->chip_data; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + __unmask_IO_APIC_irq(cfg); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void mask_IO_APIC_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + mask_IO_APIC_irq_desc(desc); +} +static void unmask_IO_APIC_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + unmask_IO_APIC_irq_desc(desc); +} + +static void clear_IO_APIC_pin(unsigned int apic, unsigned int 
pin) +{ + struct IO_APIC_route_entry entry; + + /* Check delivery_mode to be sure we're not clearing an SMI pin */ + entry = ioapic_read_entry(apic, pin); + if (entry.delivery_mode == dest_SMI) + return; + /* + * Disable it in the IO-APIC irq-routing table: + */ + ioapic_mask_entry(apic, pin); +} + +static void clear_IO_APIC (void) +{ + int apic, pin; + + for (apic = 0; apic < nr_ioapics; apic++) + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) + clear_IO_APIC_pin(apic, pin); +} + +#ifdef CONFIG_X86_32 +/* + * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to + * specific CPU-side IRQs. + */ + +#define MAX_PIRQS 8 +static int pirq_entries[MAX_PIRQS] = { + [0 ... MAX_PIRQS - 1] = -1 +}; + +static int __init ioapic_pirq_setup(char *str) +{ + int i, max; + int ints[MAX_PIRQS+1]; + + get_options(str, ARRAY_SIZE(ints), ints); + + apic_printk(APIC_VERBOSE, KERN_INFO + "PIRQ redirection, working around broken MP-BIOS.\n"); + max = MAX_PIRQS; + if (ints[0] < MAX_PIRQS) + max = ints[0]; + + for (i = 0; i < max; i++) { + apic_printk(APIC_VERBOSE, KERN_DEBUG + "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); + /* + * PIRQs are mapped upside down, usually. + */ + pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; + } + return 1; +} + +__setup("pirq=", ioapic_pirq_setup); +#endif /* CONFIG_X86_32 */ + +#ifdef CONFIG_INTR_REMAP +/* I/O APIC RTE contents at the OS boot up */ +static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; + +/* + * Saves and masks all the unmasked IO-APIC RTE's + */ +int save_mask_IO_APIC_setup(void) +{ + union IO_APIC_reg_01 reg_01; + unsigned long flags; + int apic, pin; + + /* + * The number of IO-APIC IRQ registers (== #pins): + */ + for (apic = 0; apic < nr_ioapics; apic++) { + spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(apic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + nr_ioapic_registers[apic] = reg_01.bits.entries+1; + } + + for (apic = 0; apic < nr_ioapics; apic++) { + early_ioapic_entries[apic] = + kzalloc(sizeof(struct IO_APIC_route_entry) * + nr_ioapic_registers[apic], GFP_KERNEL); + if (!early_ioapic_entries[apic]) + goto nomem; + } + + for (apic = 0; apic < nr_ioapics; apic++) + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { + struct IO_APIC_route_entry entry; + + entry = early_ioapic_entries[apic][pin] = + ioapic_read_entry(apic, pin); + if (!entry.mask) { + entry.mask = 1; + ioapic_write_entry(apic, pin, entry); + } + } + + return 0; + +nomem: + while (apic >= 0) + kfree(early_ioapic_entries[apic--]); + memset(early_ioapic_entries, 0, + ARRAY_SIZE(early_ioapic_entries)); + + return -ENOMEM; +} + +void restore_IO_APIC_setup(void) +{ + int apic, pin; + + for (apic = 0; apic < nr_ioapics; apic++) { + if (!early_ioapic_entries[apic]) + break; + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) + ioapic_write_entry(apic, pin, + early_ioapic_entries[apic][pin]); + kfree(early_ioapic_entries[apic]); + early_ioapic_entries[apic] = NULL; + } +} + +void reinit_intr_remapped_IO_APIC(int intr_remapping) +{ + /* + * for now plain restore of previous settings. + * TBD: In the case of OS enabling interrupt-remapping, + * IO-APIC RTE's need to be setup to point to interrupt-remapping + * table entries. for now, do a plain restore, and wait for + * the setup_IO_APIC_irqs() to do proper initialization. + */ + restore_IO_APIC_setup(); +} +#endif + +/* + * Find the IRQ entry number of a certain pin. 
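+ *
+ * (Scans the MP-table interrupt source entries for one of the given
+ *  type that is routed to this I/O APIC (or to MP_APIC_ALL) and
+ *  ends up on the given pin; returns -1 if there is no such entry.)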
+ */ +static int find_irq_entry(int apic, int pin, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].irqtype == type && + (mp_irqs[i].dstapic == mp_ioapics[apic].apicid || + mp_irqs[i].dstapic == MP_APIC_ALL) && + mp_irqs[i].dstirq == pin) + return i; + + return -1; +} + +/* + * Find the pin to which IRQ[irq] (ISA) is connected + */ +static int __init find_isa_irq_pin(int irq, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + if (test_bit(lbus, mp_bus_not_pci) && + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) + + return mp_irqs[i].dstirq; + } + return -1; +} + +static int __init find_isa_irq_apic(int irq, int type) +{ + int i; + + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + if (test_bit(lbus, mp_bus_not_pci) && + (mp_irqs[i].irqtype == type) && + (mp_irqs[i].srcbusirq == irq)) + break; + } + if (i < mp_irq_entries) { + int apic; + for(apic = 0; apic < nr_ioapics; apic++) { + if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic) + return apic; + } + } + + return -1; +} + +/* + * Find a specific PCI IRQ entry. + * Not an __init, possibly needed by modules + */ +static int pin_2_irq(int idx, int apic, int pin); + +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) +{ + int apic, i, best_guess = -1; + + apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", + bus, slot, pin); + if (test_bit(bus, mp_bus_not_pci)) { + apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus); + return -1; + } + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + for (apic = 0; apic < nr_ioapics; apic++) + if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || + mp_irqs[i].dstapic == MP_APIC_ALL) + break; + + if (!test_bit(lbus, mp_bus_not_pci) && + !mp_irqs[i].irqtype && + (bus == lbus) && + (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { + int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); + + if (!(apic || IO_APIC_IRQ(irq))) + continue; + + if (pin == (mp_irqs[i].srcbusirq & 3)) + return irq; + /* + * Use the first all-but-pin matching entry as a + * best-guess fuzzy result for broken mptables. + */ + if (best_guess < 0) + best_guess = irq; + } + } + return best_guess; +} + +EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) +/* + * EISA Edge/Level control register, ELCR + */ +static int EISA_ELCR(unsigned int irq) +{ + if (irq < NR_IRQS_LEGACY) { + unsigned int port = 0x4d0 + (irq >> 3); + return (inb(port) >> (irq & 7)) & 1; + } + apic_printk(APIC_VERBOSE, KERN_INFO + "Broken MPtable reports ISA irq %d\n", irq); + return 0; +} + +#endif + +/* ISA interrupts are always polarity zero edge triggered, + * when listed as conforming in the MP table. */ + +#define default_ISA_trigger(idx) (0) +#define default_ISA_polarity(idx) (0) + +/* EISA interrupts are always polarity zero and can be edge or level + * trigger depending on the ELCR value. If an interrupt is listed as + * EISA conforming in the MP table, that means its trigger type must + * be read in from the ELCR */ + +#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) +#define default_EISA_polarity(idx) default_ISA_polarity(idx) + +/* PCI interrupts are always polarity one level triggered, + * when listed as conforming in the MP table. 
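+ * (The 0/1 values follow the I/O APIC redirection entry encoding:
+ * polarity 1 = active low, trigger 1 = level triggered, so PCI is
+ * level/low while ISA above is edge/high, i.e. 0/0.)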
*/ + +#define default_PCI_trigger(idx) (1) +#define default_PCI_polarity(idx) (1) + +/* MCA interrupts are always polarity zero level triggered, + * when listed as conforming in the MP table. */ + +#define default_MCA_trigger(idx) (1) +#define default_MCA_polarity(idx) default_ISA_polarity(idx) + +static int MPBIOS_polarity(int idx) +{ + int bus = mp_irqs[idx].srcbus; + int polarity; + + /* + * Determine IRQ line polarity (high active or low active): + */ + switch (mp_irqs[idx].irqflag & 3) + { + case 0: /* conforms, ie. bus-type dependent polarity */ + if (test_bit(bus, mp_bus_not_pci)) + polarity = default_ISA_polarity(idx); + else + polarity = default_PCI_polarity(idx); + break; + case 1: /* high active */ + { + polarity = 0; + break; + } + case 2: /* reserved */ + { + printk(KERN_WARNING "broken BIOS!!\n"); + polarity = 1; + break; + } + case 3: /* low active */ + { + polarity = 1; + break; + } + default: /* invalid */ + { + printk(KERN_WARNING "broken BIOS!!\n"); + polarity = 1; + break; + } + } + return polarity; +} + +static int MPBIOS_trigger(int idx) +{ + int bus = mp_irqs[idx].srcbus; + int trigger; + + /* + * Determine IRQ trigger mode (edge or level sensitive): + */ + switch ((mp_irqs[idx].irqflag>>2) & 3) + { + case 0: /* conforms, ie. bus-type dependent */ + if (test_bit(bus, mp_bus_not_pci)) + trigger = default_ISA_trigger(idx); + else + trigger = default_PCI_trigger(idx); +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) + switch (mp_bus_id_to_type[bus]) { + case MP_BUS_ISA: /* ISA pin */ + { + /* set before the switch */ + break; + } + case MP_BUS_EISA: /* EISA pin */ + { + trigger = default_EISA_trigger(idx); + break; + } + case MP_BUS_PCI: /* PCI pin */ + { + /* set before the switch */ + break; + } + case MP_BUS_MCA: /* MCA pin */ + { + trigger = default_MCA_trigger(idx); + break; + } + default: + { + printk(KERN_WARNING "broken BIOS!!\n"); + trigger = 1; + break; + } + } +#endif + break; + case 1: /* edge */ + { + trigger = 0; + break; + } + case 2: /* reserved */ + { + printk(KERN_WARNING "broken BIOS!!\n"); + trigger = 1; + break; + } + case 3: /* level */ + { + trigger = 1; + break; + } + default: /* invalid */ + { + printk(KERN_WARNING "broken BIOS!!\n"); + trigger = 0; + break; + } + } + return trigger; +} + +static inline int irq_polarity(int idx) +{ + return MPBIOS_polarity(idx); +} + +static inline int irq_trigger(int idx) +{ + return MPBIOS_trigger(idx); +} + +int (*ioapic_renumber_irq)(int ioapic, int irq); +static int pin_2_irq(int idx, int apic, int pin) +{ + int irq, i; + int bus = mp_irqs[idx].srcbus; + + /* + * Debugging check, we are in big trouble if this message pops up! + */ + if (mp_irqs[idx].dstirq != pin) + printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); + + if (test_bit(bus, mp_bus_not_pci)) { + irq = mp_irqs[idx].srcbusirq; + } else { + /* + * PCI IRQs are mapped in order + */ + i = irq = 0; + while (i < apic) + irq += nr_ioapic_registers[i++]; + irq += pin; + /* + * For MPS mode, so far only needed by ES7000 platform + */ + if (ioapic_renumber_irq) + irq = ioapic_renumber_irq(apic, irq); + } + +#ifdef CONFIG_X86_32 + /* + * PCI IRQ command line redirection. Yes, limits are hardcoded. 
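+	 * (pirq_entries[] is filled in from the "pirq=" boot option by
+	 *  ioapic_pirq_setup() above; a nonzero entry overrides the
+	 *  computed IRQ number for pins 16-23, a zero entry marks the
+	 *  PIRQ as disabled.)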
+ */ + if ((pin >= 16) && (pin <= 23)) { + if (pirq_entries[pin-16] != -1) { + if (!pirq_entries[pin-16]) { + apic_printk(APIC_VERBOSE, KERN_DEBUG + "disabling PIRQ%d\n", pin-16); + } else { + irq = pirq_entries[pin-16]; + apic_printk(APIC_VERBOSE, KERN_DEBUG + "using PIRQ%d -> IRQ %d\n", + pin-16, irq); + } + } + } +#endif + + return irq; +} + +void lock_vector_lock(void) +{ + /* Used to the online set of cpus does not change + * during assign_irq_vector. + */ + spin_lock(&vector_lock); +} + +void unlock_vector_lock(void) +{ + spin_unlock(&vector_lock); +} + +static int +__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) +{ + /* + * NOTE! The local APIC isn't very good at handling + * multiple interrupts at the same interrupt level. + * As the interrupt level is determined by taking the + * vector number and shifting that right by 4, we + * want to spread these out a bit so that they don't + * all fall in the same interrupt level. + * + * Also, we've got to be careful not to trash gate + * 0x80, because int 0x80 is hm, kind of importantish. ;) + */ + static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; + unsigned int old_vector; + int cpu, err; + cpumask_var_t tmp_mask; + + if ((cfg->move_in_progress) || cfg->move_cleanup_count) + return -EBUSY; + + if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) + return -ENOMEM; + + old_vector = cfg->vector; + if (old_vector) { + cpumask_and(tmp_mask, mask, cpu_online_mask); + cpumask_and(tmp_mask, cfg->domain, tmp_mask); + if (!cpumask_empty(tmp_mask)) { + free_cpumask_var(tmp_mask); + return 0; + } + } + + /* Only try and allocate irqs on cpus that are present */ + err = -ENOSPC; + for_each_cpu_and(cpu, mask, cpu_online_mask) { + int new_cpu; + int vector, offset; + + apic->vector_allocation_domain(cpu, tmp_mask); + + vector = current_vector; + offset = current_offset; +next: + vector += 8; + if (vector >= first_system_vector) { + /* If out of vectors on large boxen, must share them. */ + offset = (offset + 1) % 8; + vector = FIRST_DEVICE_VECTOR + offset; + } + if (unlikely(current_vector == vector)) + continue; + + if (test_bit(vector, used_vectors)) + goto next; + + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) + if (per_cpu(vector_irq, new_cpu)[vector] != -1) + goto next; + /* Found one! 
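+	 *
+	 * i.e. commit the vector: remember where to continue searching
+	 * next time, record the old domain so the old vector can be
+	 * cleaned up once the move has happened, and publish the new
+	 * vector in every target CPU's vector_irq[] table.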
*/ + current_vector = vector; + current_offset = offset; + if (old_vector) { + cfg->move_in_progress = 1; + cpumask_copy(cfg->old_domain, cfg->domain); + } + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) + per_cpu(vector_irq, new_cpu)[vector] = irq; + cfg->vector = vector; + cpumask_copy(cfg->domain, tmp_mask); + err = 0; + break; + } + free_cpumask_var(tmp_mask); + return err; +} + +static int +assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) +{ + int err; + unsigned long flags; + + spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, cfg, mask); + spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void __clear_irq_vector(int irq, struct irq_cfg *cfg) +{ + int cpu, vector; + + BUG_ON(!cfg->vector); + + vector = cfg->vector; + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) + per_cpu(vector_irq, cpu)[vector] = -1; + + cfg->vector = 0; + cpumask_clear(cfg->domain); + + if (likely(!cfg->move_in_progress)) + return; + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; + vector++) { + if (per_cpu(vector_irq, cpu)[vector] != irq) + continue; + per_cpu(vector_irq, cpu)[vector] = -1; + break; + } + } + cfg->move_in_progress = 0; +} + +void __setup_vector_irq(int cpu) +{ + /* Initialize vector_irq on a new cpu */ + /* This function must be called with vector_lock held */ + int irq, vector; + struct irq_cfg *cfg; + struct irq_desc *desc; + + /* Mark the inuse vectors */ + for_each_irq_desc(irq, desc) { + cfg = desc->chip_data; + if (!cpumask_test_cpu(cpu, cfg->domain)) + continue; + vector = cfg->vector; + per_cpu(vector_irq, cpu)[vector] = irq; + } + /* Mark the free vectors */ + for (vector = 0; vector < NR_VECTORS; ++vector) { + irq = per_cpu(vector_irq, cpu)[vector]; + if (irq < 0) + continue; + + cfg = irq_cfg(irq); + if (!cpumask_test_cpu(cpu, cfg->domain)) + per_cpu(vector_irq, cpu)[vector] = -1; + } +} + +static struct irq_chip ioapic_chip; +#ifdef CONFIG_INTR_REMAP +static struct irq_chip ir_ioapic_chip; +#endif + +#define IOAPIC_AUTO -1 +#define IOAPIC_EDGE 0 +#define IOAPIC_LEVEL 1 + +#ifdef CONFIG_X86_32 +static inline int IO_APIC_irq_trigger(int irq) +{ + int apic, idx, pin; + + for (apic = 0; apic < nr_ioapics; apic++) { + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { + idx = find_irq_entry(apic, pin, mp_INT); + if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) + return irq_trigger(idx); + } + } + /* + * nonexistent IRQs are edge default + */ + return 0; +} +#else +static inline int IO_APIC_irq_trigger(int irq) +{ + return 1; +} +#endif + +static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) +{ + + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) + desc->status |= IRQ_LEVEL; + else + desc->status &= ~IRQ_LEVEL; + +#ifdef CONFIG_INTR_REMAP + if (irq_remapped(irq)) { + desc->status |= IRQ_MOVE_PCNTXT; + if (trigger) + set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, + handle_fasteoi_irq, + "fasteoi"); + else + set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, + handle_edge_irq, "edge"); + return; + } +#endif + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_fasteoi_irq, + "fasteoi"); + else + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_edge_irq, "edge"); +} + +int setup_ioapic_entry(int apic_id, int irq, + struct IO_APIC_route_entry *entry, 
+ unsigned int destination, int trigger, + int polarity, int vector) +{ + /* + * add it to the IO-APIC irq-routing table: + */ + memset(entry,0,sizeof(*entry)); + +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) { + struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); + struct irte irte; + struct IR_IO_APIC_route_entry *ir_entry = + (struct IR_IO_APIC_route_entry *) entry; + int index; + + if (!iommu) + panic("No mapping iommu for ioapic %d\n", apic_id); + + index = alloc_irte(iommu, irq, 1); + if (index < 0) + panic("Failed to allocate IRTE for ioapic %d\n", apic_id); + + memset(&irte, 0, sizeof(irte)); + + irte.present = 1; + irte.dst_mode = apic->irq_dest_mode; + irte.trigger_mode = trigger; + irte.dlvry_mode = apic->irq_delivery_mode; + irte.vector = vector; + irte.dest_id = IRTE_DEST(destination); + + modify_irte(irq, &irte); + + ir_entry->index2 = (index >> 15) & 0x1; + ir_entry->zero = 0; + ir_entry->format = 1; + ir_entry->index = (index & 0x7fff); + } else +#endif + { + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; + entry->dest = destination; + } + + entry->mask = 0; /* enable IRQ */ + entry->trigger = trigger; + entry->polarity = polarity; + entry->vector = vector; + + /* Mask level triggered irqs. + * Use IRQ_DELAYED_DISABLE for edge triggered irqs. + */ + if (trigger) + entry->mask = 1; + return 0; +} + +static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, + int trigger, int polarity) +{ + struct irq_cfg *cfg; + struct IO_APIC_route_entry entry; + unsigned int dest; + + if (!IO_APIC_IRQ(irq)) + return; + + cfg = desc->chip_data; + + if (assign_irq_vector(irq, cfg, apic->target_cpus())) + return; + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + + apic_printk(APIC_VERBOSE,KERN_DEBUG + "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " + "IRQ %d Mode:%i Active:%i)\n", + apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector, + irq, trigger, polarity); + + + if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry, + dest, trigger, polarity, cfg->vector)) { + printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", + mp_ioapics[apic_id].apicid, pin); + __clear_irq_vector(irq, cfg); + return; + } + + ioapic_register_intr(irq, desc, trigger); + if (irq < NR_IRQS_LEGACY) + disable_8259A_irq(irq); + + ioapic_write_entry(apic_id, pin, entry); +} + +static void __init setup_IO_APIC_irqs(void) +{ + int apic_id, pin, idx, irq; + int notcon = 0; + struct irq_desc *desc; + struct irq_cfg *cfg; + int cpu = boot_cpu_id; + + apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { + for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { + + idx = find_irq_entry(apic_id, pin, mp_INT); + if (idx == -1) { + if (!notcon) { + notcon = 1; + apic_printk(APIC_VERBOSE, + KERN_DEBUG " %d-%d", + mp_ioapics[apic_id].apicid, pin); + } else + apic_printk(APIC_VERBOSE, " %d-%d", + mp_ioapics[apic_id].apicid, pin); + continue; + } + if (notcon) { + apic_printk(APIC_VERBOSE, + " (apicid-pin) not connected\n"); + notcon = 0; + } + + irq = pin_2_irq(idx, apic_id, pin); + + /* + * Skip the timer IRQ if there's a quirk handler + * installed and if it returns 1: + */ + if (apic->multi_timer_check && + apic->multi_timer_check(apic_id, irq)) + continue; + + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc for %d\n", irq); + continue; + } + cfg = desc->chip_data; + 
add_pin_to_irq_cpu(cfg, cpu, apic_id, pin); + + setup_IO_APIC_irq(apic_id, pin, irq, desc, + irq_trigger(idx), irq_polarity(idx)); + } + } + + if (notcon) + apic_printk(APIC_VERBOSE, + " (apicid-pin) not connected\n"); +} + +/* + * Set up the timer pin, possibly with the 8259A-master behind. + */ +static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, + int vector) +{ + struct IO_APIC_route_entry entry; + +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) + return; +#endif + + memset(&entry, 0, sizeof(entry)); + + /* + * We use logical delivery to get the timer IRQ + * to the first CPU. + */ + entry.dest_mode = apic->irq_dest_mode; + entry.mask = 0; /* don't mask IRQ for edge */ + entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); + entry.delivery_mode = apic->irq_delivery_mode; + entry.polarity = 0; + entry.trigger = 0; + entry.vector = vector; + + /* + * The timer IRQ doesn't have to know that behind the + * scene we may have a 8259A-master in AEOI mode ... + */ + set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); + + /* + * Add it to the IO-APIC irq-routing table: + */ + ioapic_write_entry(apic_id, pin, entry); +} + + +__apicdebuginit(void) print_IO_APIC(void) +{ + int apic, i; + union IO_APIC_reg_00 reg_00; + union IO_APIC_reg_01 reg_01; + union IO_APIC_reg_02 reg_02; + union IO_APIC_reg_03 reg_03; + unsigned long flags; + struct irq_cfg *cfg; + struct irq_desc *desc; + unsigned int irq; + + if (apic_verbosity == APIC_QUIET) + return; + + printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); + for (i = 0; i < nr_ioapics; i++) + printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", + mp_ioapics[i].apicid, nr_ioapic_registers[i]); + + /* + * We are a bit conservative about what we expect. We have to + * know about every hardware change ASAP. + */ + printk(KERN_INFO "testing the IO APIC.......................\n"); + + for (apic = 0; apic < nr_ioapics; apic++) { + + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(apic, 0); + reg_01.raw = io_apic_read(apic, 1); + if (reg_01.bits.version >= 0x10) + reg_02.raw = io_apic_read(apic, 2); + if (reg_01.bits.version >= 0x20) + reg_03.raw = io_apic_read(apic, 3); + spin_unlock_irqrestore(&ioapic_lock, flags); + + printk("\n"); + printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); + printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); + printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); + printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); + printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); + + printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); + printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); + + printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); + printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); + + /* + * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, + * but the value of reg_02 is read as the previous read register + * value, so ignore it if reg_02 == reg_01. + */ + if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { + printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); + printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); + } + + /* + * Some Intel chipsets with IO APIC VERSION of 0x2? 
don't have reg_02 + * or reg_03, but the value of reg_0[23] is read as the previous read + * register value, so ignore it if reg_03 == reg_0[12]. + */ + if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && + reg_03.raw != reg_01.raw) { + printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); + printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); + } + + printk(KERN_DEBUG ".... IRQ redirection table:\n"); + + printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" + " Stat Dmod Deli Vect: \n"); + + for (i = 0; i <= reg_01.bits.entries; i++) { + struct IO_APIC_route_entry entry; + + entry = ioapic_read_entry(apic, i); + + printk(KERN_DEBUG " %02x %03X ", + i, + entry.dest + ); + + printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", + entry.mask, + entry.trigger, + entry.irr, + entry.polarity, + entry.delivery_status, + entry.dest_mode, + entry.delivery_mode, + entry.vector + ); + } + } + printk(KERN_DEBUG "IRQ to pin mappings:\n"); + for_each_irq_desc(irq, desc) { + struct irq_pin_list *entry; + + cfg = desc->chip_data; + entry = cfg->irq_2_pin; + if (!entry) + continue; + printk(KERN_DEBUG "IRQ%d ", irq); + for (;;) { + printk("-> %d:%d", entry->apic, entry->pin); + if (!entry->next) + break; + entry = entry->next; + } + printk("\n"); + } + + printk(KERN_INFO ".................................... done.\n"); + + return; +} + +__apicdebuginit(void) print_APIC_bitfield(int base) +{ + unsigned int v; + int i, j; + + if (apic_verbosity == APIC_QUIET) + return; + + printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG); + for (i = 0; i < 8; i++) { + v = apic_read(base + i*0x10); + for (j = 0; j < 32; j++) { + if (v & (1< 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + + v = apic_read(APIC_ESR); + printk(KERN_DEBUG "... APIC ESR: %08x\n", v); + } + + icr = apic_icr_read(); + printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); + printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); + + v = apic_read(APIC_LVTT); + printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); + + if (maxlvt > 3) { /* PC is LVT#4. */ + v = apic_read(APIC_LVTPC); + printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); + } + v = apic_read(APIC_LVT0); + printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); + v = apic_read(APIC_LVT1); + printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); + + if (maxlvt > 2) { /* ERR is LVT#3. */ + v = apic_read(APIC_LVTERR); + printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); + } + + v = apic_read(APIC_TMICT); + printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); + v = apic_read(APIC_TMCCT); + printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); + v = apic_read(APIC_TDCR); + printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); + printk("\n"); +} + +__apicdebuginit(void) print_all_local_APICs(void) +{ + int cpu; + + preempt_disable(); + for_each_online_cpu(cpu) + smp_call_function_single(cpu, print_local_APIC, NULL, 1); + preempt_enable(); +} + +__apicdebuginit(void) print_PIC(void) +{ + unsigned int v; + unsigned long flags; + + if (apic_verbosity == APIC_QUIET) + return; + + printk(KERN_DEBUG "\nprinting PIC contents\n"); + + spin_lock_irqsave(&i8259A_lock, flags); + + v = inb(0xa1) << 8 | inb(0x21); + printk(KERN_DEBUG "... PIC IMR: %04x\n", v); + + v = inb(0xa0) << 8 | inb(0x20); + printk(KERN_DEBUG "... PIC IRR: %04x\n", v); + + outb(0x0b,0xa0); + outb(0x0b,0x20); + v = inb(0xa0) << 8 | inb(0x20); + outb(0x0a,0xa0); + outb(0x0a,0x20); + + spin_unlock_irqrestore(&i8259A_lock, flags); + + printk(KERN_DEBUG "... 
PIC ISR: %04x\n", v); + + v = inb(0x4d1) << 8 | inb(0x4d0); + printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); +} + +__apicdebuginit(int) print_all_ICs(void) +{ + print_PIC(); + print_all_local_APICs(); + print_IO_APIC(); + + return 0; +} + +fs_initcall(print_all_ICs); + + +/* Where if anywhere is the i8259 connect in external int mode */ +static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; + +void __init enable_IO_APIC(void) +{ + union IO_APIC_reg_01 reg_01; + int i8259_apic, i8259_pin; + int apic; + unsigned long flags; + + /* + * The number of IO-APIC IRQ registers (== #pins): + */ + for (apic = 0; apic < nr_ioapics; apic++) { + spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(apic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + nr_ioapic_registers[apic] = reg_01.bits.entries+1; + } + for(apic = 0; apic < nr_ioapics; apic++) { + int pin; + /* See if any of the pins is in ExtINT mode */ + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { + struct IO_APIC_route_entry entry; + entry = ioapic_read_entry(apic, pin); + + /* If the interrupt line is enabled and in ExtInt mode + * I have found the pin where the i8259 is connected. + */ + if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { + ioapic_i8259.apic = apic; + ioapic_i8259.pin = pin; + goto found_i8259; + } + } + } + found_i8259: + /* Look to see what if the MP table has reported the ExtINT */ + /* If we could not find the appropriate pin by looking at the ioapic + * the i8259 probably is not connected the ioapic but give the + * mptable a chance anyway. + */ + i8259_pin = find_isa_irq_pin(0, mp_ExtINT); + i8259_apic = find_isa_irq_apic(0, mp_ExtINT); + /* Trust the MP table if nothing is setup in the hardware */ + if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { + printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); + ioapic_i8259.pin = i8259_pin; + ioapic_i8259.apic = i8259_apic; + } + /* Complain if the MP table and the hardware disagree */ + if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && + (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) + { + printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); + } + + /* + * Do not trust the IO-APIC being empty at bootup + */ + clear_IO_APIC(); +} + +/* + * Not an __init, needed by the reboot code + */ +void disable_IO_APIC(void) +{ + /* + * Clear the IO-APIC before rebooting: + */ + clear_IO_APIC(); + + /* + * If the i8259 is routed through an IOAPIC + * Put that IOAPIC in virtual wire mode + * so legacy interrupts can be delivered. + */ + if (ioapic_i8259.pin != -1) { + struct IO_APIC_route_entry entry; + + memset(&entry, 0, sizeof(entry)); + entry.mask = 0; /* Enabled */ + entry.trigger = 0; /* Edge */ + entry.irr = 0; + entry.polarity = 0; /* High */ + entry.delivery_status = 0; + entry.dest_mode = 0; /* Physical */ + entry.delivery_mode = dest_ExtINT; /* ExtInt */ + entry.vector = 0; + entry.dest = read_apic_id(); + + /* + * Add it to the IO-APIC irq-routing table: + */ + ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); + } + + disconnect_bsp_APIC(ioapic_i8259.pin != -1); +} + +#ifdef CONFIG_X86_32 +/* + * function to set the IO-APIC physical IDs based on the + * values stored in the MPC table. 
+ * + * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 + */ + +static void __init setup_ioapic_ids_from_mpc(void) +{ + union IO_APIC_reg_00 reg_00; + physid_mask_t phys_id_present_map; + int apic_id; + int i; + unsigned char old_id; + unsigned long flags; + + if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) + return; + + /* + * Don't check I/O APIC IDs for xAPIC systems. They have + * no meaning without the serial APIC bus. + */ + if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return; + /* + * This is broken; anything with a real cpu count has to + * circumvent this idiocy regardless. + */ + phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map); + + /* + * Set the IOAPIC ID to the value stored in the MPC table. + */ + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { + + /* Read the register 0 value */ + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(apic_id, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + old_id = mp_ioapics[apic_id].apicid; + + if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) { + printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", + apic_id, mp_ioapics[apic_id].apicid); + printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", + reg_00.bits.ID); + mp_ioapics[apic_id].apicid = reg_00.bits.ID; + } + + /* + * Sanity check, is the ID really free? Every APIC in a + * system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. + */ + if (apic->check_apicid_used(phys_id_present_map, + mp_ioapics[apic_id].apicid)) { + printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", + apic_id, mp_ioapics[apic_id].apicid); + for (i = 0; i < get_physical_broadcast(); i++) + if (!physid_isset(i, phys_id_present_map)) + break; + if (i >= get_physical_broadcast()) + panic("Max APIC ID exceeded!\n"); + printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", + i); + physid_set(i, phys_id_present_map); + mp_ioapics[apic_id].apicid = i; + } else { + physid_mask_t tmp; + tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid); + apic_printk(APIC_VERBOSE, "Setting %d in the " + "phys_id_present_map\n", + mp_ioapics[apic_id].apicid); + physids_or(phys_id_present_map, phys_id_present_map, tmp); + } + + + /* + * We need to adjust the IRQ routing table + * if the ID changed. + */ + if (old_id != mp_ioapics[apic_id].apicid) + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].dstapic == old_id) + mp_irqs[i].dstapic + = mp_ioapics[apic_id].apicid; + + /* + * Read the right value from the MPC table and + * write it into the ID register. 
+ */ + apic_printk(APIC_VERBOSE, KERN_INFO + "...changing IO-APIC physical APIC ID to %d ...", + mp_ioapics[apic_id].apicid); + + reg_00.bits.ID = mp_ioapics[apic_id].apicid; + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(apic_id, 0, reg_00.raw); + spin_unlock_irqrestore(&ioapic_lock, flags); + + /* + * Sanity check + */ + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(apic_id, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) + printk("could not set ID!\n"); + else + apic_printk(APIC_VERBOSE, " ok.\n"); + } +} +#endif + +int no_timer_check __initdata; + +static int __init notimercheck(char *s) +{ + no_timer_check = 1; + return 1; +} +__setup("no_timer_check", notimercheck); + +/* + * There is a nasty bug in some older SMP boards, their mptable lies + * about the timer IRQ. We do the following to work around the situation: + * + * - timer IRQ defaults to IO-APIC IRQ + * - if this function detects that timer IRQs are defunct, then we fall + * back to ISA timer IRQs + */ +static int __init timer_irq_works(void) +{ + unsigned long t1 = jiffies; + unsigned long flags; + + if (no_timer_check) + return 1; + + local_save_flags(flags); + local_irq_enable(); + /* Let ten ticks pass... */ + mdelay((10 * 1000) / HZ); + local_irq_restore(flags); + + /* + * Expect a few ticks at least, to be sure some possible + * glue logic does not lock up after one or two first + * ticks in a non-ExtINT mode. Also the local APIC + * might have cached one ExtINT interrupt. Finally, at + * least one tick may be lost due to delays. + */ + + /* jiffies wrap? */ + if (time_after(jiffies, t1 + 4)) + return 1; + return 0; +} + +/* + * In the SMP+IOAPIC case it might happen that there are an unspecified + * number of pending IRQ events unhandled. These cases are very rare, + * so we 'resend' these IRQs via IPIs, to the same CPU. It's much + * better to do it this way as thus we do not have to be aware of + * 'pending' interrupts in the IRQ path, except at this point. + */ +/* + * Edge triggered needs to resend any interrupt + * that was delayed but this is now handled in the device + * independent code. + */ + +/* + * Starting up a edge-triggered IO-APIC interrupt is + * nasty - we need to make sure that we get the edge. + * If it is already asserted for some reason, we need + * return 1 to indicate that is was pending. + * + * This is not complete - we should be able to fake + * an edge even if it isn't on the 8259A... + */ + +static unsigned int startup_ioapic_irq(unsigned int irq) +{ + int was_pending = 0; + unsigned long flags; + struct irq_cfg *cfg; + + spin_lock_irqsave(&ioapic_lock, flags); + if (irq < NR_IRQS_LEGACY) { + disable_8259A_irq(irq); + if (i8259A_irq_pending(irq)) + was_pending = 1; + } + cfg = irq_cfg(irq); + __unmask_IO_APIC_irq(cfg); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return was_pending; +} + +#ifdef CONFIG_X86_64 +static int ioapic_retrigger_irq(unsigned int irq) +{ + + struct irq_cfg *cfg = irq_cfg(irq); + unsigned long flags; + + spin_lock_irqsave(&vector_lock, flags); + apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); + spin_unlock_irqrestore(&vector_lock, flags); + + return 1; +} +#else +static int ioapic_retrigger_irq(unsigned int irq) +{ + apic->send_IPI_self(irq_cfg(irq)->vector); + + return 1; +} +#endif + +/* + * Level and edge triggered IO-APIC interrupts need different handling, + * so we use two separate IRQ descriptors. 
Edge triggered IRQs can be + * handled with the level-triggered descriptor, but that one has slightly + * more overhead. Level-triggered interrupts cannot be handled with the + * edge-triggered handler, without risking IRQ storms and other ugly + * races. + */ + +#ifdef CONFIG_SMP + +#ifdef CONFIG_INTR_REMAP +static void ir_irq_migration(struct work_struct *work); + +static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); + +/* + * Migrate the IO-APIC irq in the presence of intr-remapping. + * + * For edge triggered, irq migration is a simple atomic update(of vector + * and cpu destination) of IRTE and flush the hardware cache. + * + * For level triggered, we need to modify the io-apic RTE aswell with the update + * vector information, along with modifying IRTE with vector and destination. + * So irq migration for level triggered is little bit more complex compared to + * edge triggered migration. But the good news is, we use the same algorithm + * for level triggered migration as we have today, only difference being, + * we now initiate the irq migration from process context instead of the + * interrupt context. + * + * In future, when we do a directed EOI (combined with cpu EOI broadcast + * suppression) to the IO-APIC, level triggered irq migration will also be + * as simple as edge triggered migration and we can do the irq migration + * with a simple atomic update to IO-APIC RTE. + */ +static void +migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + struct irte irte; + int modify_ioapic_rte; + unsigned int dest; + unsigned long flags; + unsigned int irq; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return; + + irq = desc->irq; + if (get_irte(irq, &irte)) + return; + + cfg = desc->chip_data; + if (assign_irq_vector(irq, cfg, mask)) + return; + + set_extra_move_desc(desc, mask); + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); + + modify_ioapic_rte = desc->status & IRQ_LEVEL; + if (modify_ioapic_rte) { + spin_lock_irqsave(&ioapic_lock, flags); + __target_IO_APIC_irq(irq, dest, cfg); + spin_unlock_irqrestore(&ioapic_lock, flags); + } + + irte.vector = cfg->vector; + irte.dest_id = IRTE_DEST(dest); + + /* + * Modified the IRTE and flushes the Interrupt entry cache. + */ + modify_irte(irq, &irte); + + if (cfg->move_in_progress) + send_cleanup_vector(cfg); + + cpumask_copy(desc->affinity, mask); +} + +static int migrate_irq_remapped_level_desc(struct irq_desc *desc) +{ + int ret = -1; + struct irq_cfg *cfg = desc->chip_data; + + mask_IO_APIC_irq_desc(desc); + + if (io_apic_level_ack_pending(cfg)) { + /* + * Interrupt in progress. Migrating irq now will change the + * vector information in the IO-APIC RTE and that will confuse + * the EOI broadcast performed by cpu. + * So, delay the irq migration to the next instance. + */ + schedule_delayed_work(&ir_migration_work, 1); + goto unmask; + } + + /* everthing is clear. 
we have right of way */ + migrate_ioapic_irq_desc(desc, desc->pending_mask); + + ret = 0; + desc->status &= ~IRQ_MOVE_PENDING; + cpumask_clear(desc->pending_mask); + +unmask: + unmask_IO_APIC_irq_desc(desc); + + return ret; +} + +static void ir_irq_migration(struct work_struct *work) +{ + unsigned int irq; + struct irq_desc *desc; + + for_each_irq_desc(irq, desc) { + if (desc->status & IRQ_MOVE_PENDING) { + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + if (!desc->chip->set_affinity || + !(desc->status & IRQ_MOVE_PENDING)) { + desc->status &= ~IRQ_MOVE_PENDING; + spin_unlock_irqrestore(&desc->lock, flags); + continue; + } + + desc->chip->set_affinity(irq, desc->pending_mask); + spin_unlock_irqrestore(&desc->lock, flags); + } + } +} + +/* + * Migrates the IRQ destination in the process context. + */ +static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, + const struct cpumask *mask) +{ + if (desc->status & IRQ_LEVEL) { + desc->status |= IRQ_MOVE_PENDING; + cpumask_copy(desc->pending_mask, mask); + migrate_irq_remapped_level_desc(desc); + return; + } + + migrate_ioapic_irq_desc(desc, mask); +} +static void set_ir_ioapic_affinity_irq(unsigned int irq, + const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + + set_ir_ioapic_affinity_irq_desc(desc, mask); +} +#endif + +asmlinkage void smp_irq_move_cleanup_interrupt(void) +{ + unsigned vector, me; + + ack_APIC_irq(); + exit_idle(); + irq_enter(); + + me = smp_processor_id(); + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + unsigned int irq; + struct irq_desc *desc; + struct irq_cfg *cfg; + irq = __get_cpu_var(vector_irq)[vector]; + + if (irq == -1) + continue; + + desc = irq_to_desc(irq); + if (!desc) + continue; + + cfg = irq_cfg(irq); + spin_lock(&desc->lock); + if (!cfg->move_cleanup_count) + goto unlock; + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + goto unlock; + + __get_cpu_var(vector_irq)[vector] = -1; + cfg->move_cleanup_count--; +unlock: + spin_unlock(&desc->lock); + } + + irq_exit(); +} + +static void irq_complete_move(struct irq_desc **descp) +{ + struct irq_desc *desc = *descp; + struct irq_cfg *cfg = desc->chip_data; + unsigned vector, me; + + if (likely(!cfg->move_in_progress)) { +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + if (likely(!cfg->move_desc_pending)) + return; + + /* domain has not changed, but affinity did */ + me = smp_processor_id(); + if (cpumask_test_cpu(me, desc->affinity)) { + *descp = desc = move_irq_desc(desc, me); + /* get the new one */ + cfg = desc->chip_data; + cfg->move_desc_pending = 0; + } +#endif + return; + } + + vector = ~get_irq_regs()->orig_ax; + me = smp_processor_id(); + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + *descp = desc = move_irq_desc(desc, me); + /* get the new one */ + cfg = desc->chip_data; +#endif + send_cleanup_vector(cfg); + } +} +#else +static inline void irq_complete_move(struct irq_desc **descp) {} +#endif + +#ifdef CONFIG_INTR_REMAP +static void ack_x2apic_level(unsigned int irq) +{ + ack_x2APIC_irq(); +} + +static void ack_x2apic_edge(unsigned int irq) +{ + ack_x2APIC_irq(); +} + +#endif + +static void ack_apic_edge(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + irq_complete_move(&desc); + move_native_irq(irq); + ack_APIC_irq(); +} + +atomic_t irq_mis_count; + +static void ack_apic_level(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + +#ifdef CONFIG_X86_32 + unsigned long v; + int 
i; +#endif + struct irq_cfg *cfg; + int do_unmask_irq = 0; + + irq_complete_move(&desc); +#ifdef CONFIG_GENERIC_PENDING_IRQ + /* If we are moving the irq we need to mask it */ + if (unlikely(desc->status & IRQ_MOVE_PENDING)) { + do_unmask_irq = 1; + mask_IO_APIC_irq_desc(desc); + } +#endif + +#ifdef CONFIG_X86_32 + /* + * It appears there is an erratum which affects at least version 0x11 + * of I/O APIC (that's the 82093AA and cores integrated into various + * chipsets). Under certain conditions a level-triggered interrupt is + * erroneously delivered as edge-triggered one but the respective IRR + * bit gets set nevertheless. As a result the I/O unit expects an EOI + * message but it will never arrive and further interrupts are blocked + * from the source. The exact reason is so far unknown, but the + * phenomenon was observed when two consecutive interrupt requests + * from a given source get delivered to the same CPU and the source is + * temporarily disabled in between. + * + * A workaround is to simulate an EOI message manually. We achieve it + * by setting the trigger mode to edge and then to level when the edge + * trigger mode gets detected in the TMR of a local APIC for a + * level-triggered interrupt. We mask the source for the time of the + * operation to prevent an edge-triggered interrupt escaping meanwhile. + * The idea is from Manfred Spraul. --macro + */ + cfg = desc->chip_data; + i = cfg->vector; + + v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); +#endif + + /* + * We must acknowledge the irq before we move it or the acknowledge will + * not propagate properly. + */ + ack_APIC_irq(); + + /* Now we can move and renable the irq */ + if (unlikely(do_unmask_irq)) { + /* Only migrate the irq if the ack has been received. + * + * On rare occasions the broadcast level triggered ack gets + * delayed going to ioapics, and if we reprogram the + * vector while Remote IRR is still set the irq will never + * fire again. + * + * To prevent this scenario we read the Remote IRR bit + * of the ioapic. This has two effects. + * - On any sane system the read of the ioapic will + * flush writes (and acks) going to the ioapic from + * this cpu. + * - We get to see if the ACK has actually been delivered. + * + * Based on failed experiments of reprogramming the + * ioapic entry from outside of irq context starting + * with masking the ioapic entry and then polling until + * Remote IRR was clear before reprogramming the + * ioapic I don't trust the Remote IRR bit to be + * completey accurate. + * + * However there appears to be no other way to plug + * this race, so if the Remote IRR bit is not + * accurate and is causing problems then it is a hardware bug + * and you can go talk to the chipset vendor about it. 
+ */ + cfg = desc->chip_data; + if (!io_apic_level_ack_pending(cfg)) + move_masked_irq(irq); + unmask_IO_APIC_irq_desc(desc); + } + +#ifdef CONFIG_X86_32 + if (!(v & (1 << (i & 0x1f)))) { + atomic_inc(&irq_mis_count); + spin_lock(&ioapic_lock); + __mask_and_edge_IO_APIC_irq(cfg); + __unmask_and_level_IO_APIC_irq(cfg); + spin_unlock(&ioapic_lock); + } +#endif +} + +static struct irq_chip ioapic_chip __read_mostly = { + .name = "IO-APIC", + .startup = startup_ioapic_irq, + .mask = mask_IO_APIC_irq, + .unmask = unmask_IO_APIC_irq, + .ack = ack_apic_edge, + .eoi = ack_apic_level, +#ifdef CONFIG_SMP + .set_affinity = set_ioapic_affinity_irq, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +#ifdef CONFIG_INTR_REMAP +static struct irq_chip ir_ioapic_chip __read_mostly = { + .name = "IR-IO-APIC", + .startup = startup_ioapic_irq, + .mask = mask_IO_APIC_irq, + .unmask = unmask_IO_APIC_irq, + .ack = ack_x2apic_edge, + .eoi = ack_x2apic_level, +#ifdef CONFIG_SMP + .set_affinity = set_ir_ioapic_affinity_irq, +#endif + .retrigger = ioapic_retrigger_irq, +}; +#endif + +static inline void init_IO_APIC_traps(void) +{ + int irq; + struct irq_desc *desc; + struct irq_cfg *cfg; + + /* + * NOTE! The local APIC isn't very good at handling + * multiple interrupts at the same interrupt level. + * As the interrupt level is determined by taking the + * vector number and shifting that right by 4, we + * want to spread these out a bit so that they don't + * all fall in the same interrupt level. + * + * Also, we've got to be careful not to trash gate + * 0x80, because int 0x80 is hm, kind of importantish. ;) + */ + for_each_irq_desc(irq, desc) { + cfg = desc->chip_data; + if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { + /* + * Hmm.. We don't have an entry for this, + * so default to an old-fashioned 8259 + * interrupt if we can.. + */ + if (irq < NR_IRQS_LEGACY) + make_8259A_irq(irq); + else + /* Strange. Oh, well.. */ + desc->chip = &no_irq_chip; + } + } +} + +/* + * The local APIC irq-chip implementation: + */ + +static void mask_lapic_irq(unsigned int irq) +{ + unsigned long v; + + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v | APIC_LVT_MASKED); +} + +static void unmask_lapic_irq(unsigned int irq) +{ + unsigned long v; + + v = apic_read(APIC_LVT0); + apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); +} + +static void ack_lapic_irq(unsigned int irq) +{ + ack_APIC_irq(); +} + +static struct irq_chip lapic_chip __read_mostly = { + .name = "local-APIC", + .mask = mask_lapic_irq, + .unmask = unmask_lapic_irq, + .ack = ack_lapic_irq, +}; + +static void lapic_register_intr(int irq, struct irq_desc *desc) +{ + desc->status &= ~IRQ_LEVEL; + set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, + "edge"); +} + +static void __init setup_nmi(void) +{ + /* + * Dirty trick to enable the NMI watchdog ... + * We put the 8259A master into AEOI mode and + * unmask on all local APICs LVT0 as NMI. + * + * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') + * is from Maciej W. Rozycki - so we do not have to EOI from + * the NMI handler or the timer interrupt. + */ + apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); + + enable_NMI_through_LVT0(); + + apic_printk(APIC_VERBOSE, " done.\n"); +} + +/* + * This looks a bit hackish but it's about the only one way of sending + * a few INTA cycles to 8259As and any associated glue logic. ICR does + * not support the ExtINT mode, unfortunately. 
We need to send these + * cycles as some i82489DX-based boards have glue logic that keeps the + * 8259A interrupt line asserted until INTA. --macro + */ +static inline void __init unlock_ExtINT_logic(void) +{ + int apic, pin, i; + struct IO_APIC_route_entry entry0, entry1; + unsigned char save_control, save_freq_select; + + pin = find_isa_irq_pin(8, mp_INT); + if (pin == -1) { + WARN_ON_ONCE(1); + return; + } + apic = find_isa_irq_apic(8, mp_INT); + if (apic == -1) { + WARN_ON_ONCE(1); + return; + } + + entry0 = ioapic_read_entry(apic, pin); + clear_IO_APIC_pin(apic, pin); + + memset(&entry1, 0, sizeof(entry1)); + + entry1.dest_mode = 0; /* physical delivery */ + entry1.mask = 0; /* unmask IRQ now */ + entry1.dest = hard_smp_processor_id(); + entry1.delivery_mode = dest_ExtINT; + entry1.polarity = entry0.polarity; + entry1.trigger = 0; + entry1.vector = 0; + + ioapic_write_entry(apic, pin, entry1); + + save_control = CMOS_READ(RTC_CONTROL); + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, + RTC_FREQ_SELECT); + CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); + + i = 100; + while (i-- > 0) { + mdelay(10); + if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) + i -= 10; + } + + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + clear_IO_APIC_pin(apic, pin); + + ioapic_write_entry(apic, pin, entry0); +} + +static int disable_timer_pin_1 __initdata; +/* Actually the next is obsolete, but keep it for paranoid reasons -AK */ +static int __init disable_timer_pin_setup(char *arg) +{ + disable_timer_pin_1 = 1; + return 0; +} +early_param("disable_timer_pin_1", disable_timer_pin_setup); + +int timer_through_8259 __initdata; + +/* + * This code may look a bit paranoid, but it's supposed to cooperate with + * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ + * is so screwy. Thanks to Brian Perkins for testing/hacking this beast + * fanatically on his truly buggy board. + * + * FIXME: really need to revamp this for all platforms. + */ +static inline void __init check_timer(void) +{ + struct irq_desc *desc = irq_to_desc(0); + struct irq_cfg *cfg = desc->chip_data; + int cpu = boot_cpu_id; + int apic1, pin1, apic2, pin2; + unsigned long flags; + int no_pin1 = 0; + + local_irq_save(flags); + + /* + * get/set the timer IRQ vector: + */ + disable_8259A_irq(0); + assign_irq_vector(0, cfg, apic->target_cpus()); + + /* + * As IRQ0 is to be enabled in the 8259A, the virtual + * wire has to be disabled in the local APIC. Also + * timer interrupts need to be acknowledged manually in + * the 8259A for the i82489DX when using the NMI + * watchdog as that APIC treats NMIs as level-triggered. + * The AEOI mode will finish them in the 8259A + * automatically. + */ + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); + init_8259A(1); +#ifdef CONFIG_X86_32 + { + unsigned int ver; + + ver = apic_read(APIC_LVR); + ver = GET_APIC_VERSION(ver); + timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); + } +#endif + + pin1 = find_isa_irq_pin(0, mp_INT); + apic1 = find_isa_irq_apic(0, mp_INT); + pin2 = ioapic_i8259.pin; + apic2 = ioapic_i8259.apic; + + apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " + "apic1=%d pin1=%d apic2=%d pin2=%d\n", + cfg->vector, apic1, pin1, apic2, pin2); + + /* + * Some BIOS writers are clueless and report the ExtINTA + * I/O APIC input from the cascaded 8259A as the timer + * interrupt input. 
So just in case, if only one pin + * was found above, try it both directly and through the + * 8259A. + */ + if (pin1 == -1) { +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) + panic("BIOS bug: timer not connected to IO-APIC"); +#endif + pin1 = pin2; + apic1 = apic2; + no_pin1 = 1; + } else if (pin2 == -1) { + pin2 = pin1; + apic2 = apic1; + } + + if (pin1 != -1) { + /* + * Ok, does IRQ0 through the IOAPIC work? + */ + if (no_pin1) { + add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); + setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); + } else { + /* for edge trigger, setup_IO_APIC_irq already + * leave it unmasked. + * so only need to unmask if it is level-trigger + * do we really have level trigger timer? + */ + int idx; + idx = find_irq_entry(apic1, pin1, mp_INT); + if (idx != -1 && irq_trigger(idx)) + unmask_IO_APIC_irq_desc(desc); + } + if (timer_irq_works()) { + if (nmi_watchdog == NMI_IO_APIC) { + setup_nmi(); + enable_8259A_irq(0); + } + if (disable_timer_pin_1 > 0) + clear_IO_APIC_pin(0, pin1); + goto out; + } +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) + panic("timer doesn't work through Interrupt-remapped IO-APIC"); +#endif + local_irq_disable(); + clear_IO_APIC_pin(apic1, pin1); + if (!no_pin1) + apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " + "8254 timer not connected to IO-APIC\n"); + + apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " + "(IRQ0) through the 8259A ...\n"); + apic_printk(APIC_QUIET, KERN_INFO + "..... (found apic %d pin %d) ...\n", apic2, pin2); + /* + * legacy devices should be connected to IO APIC #0 + */ + replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); + setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); + enable_8259A_irq(0); + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); + timer_through_8259 = 1; + if (nmi_watchdog == NMI_IO_APIC) { + disable_8259A_irq(0); + setup_nmi(); + enable_8259A_irq(0); + } + goto out; + } + /* + * Cleanup, just in case ... + */ + local_irq_disable(); + disable_8259A_irq(0); + clear_IO_APIC_pin(apic2, pin2); + apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); + } + + if (nmi_watchdog == NMI_IO_APIC) { + apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " + "through the IO-APIC - disabling NMI Watchdog!\n"); + nmi_watchdog = NMI_NONE; + } +#ifdef CONFIG_X86_32 + timer_ack = 0; +#endif + + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as Virtual Wire IRQ...\n"); + + lapic_register_intr(0, desc); + apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ + enable_8259A_irq(0); + + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + goto out; + } + local_irq_disable(); + disable_8259A_irq(0); + apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); + apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); + + apic_printk(APIC_QUIET, KERN_INFO + "...trying to set up timer as ExtINT IRQ...\n"); + + init_8259A(0); + make_8259A_irq(0); + apic_write(APIC_LVT0, APIC_DM_EXTINT); + + unlock_ExtINT_logic(); + + if (timer_irq_works()) { + apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + goto out; + } + local_irq_disable(); + apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); + panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " + "report. Then try booting with the 'noapic' option.\n"); +out: + local_irq_restore(flags); +} + +/* + * Traditionally ISA IRQ2 is the cascade IRQ, and is not available + * to devices. 
However there may be an I/O APIC pin available for + * this interrupt regardless. The pin may be left unconnected, but + * typically it will be reused as an ExtINT cascade interrupt for + * the master 8259A. In the MPS case such a pin will normally be + * reported as an ExtINT interrupt in the MP table. With ACPI + * there is no provision for ExtINT interrupts, and in the absence + * of an override it would be treated as an ordinary ISA I/O APIC + * interrupt, that is edge-triggered and unmasked by default. We + * used to do this, but it caused problems on some systems because + * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using + * the same ExtINT cascade interrupt to drive the local APIC of the + * bootstrap processor. Therefore we refrain from routing IRQ2 to + * the I/O APIC in all cases now. No actual device should request + * it anyway. --macro + */ +#define PIC_IRQS (1 << PIC_CASCADE_IR) + +void __init setup_IO_APIC(void) +{ + + /* + * calling enable_IO_APIC() is moved to setup_local_APIC for BP + */ + + io_apic_irqs = ~PIC_IRQS; + + apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); + /* + * Set up IO-APIC IRQ routing. + */ +#ifdef CONFIG_X86_32 + if (!acpi_ioapic) + setup_ioapic_ids_from_mpc(); +#endif + sync_Arb_IDs(); + setup_IO_APIC_irqs(); + init_IO_APIC_traps(); + check_timer(); +} + +/* + * Called after all the initialization is done. If we didnt find any + * APIC bugs then we can allow the modify fast path + */ + +static int __init io_apic_bug_finalize(void) +{ + if (sis_apic_bug == -1) + sis_apic_bug = 0; + return 0; +} + +late_initcall(io_apic_bug_finalize); + +struct sysfs_ioapic_data { + struct sys_device dev; + struct IO_APIC_route_entry entry[0]; +}; +static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; + +static int ioapic_suspend(struct sys_device *dev, pm_message_t state) +{ + struct IO_APIC_route_entry *entry; + struct sysfs_ioapic_data *data; + int i; + + data = container_of(dev, struct sysfs_ioapic_data, dev); + entry = data->entry; + for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) + *entry = ioapic_read_entry(dev->id, i); + + return 0; +} + +static int ioapic_resume(struct sys_device *dev) +{ + struct IO_APIC_route_entry *entry; + struct sysfs_ioapic_data *data; + unsigned long flags; + union IO_APIC_reg_00 reg_00; + int i; + + data = container_of(dev, struct sysfs_ioapic_data, dev); + entry = data->entry; + + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(dev->id, 0); + if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { + reg_00.bits.ID = mp_ioapics[dev->id].apicid; + io_apic_write(dev->id, 0, reg_00.raw); + } + spin_unlock_irqrestore(&ioapic_lock, flags); + for (i = 0; i < nr_ioapic_registers[dev->id]; i++) + ioapic_write_entry(dev->id, i, entry[i]); + + return 0; +} + +static struct sysdev_class ioapic_sysdev_class = { + .name = "ioapic", + .suspend = ioapic_suspend, + .resume = ioapic_resume, +}; + +static int __init ioapic_init_sysfs(void) +{ + struct sys_device * dev; + int i, size, error; + + error = sysdev_class_register(&ioapic_sysdev_class); + if (error) + return error; + + for (i = 0; i < nr_ioapics; i++ ) { + size = sizeof(struct sys_device) + nr_ioapic_registers[i] + * sizeof(struct IO_APIC_route_entry); + mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); + if (!mp_ioapic_data[i]) { + printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); + continue; + } + dev = &mp_ioapic_data[i]->dev; + dev->id = i; + dev->cls = &ioapic_sysdev_class; + error = sysdev_register(dev); + if (error) { + 
kfree(mp_ioapic_data[i]); + mp_ioapic_data[i] = NULL; + printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); + continue; + } + } + + return 0; +} + +device_initcall(ioapic_init_sysfs); + +static int nr_irqs_gsi = NR_IRQS_LEGACY; +/* + * Dynamic irq allocate and deallocation + */ +unsigned int create_irq_nr(unsigned int irq_want) +{ + /* Allocate an unused irq */ + unsigned int irq; + unsigned int new; + unsigned long flags; + struct irq_cfg *cfg_new = NULL; + int cpu = boot_cpu_id; + struct irq_desc *desc_new = NULL; + + irq = 0; + if (irq_want < nr_irqs_gsi) + irq_want = nr_irqs_gsi; + + spin_lock_irqsave(&vector_lock, flags); + for (new = irq_want; new < nr_irqs; new++) { + desc_new = irq_to_desc_alloc_cpu(new, cpu); + if (!desc_new) { + printk(KERN_INFO "can not get irq_desc for %d\n", new); + continue; + } + cfg_new = desc_new->chip_data; + + if (cfg_new->vector != 0) + continue; + if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) + irq = new; + break; + } + spin_unlock_irqrestore(&vector_lock, flags); + + if (irq > 0) { + dynamic_irq_init(irq); + /* restore it, in case dynamic_irq_init clear it */ + if (desc_new) + desc_new->chip_data = cfg_new; + } + return irq; +} + +int create_irq(void) +{ + unsigned int irq_want; + int irq; + + irq_want = nr_irqs_gsi; + irq = create_irq_nr(irq_want); + + if (irq == 0) + irq = -1; + + return irq; +} + +void destroy_irq(unsigned int irq) +{ + unsigned long flags; + struct irq_cfg *cfg; + struct irq_desc *desc; + + /* store it, in case dynamic_irq_cleanup clear it */ + desc = irq_to_desc(irq); + cfg = desc->chip_data; + dynamic_irq_cleanup(irq); + /* connect back irq_cfg */ + if (desc) + desc->chip_data = cfg; + +#ifdef CONFIG_INTR_REMAP + free_irte(irq); +#endif + spin_lock_irqsave(&vector_lock, flags); + __clear_irq_vector(irq, cfg); + spin_unlock_irqrestore(&vector_lock, flags); +} + +/* + * MSI message composition + */ +#ifdef CONFIG_PCI_MSI +static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) +{ + struct irq_cfg *cfg; + int err; + unsigned dest; + + if (disable_apic) + return -ENXIO; + + cfg = irq_cfg(irq); + err = assign_irq_vector(irq, cfg, apic->target_cpus()); + if (err) + return err; + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); + +#ifdef CONFIG_INTR_REMAP + if (irq_remapped(irq)) { + struct irte irte; + int ir_index; + u16 sub_handle; + + ir_index = map_irq_to_irte_handle(irq, &sub_handle); + BUG_ON(ir_index == -1); + + memset (&irte, 0, sizeof(irte)); + + irte.present = 1; + irte.dst_mode = apic->irq_dest_mode; + irte.trigger_mode = 0; /* edge */ + irte.dlvry_mode = apic->irq_delivery_mode; + irte.vector = cfg->vector; + irte.dest_id = IRTE_DEST(dest); + + modify_irte(irq, &irte); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->data = sub_handle; + msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | + MSI_ADDR_IR_SHV | + MSI_ADDR_IR_INDEX1(ir_index) | + MSI_ADDR_IR_INDEX2(ir_index); + } else +#endif + { + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = + MSI_ADDR_BASE_LO | + ((apic->irq_dest_mode == 0) ? + MSI_ADDR_DEST_MODE_PHYSICAL: + MSI_ADDR_DEST_MODE_LOGICAL) | + ((apic->irq_delivery_mode != dest_LowestPrio) ? + MSI_ADDR_REDIRECTION_CPU: + MSI_ADDR_REDIRECTION_LOWPRI) | + MSI_ADDR_DEST_ID(dest); + + msg->data = + MSI_DATA_TRIGGER_EDGE | + MSI_DATA_LEVEL_ASSERT | + ((apic->irq_delivery_mode != dest_LowestPrio) ? 
+ MSI_DATA_DELIVERY_FIXED: + MSI_DATA_DELIVERY_LOWPRI) | + MSI_DATA_VECTOR(cfg->vector); + } + return err; +} + +#ifdef CONFIG_SMP +static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_cfg *cfg; + struct msi_msg msg; + unsigned int dest; + + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) + return; + + cfg = desc->chip_data; + + read_msi_msg_desc(desc, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + + write_msi_msg_desc(desc, &msg); +} +#ifdef CONFIG_INTR_REMAP +/* + * Migrate the MSI irq to another cpumask. This migration is + * done in the process context using interrupt-remapping hardware. + */ +static void +ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_cfg *cfg = desc->chip_data; + unsigned int dest; + struct irte irte; + + if (get_irte(irq, &irte)) + return; + + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) + return; + + irte.vector = cfg->vector; + irte.dest_id = IRTE_DEST(dest); + + /* + * atomically update the IRTE with the new destination and vector. + */ + modify_irte(irq, &irte); + + /* + * After this point, all the interrupts will start arriving + * at the new destination. So, time to cleanup the previous + * vector allocation. + */ + if (cfg->move_in_progress) + send_cleanup_vector(cfg); +} + +#endif +#endif /* CONFIG_SMP */ + +/* + * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, + * which implement the MSI or MSI-X Capability Structure. + */ +static struct irq_chip msi_chip = { + .name = "PCI-MSI", + .unmask = unmask_msi_irq, + .mask = mask_msi_irq, + .ack = ack_apic_edge, +#ifdef CONFIG_SMP + .set_affinity = set_msi_irq_affinity, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +#ifdef CONFIG_INTR_REMAP +static struct irq_chip msi_ir_chip = { + .name = "IR-PCI-MSI", + .unmask = unmask_msi_irq, + .mask = mask_msi_irq, + .ack = ack_x2apic_edge, +#ifdef CONFIG_SMP + .set_affinity = ir_set_msi_irq_affinity, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +/* + * Map the PCI dev to the corresponding remapping hardware unit + * and allocate 'nvec' consecutive interrupt-remapping table entries + * in it. 
+ */ +static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) +{ + struct intel_iommu *iommu; + int index; + + iommu = map_dev_to_ir(dev); + if (!iommu) { + printk(KERN_ERR + "Unable to map PCI %s to iommu\n", pci_name(dev)); + return -ENOENT; + } + + index = alloc_irte(iommu, irq, nvec); + if (index < 0) { + printk(KERN_ERR + "Unable to allocate %d IRTE for PCI %s\n", nvec, + pci_name(dev)); + return -ENOSPC; + } + return index; +} +#endif + +static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) +{ + int ret; + struct msi_msg msg; + + ret = msi_compose_msg(dev, irq, &msg); + if (ret < 0) + return ret; + + set_irq_msi(irq, msidesc); + write_msi_msg(irq, &msg); + +#ifdef CONFIG_INTR_REMAP + if (irq_remapped(irq)) { + struct irq_desc *desc = irq_to_desc(irq); + /* + * irq migration in process context + */ + desc->status |= IRQ_MOVE_PCNTXT; + set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); + } else +#endif + set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); + + dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); + + return 0; +} + +int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + unsigned int irq; + int ret, sub_handle; + struct msi_desc *msidesc; + unsigned int irq_want; + +#ifdef CONFIG_INTR_REMAP + struct intel_iommu *iommu = 0; + int index = 0; +#endif + + irq_want = nr_irqs_gsi; + sub_handle = 0; + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = create_irq_nr(irq_want); + if (irq == 0) + return -1; + irq_want = irq + 1; +#ifdef CONFIG_INTR_REMAP + if (!intr_remapping_enabled) + goto no_ir; + + if (!sub_handle) { + /* + * allocate the consecutive block of IRTE's + * for 'nvec' + */ + index = msi_alloc_irte(dev, irq, nvec); + if (index < 0) { + ret = index; + goto error; + } + } else { + iommu = map_dev_to_ir(dev); + if (!iommu) { + ret = -ENOENT; + goto error; + } + /* + * setup the mapping between the irq and the IRTE + * base index, the sub_handle pointing to the + * appropriate interrupt remap table entry. 
+ */ + set_irte_irq(irq, iommu, index, sub_handle); + } +no_ir: +#endif + ret = setup_msi_irq(dev, msidesc, irq); + if (ret < 0) + goto error; + sub_handle++; + } + return 0; + +error: + destroy_irq(irq); + return ret; +} + +void arch_teardown_msi_irq(unsigned int irq) +{ + destroy_irq(irq); +} + +#ifdef CONFIG_DMAR +#ifdef CONFIG_SMP +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_cfg *cfg; + struct msi_msg msg; + unsigned int dest; + + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) + return; + + cfg = desc->chip_data; + + dmar_msi_read(irq, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + + dmar_msi_write(irq, &msg); +} + +#endif /* CONFIG_SMP */ + +struct irq_chip dmar_msi_type = { + .name = "DMAR_MSI", + .unmask = dmar_msi_unmask, + .mask = dmar_msi_mask, + .ack = ack_apic_edge, +#ifdef CONFIG_SMP + .set_affinity = dmar_msi_set_affinity, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +int arch_setup_dmar_msi(unsigned int irq) +{ + int ret; + struct msi_msg msg; + + ret = msi_compose_msg(NULL, irq, &msg); + if (ret < 0) + return ret; + dmar_msi_write(irq, &msg); + set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, + "edge"); + return 0; +} +#endif + +#ifdef CONFIG_HPET_TIMER + +#ifdef CONFIG_SMP +static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_cfg *cfg; + struct msi_msg msg; + unsigned int dest; + + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) + return; + + cfg = desc->chip_data; + + hpet_msi_read(irq, &msg); + + msg.data &= ~MSI_DATA_VECTOR_MASK; + msg.data |= MSI_DATA_VECTOR(cfg->vector); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + + hpet_msi_write(irq, &msg); +} + +#endif /* CONFIG_SMP */ + +struct irq_chip hpet_msi_type = { + .name = "HPET_MSI", + .unmask = hpet_msi_unmask, + .mask = hpet_msi_mask, + .ack = ack_apic_edge, +#ifdef CONFIG_SMP + .set_affinity = hpet_msi_set_affinity, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +int arch_setup_hpet_msi(unsigned int irq) +{ + int ret; + struct msi_msg msg; + + ret = msi_compose_msg(NULL, irq, &msg); + if (ret < 0) + return ret; + + hpet_msi_write(irq, &msg); + set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, + "edge"); + + return 0; +} +#endif + +#endif /* CONFIG_PCI_MSI */ +/* + * Hypertransport interrupt support + */ +#ifdef CONFIG_HT_IRQ + +#ifdef CONFIG_SMP + +static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) +{ + struct ht_irq_msg msg; + fetch_ht_irq_msg(irq, &msg); + + msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); + msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); + + msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); + msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); + + write_ht_irq_msg(irq, &msg); +} + +static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irq_cfg *cfg; + unsigned int dest; + + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) + return; + + cfg = desc->chip_data; + + target_ht_irq(irq, dest, cfg->vector); +} + +#endif + +static struct irq_chip ht_irq_chip = { + .name = "PCI-HT", + .mask = mask_ht_irq, + .unmask = 
unmask_ht_irq, + .ack = ack_apic_edge, +#ifdef CONFIG_SMP + .set_affinity = set_ht_irq_affinity, +#endif + .retrigger = ioapic_retrigger_irq, +}; + +int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) +{ + struct irq_cfg *cfg; + int err; + + if (disable_apic) + return -ENXIO; + + cfg = irq_cfg(irq); + err = assign_irq_vector(irq, cfg, apic->target_cpus()); + if (!err) { + struct ht_irq_msg msg; + unsigned dest; + + dest = apic->cpu_mask_to_apicid_and(cfg->domain, + apic->target_cpus()); + + msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); + + msg.address_lo = + HT_IRQ_LOW_BASE | + HT_IRQ_LOW_DEST_ID(dest) | + HT_IRQ_LOW_VECTOR(cfg->vector) | + ((apic->irq_dest_mode == 0) ? + HT_IRQ_LOW_DM_PHYSICAL : + HT_IRQ_LOW_DM_LOGICAL) | + HT_IRQ_LOW_RQEOI_EDGE | + ((apic->irq_delivery_mode != dest_LowestPrio) ? + HT_IRQ_LOW_MT_FIXED : + HT_IRQ_LOW_MT_ARBITRATED) | + HT_IRQ_LOW_IRQ_MASKED; + + write_ht_irq_msg(irq, &msg); + + set_irq_chip_and_handler_name(irq, &ht_irq_chip, + handle_edge_irq, "edge"); + + dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); + } + return err; +} +#endif /* CONFIG_HT_IRQ */ + +#ifdef CONFIG_X86_UV +/* + * Re-target the irq to the specified CPU and enable the specified MMR located + * on the specified blade to allow the sending of MSIs to the specified CPU. + */ +int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, + unsigned long mmr_offset) +{ + const struct cpumask *eligible_cpu = cpumask_of(cpu); + struct irq_cfg *cfg; + int mmr_pnode; + unsigned long mmr_value; + struct uv_IO_APIC_route_entry *entry; + unsigned long flags; + int err; + + cfg = irq_cfg(irq); + + err = assign_irq_vector(irq, cfg, eligible_cpu); + if (err != 0) + return err; + + spin_lock_irqsave(&vector_lock, flags); + set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, + irq_name); + spin_unlock_irqrestore(&vector_lock, flags); + + mmr_value = 0; + entry = (struct uv_IO_APIC_route_entry *)&mmr_value; + BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); + + entry->vector = cfg->vector; + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; + entry->polarity = 0; + entry->trigger = 0; + entry->mask = 0; + entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); + + mmr_pnode = uv_blade_to_pnode(mmr_blade); + uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); + + return irq; +} + +/* + * Disable the specified MMR located on the specified blade so that MSIs are + * longer allowed to be sent. 
+ */ +void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) +{ + unsigned long mmr_value; + struct uv_IO_APIC_route_entry *entry; + int mmr_pnode; + + mmr_value = 0; + entry = (struct uv_IO_APIC_route_entry *)&mmr_value; + BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); + + entry->mask = 1; + + mmr_pnode = uv_blade_to_pnode(mmr_blade); + uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); +} +#endif /* CONFIG_X86_64 */ + +int __init io_apic_get_redir_entries (int ioapic) +{ + union IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.bits.entries; +} + +void __init probe_nr_irqs_gsi(void) +{ + int nr = 0; + + nr = acpi_probe_gsi(); + if (nr > nr_irqs_gsi) { + nr_irqs_gsi = nr; + } else { + /* for acpi=off or acpi is not compiled in */ + int idx; + + nr = 0; + for (idx = 0; idx < nr_ioapics; idx++) + nr += io_apic_get_redir_entries(idx) + 1; + + if (nr > nr_irqs_gsi) + nr_irqs_gsi = nr; + } + + printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); +} + +#ifdef CONFIG_SPARSE_IRQ +int __init arch_probe_nr_irqs(void) +{ + int nr; + + if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) + nr_irqs = NR_VECTORS * nr_cpu_ids; + + nr = nr_irqs_gsi + 8 * nr_cpu_ids; +#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) + /* + * for MSI and HT dyn irq + */ + nr += nr_irqs_gsi * 16; +#endif + if (nr < nr_irqs) + nr_irqs = nr; + + return 0; +} +#endif + +/* -------------------------------------------------------------------------- + ACPI-based IOAPIC Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI + +#ifdef CONFIG_X86_32 +int __init io_apic_get_unique_id(int ioapic, int apic_id) +{ + union IO_APIC_reg_00 reg_00; + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; + physid_mask_t tmp; + unsigned long flags; + int i = 0; + + /* + * The P4 platform supports up to 256 APIC IDs on two separate APIC + * buses (one for LAPICs, one for IOAPICs), where predecessors only + * supports up to 16 on one shared APIC bus. + * + * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full + * advantage of new APIC bus architecture. + */ + + if (physids_empty(apic_id_map)) + apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); + + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + if (apic_id >= get_physical_broadcast()) { + printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " + "%d\n", ioapic, apic_id, reg_00.bits.ID); + apic_id = reg_00.bits.ID; + } + + /* + * Every APIC in a system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. 
+ */ + if (apic->check_apicid_used(apic_id_map, apic_id)) { + + for (i = 0; i < get_physical_broadcast(); i++) { + if (!apic->check_apicid_used(apic_id_map, i)) + break; + } + + if (i == get_physical_broadcast()) + panic("Max apic_id exceeded!\n"); + + printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " + "trying %d\n", ioapic, apic_id, i); + + apic_id = i; + } + + tmp = apic->apicid_to_cpu_present(apic_id); + physids_or(apic_id_map, apic_id_map, tmp); + + if (reg_00.bits.ID != apic_id) { + reg_00.bits.ID = apic_id; + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0, reg_00.raw); + reg_00.raw = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + /* Sanity check */ + if (reg_00.bits.ID != apic_id) { + printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); + return -1; + } + } + + apic_printk(APIC_VERBOSE, KERN_INFO + "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + + return apic_id; +} + +int __init io_apic_get_version(int ioapic) +{ + union IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + reg_01.raw = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.bits.version; +} +#endif + +int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) +{ + struct irq_desc *desc; + struct irq_cfg *cfg; + int cpu = boot_cpu_id; + + if (!IO_APIC_IRQ(irq)) { + apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", + ioapic); + return -EINVAL; + } + + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc %d\n", irq); + return 0; + } + + /* + * IRQs < 16 are already in the irq_2_pin[] map + */ + if (irq >= NR_IRQS_LEGACY) { + cfg = desc->chip_data; + add_pin_to_irq_cpu(cfg, cpu, ioapic, pin); + } + + setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity); + + return 0; +} + + +int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) +{ + int i; + + if (skip_ioapic_setup) + return -1; + + for (i = 0; i < mp_irq_entries; i++) + if (mp_irqs[i].irqtype == mp_INT && + mp_irqs[i].srcbusirq == bus_irq) + break; + if (i >= mp_irq_entries) + return -1; + + *trigger = irq_trigger(i); + *polarity = irq_polarity(i); + return 0; +} + +#endif /* CONFIG_ACPI */ + +/* + * This function currently is only a helper for the i386 smp boot process where + * we need to reprogram the ioredtbls to cater for the cpus which have come online + * so mask in all cases should simply be apic->target_cpus() + */ +#ifdef CONFIG_SMP +void __init setup_ioapic_dest(void) +{ + int pin, ioapic, irq, irq_entry; + struct irq_desc *desc; + struct irq_cfg *cfg; + const struct cpumask *mask; + + if (skip_ioapic_setup == 1) + return; + + for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { + for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { + irq_entry = find_irq_entry(ioapic, pin, mp_INT); + if (irq_entry == -1) + continue; + irq = pin_2_irq(irq_entry, ioapic, pin); + + /* setup_IO_APIC_irqs could fail to get vector for some device + * when you have too many devices, because at that time only boot + * cpu is online. 
+ */ + desc = irq_to_desc(irq); + cfg = desc->chip_data; + if (!cfg->vector) { + setup_IO_APIC_irq(ioapic, pin, irq, desc, + irq_trigger(irq_entry), + irq_polarity(irq_entry)); + continue; + + } + + /* + * Honour affinities which have been set in early boot + */ + if (desc->status & + (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) + mask = desc->affinity; + else + mask = apic->target_cpus(); + +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) + set_ir_ioapic_affinity_irq_desc(desc, mask); + else +#endif + set_ioapic_affinity_irq_desc(desc, mask); + } + + } +} +#endif + +#define IOAPIC_RESOURCE_NAME_SIZE 11 + +static struct resource *ioapic_resources; + +static struct resource * __init ioapic_setup_resources(void) +{ + unsigned long n; + struct resource *res; + char *mem; + int i; + + if (nr_ioapics <= 0) + return NULL; + + n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); + n *= nr_ioapics; + + mem = alloc_bootmem(n); + res = (void *)mem; + + if (mem != NULL) { + mem += sizeof(struct resource) * nr_ioapics; + + for (i = 0; i < nr_ioapics; i++) { + res[i].name = mem; + res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; + sprintf(mem, "IOAPIC %u", i); + mem += IOAPIC_RESOURCE_NAME_SIZE; + } + } + + ioapic_resources = res; + + return res; +} + +void __init ioapic_init_mappings(void) +{ + unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; + struct resource *ioapic_res; + int i; + + ioapic_res = ioapic_setup_resources(); + for (i = 0; i < nr_ioapics; i++) { + if (smp_found_config) { + ioapic_phys = mp_ioapics[i].apicaddr; +#ifdef CONFIG_X86_32 + if (!ioapic_phys) { + printk(KERN_ERR + "WARNING: bogus zero IO-APIC " + "address found in MPTABLE, " + "disabling IO/APIC support!\n"); + smp_found_config = 0; + skip_ioapic_setup = 1; + goto fake_ioapic_page; + } +#endif + } else { +#ifdef CONFIG_X86_32 +fake_ioapic_page: +#endif + ioapic_phys = (unsigned long) + alloc_bootmem_pages(PAGE_SIZE); + ioapic_phys = __pa(ioapic_phys); + } + set_fixmap_nocache(idx, ioapic_phys); + apic_printk(APIC_VERBOSE, + "mapped IOAPIC to %08lx (%08lx)\n", + __fix_to_virt(idx), ioapic_phys); + idx++; + + if (ioapic_res != NULL) { + ioapic_res->start = ioapic_phys; + ioapic_res->end = ioapic_phys + (4 * 1024) - 1; + ioapic_res++; + } + } +} + +static int __init ioapic_insert_resources(void) +{ + int i; + struct resource *r = ioapic_resources; + + if (!r) { + printk(KERN_ERR + "IO APIC resources could be not be allocated.\n"); + return -1; + } + + for (i = 0; i < nr_ioapics; i++) { + insert_resource(&iomem_resource, r); + r++; + } + + return 0; +} + +/* Insert the IO APIC resources after PCI initialization has occured to handle + * IO APICS that are mapped in on a BAR in PCI space. */ +late_initcall(ioapic_insert_resources); diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c new file mode 100644 index 00000000000..dbf5445727a --- /dev/null +++ b/arch/x86/kernel/apic/ipi.c @@ -0,0 +1,164 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicast to each CPU instead. 
+ * - mbligh + */ + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int query_cpu; + unsigned long flags; + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, + query_cpu), vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicasts to each CPU instead. This + * should be modified to do 1 message per cluster ID - mbligh + */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __default_send_IPI_dest_field( + apic->cpu_to_logical_apicid(query_cpu), vector, + apic->dest_logical); + } + local_irq_restore(flags); +} + +#ifdef CONFIG_X86_32 + +/* + * This is only used on smaller machines. + */ +void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + unsigned long flags; + + local_irq_save(flags); + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); + __default_send_IPI_dest_field(mask, vector, apic->dest_logical); + local_irq_restore(flags); +} + +void default_send_IPI_allbutself(int vector) +{ + /* + * if there are no other CPUs in the system then we get an APIC send + * error if we try to broadcast, thus avoid sending IPIs in this case. + */ + if (!(num_online_cpus() > 1)) + return; + + __default_local_send_IPI_allbutself(vector); +} + +void default_send_IPI_all(int vector) +{ + __default_local_send_IPI_all(vector); +} + +void default_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); +} + +/* must come after the send_IPI functions above for inlining */ +static int convert_apicid_to_cpu(int apic_id) +{ + int i; + + for_each_possible_cpu(i) { + if (per_cpu(x86_cpu_to_apicid, i) == apic_id) + return i; + } + return -1; +} + +int safe_smp_processor_id(void) +{ + int apicid, cpuid; + + if (!boot_cpu_has(X86_FEATURE_APIC)) + return 0; + + apicid = hard_smp_processor_id(); + if (apicid == BAD_APICID) + return 0; + + cpuid = convert_apicid_to_cpu(apicid); + + return cpuid >= 0 ? cpuid : 0; +} +#endif diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c new file mode 100644 index 00000000000..bdfad80c3cf --- /dev/null +++ b/arch/x86/kernel/apic/nmi.c @@ -0,0 +1,564 @@ +/* + * NMI watchdog support on APIC systems + * + * Started by Ingo Molnar + * + * Fixes: + * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog. + * Mikael Pettersson : Power Management for local APIC NMI watchdog. 
+ * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog. + * Pavel Machek and + * Mikael Pettersson : PM converted to driver model. Disable/enable API. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include + +int unknown_nmi_panic; +int nmi_watchdog_enabled; + +static cpumask_t backtrace_mask = CPU_MASK_NONE; + +/* nmi_active: + * >0: the lapic NMI watchdog is active, but can be disabled + * <0: the lapic NMI watchdog has not been set up, and cannot + * be enabled + * 0: the lapic NMI watchdog is disabled, but can be enabled + */ +atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ +EXPORT_SYMBOL(nmi_active); + +unsigned int nmi_watchdog = NMI_NONE; +EXPORT_SYMBOL(nmi_watchdog); + +static int panic_on_timeout; + +static unsigned int nmi_hz = HZ; +static DEFINE_PER_CPU(short, wd_enabled); +static int endflag __initdata; + +static inline unsigned int get_nmi_count(int cpu) +{ + return per_cpu(irq_stat, cpu).__nmi_count; +} + +static inline int mce_in_progress(void) +{ +#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) + return atomic_read(&mce_entry) > 0; +#endif + return 0; +} + +/* + * Take the local apic timer and PIT/HPET into account. We don't + * know which one is active, when we have highres/dyntick on + */ +static inline unsigned int get_timer_irqs(int cpu) +{ + return per_cpu(irq_stat, cpu).apic_timer_irqs + + per_cpu(irq_stat, cpu).irq0_irqs; +} + +#ifdef CONFIG_SMP +/* + * The performance counters used by NMI_LOCAL_APIC don't trigger when + * the CPU is idle. To make sure the NMI watchdog really ticks on all + * CPUs during the test make them busy. + */ +static __init void nmi_cpu_busy(void *data) +{ + local_irq_enable_in_hardirq(); + /* + * Intentionally don't use cpu_relax here. This is + * to make sure that the performance counter really ticks, + * even if there is a simulator or similar that catches the + * pause instruction. On a real HT machine this is fine because + * all other CPUs are busy with "useless" delay loops and don't + * care if they get somewhat less cycles. + */ + while (endflag == 0) + mb(); +} +#endif + +static void report_broken_nmi(int cpu, int *prev_nmi_count) +{ + printk(KERN_CONT "\n"); + + printk(KERN_WARNING + "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", + cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); + + printk(KERN_WARNING + "Please report this to bugzilla.kernel.org,\n"); + printk(KERN_WARNING + "and attach the output of the 'dmesg' command.\n"); + + per_cpu(wd_enabled, cpu) = 0; + atomic_dec(&nmi_active); +} + +static void __acpi_nmi_disable(void *__unused) +{ + apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); +} + +int __init check_nmi_watchdog(void) +{ + unsigned int *prev_nmi_count; + int cpu; + + if (!nmi_watchdog_active() || !atomic_read(&nmi_active)) + return 0; + + prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); + if (!prev_nmi_count) + goto error; + + printk(KERN_INFO "Testing NMI watchdog ... 
"); + +#ifdef CONFIG_SMP + if (nmi_watchdog == NMI_LOCAL_APIC) + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); +#endif + + for_each_possible_cpu(cpu) + prev_nmi_count[cpu] = get_nmi_count(cpu); + local_irq_enable(); + mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ + + for_each_online_cpu(cpu) { + if (!per_cpu(wd_enabled, cpu)) + continue; + if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) + report_broken_nmi(cpu, prev_nmi_count); + } + endflag = 1; + if (!atomic_read(&nmi_active)) { + kfree(prev_nmi_count); + atomic_set(&nmi_active, -1); + goto error; + } + printk("OK.\n"); + + /* + * now that we know it works we can reduce NMI frequency to + * something more reasonable; makes a difference in some configs + */ + if (nmi_watchdog == NMI_LOCAL_APIC) + nmi_hz = lapic_adjust_nmi_hz(1); + + kfree(prev_nmi_count); + return 0; +error: + if (nmi_watchdog == NMI_IO_APIC) { + if (!timer_through_8259) + disable_8259A_irq(0); + on_each_cpu(__acpi_nmi_disable, NULL, 1); + } + +#ifdef CONFIG_X86_32 + timer_ack = 0; +#endif + return -1; +} + +static int __init setup_nmi_watchdog(char *str) +{ + unsigned int nmi; + + if (!strncmp(str, "panic", 5)) { + panic_on_timeout = 1; + str = strchr(str, ','); + if (!str) + return 1; + ++str; + } + + if (!strncmp(str, "lapic", 5)) + nmi_watchdog = NMI_LOCAL_APIC; + else if (!strncmp(str, "ioapic", 6)) + nmi_watchdog = NMI_IO_APIC; + else { + get_option(&str, &nmi); + if (nmi >= NMI_INVALID) + return 0; + nmi_watchdog = nmi; + } + + return 1; +} +__setup("nmi_watchdog=", setup_nmi_watchdog); + +/* + * Suspend/resume support + */ +#ifdef CONFIG_PM + +static int nmi_pm_active; /* nmi_active before suspend */ + +static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) +{ + /* only CPU0 goes here, other CPUs should be offline */ + nmi_pm_active = atomic_read(&nmi_active); + stop_apic_nmi_watchdog(NULL); + BUG_ON(atomic_read(&nmi_active) != 0); + return 0; +} + +static int lapic_nmi_resume(struct sys_device *dev) +{ + /* only CPU0 goes here, other CPUs should be offline */ + if (nmi_pm_active > 0) { + setup_apic_nmi_watchdog(NULL); + touch_nmi_watchdog(); + } + return 0; +} + +static struct sysdev_class nmi_sysclass = { + .name = "lapic_nmi", + .resume = lapic_nmi_resume, + .suspend = lapic_nmi_suspend, +}; + +static struct sys_device device_lapic_nmi = { + .id = 0, + .cls = &nmi_sysclass, +}; + +static int __init init_lapic_nmi_sysfs(void) +{ + int error; + + /* + * should really be a BUG_ON but b/c this is an + * init call, it just doesn't work. 
-dcz + */ + if (nmi_watchdog != NMI_LOCAL_APIC) + return 0; + + if (atomic_read(&nmi_active) < 0) + return 0; + + error = sysdev_class_register(&nmi_sysclass); + if (!error) + error = sysdev_register(&device_lapic_nmi); + return error; +} + +/* must come after the local APIC's device_initcall() */ +late_initcall(init_lapic_nmi_sysfs); + +#endif /* CONFIG_PM */ + +static void __acpi_nmi_enable(void *__unused) +{ + apic_write(APIC_LVT0, APIC_DM_NMI); +} + +/* + * Enable timer based NMIs on all CPUs: + */ +void acpi_nmi_enable(void) +{ + if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) + on_each_cpu(__acpi_nmi_enable, NULL, 1); +} + +/* + * Disable timer based NMIs on all CPUs: + */ +void acpi_nmi_disable(void) +{ + if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) + on_each_cpu(__acpi_nmi_disable, NULL, 1); +} + +/* + * This function is called as soon the LAPIC NMI watchdog driver has everything + * in place and it's ready to check if the NMIs belong to the NMI watchdog + */ +void cpu_nmi_set_wd_enabled(void) +{ + __get_cpu_var(wd_enabled) = 1; +} + +void setup_apic_nmi_watchdog(void *unused) +{ + if (__get_cpu_var(wd_enabled)) + return; + + /* cheap hack to support suspend/resume */ + /* if cpu0 is not active neither should the other cpus */ + if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0) + return; + + switch (nmi_watchdog) { + case NMI_LOCAL_APIC: + if (lapic_watchdog_init(nmi_hz) < 0) { + __get_cpu_var(wd_enabled) = 0; + return; + } + /* FALL THROUGH */ + case NMI_IO_APIC: + __get_cpu_var(wd_enabled) = 1; + atomic_inc(&nmi_active); + } +} + +void stop_apic_nmi_watchdog(void *unused) +{ + /* only support LOCAL and IO APICs for now */ + if (!nmi_watchdog_active()) + return; + if (__get_cpu_var(wd_enabled) == 0) + return; + if (nmi_watchdog == NMI_LOCAL_APIC) + lapic_watchdog_stop(); + else + __acpi_nmi_disable(NULL); + __get_cpu_var(wd_enabled) = 0; + atomic_dec(&nmi_active); +} + +/* + * the best way to detect whether a CPU has a 'hard lockup' problem + * is to check it's local APIC timer IRQ counts. If they are not + * changing then that CPU has some problem. + * + * as these watchdog NMI IRQs are generated on every CPU, we only + * have to check the current processor. + * + * since NMIs don't listen to _any_ locks, we have to be extremely + * careful not to rely on unsafe variables. The printk might lock + * up though, so we have to break up any console locks first ... + * [when there will be more tty-related locks, break them up here too!] + */ + +static DEFINE_PER_CPU(unsigned, last_irq_sum); +static DEFINE_PER_CPU(local_t, alert_counter); +static DEFINE_PER_CPU(int, nmi_touch); + +void touch_nmi_watchdog(void) +{ + if (nmi_watchdog_active()) { + unsigned cpu; + + /* + * Tell other CPUs to reset their alert counters. We cannot + * do it ourselves because the alert count increase is not + * atomic. + */ + for_each_present_cpu(cpu) { + if (per_cpu(nmi_touch, cpu) != 1) + per_cpu(nmi_touch, cpu) = 1; + } + } + + /* + * Tickle the softlockup detector too: + */ + touch_softlockup_watchdog(); +} +EXPORT_SYMBOL(touch_nmi_watchdog); + +notrace __kprobes int +nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) +{ + /* + * Since current_thread_info()-> is always on the stack, and we + * always switch the stack NMI-atomically, it's safe to use + * smp_processor_id(). 
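
For illustration (a sketch, not kernel code and not part of the patch): the comment above describes the heuristic that nmi_watchdog_tick() implements below. On every watchdog NMI the per-CPU count of local timer interrupts is sampled; if that count has not moved for roughly five seconds' worth of NMI ticks, the CPU is assumed to be stuck. Reduced to its core, with hypothetical names (cpu_looks_stuck, last_timer_irq_sum, alert_ticks) standing in for the kernel's per-CPU variables, the idea is:

	static unsigned int last_timer_irq_sum;	/* count seen at the previous NMI tick */
	static unsigned int alert_ticks;	/* consecutive ticks without progress */

	/* Returns 1 when the CPU appears stuck, 0 otherwise. */
	static int cpu_looks_stuck(unsigned int timer_irq_sum, unsigned int nmi_hz)
	{
		if (timer_irq_sum == last_timer_irq_sum) {
			/* no local timer interrupts since the last NMI tick */
			if (++alert_ticks >= 5 * nmi_hz)	/* ~5 seconds of silence */
				return 1;
		} else {
			/* the CPU made progress: remember the new count, reset the alarm */
			last_timer_irq_sum = timer_irq_sum;
			alert_ticks = 0;
		}
		return 0;
	}
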
+ */ + unsigned int sum; + int touched = 0; + int cpu = smp_processor_id(); + int rc = 0; + + /* check for other users first */ + if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) + == NOTIFY_STOP) { + rc = 1; + touched = 1; + } + + sum = get_timer_irqs(cpu); + + if (__get_cpu_var(nmi_touch)) { + __get_cpu_var(nmi_touch) = 0; + touched = 1; + } + + if (cpu_isset(cpu, backtrace_mask)) { + static DEFINE_SPINLOCK(lock); /* Serialise the printks */ + + spin_lock(&lock); + printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); + dump_stack(); + spin_unlock(&lock); + cpu_clear(cpu, backtrace_mask); + } + + /* Could check oops_in_progress here too, but it's safer not to */ + if (mce_in_progress()) + touched = 1; + + /* if none of the timers is firing, this cpu isn't doing much */ + if (!touched && __get_cpu_var(last_irq_sum) == sum) { + /* + * Ayiee, looks like this CPU is stuck ... + * wait a few IRQs (5 seconds) before doing the oops ... + */ + local_inc(&__get_cpu_var(alert_counter)); + if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) + /* + * die_nmi will return ONLY if NOTIFY_STOP happens.. + */ + die_nmi("BUG: NMI Watchdog detected LOCKUP", + regs, panic_on_timeout); + } else { + __get_cpu_var(last_irq_sum) = sum; + local_set(&__get_cpu_var(alert_counter), 0); + } + + /* see if the nmi watchdog went off */ + if (!__get_cpu_var(wd_enabled)) + return rc; + switch (nmi_watchdog) { + case NMI_LOCAL_APIC: + rc |= lapic_wd_event(nmi_hz); + break; + case NMI_IO_APIC: + /* + * don't know how to accurately check for this. + * just assume it was a watchdog timer interrupt + * This matches the old behaviour. + */ + rc = 1; + break; + } + return rc; +} + +#ifdef CONFIG_SYSCTL + +static void enable_ioapic_nmi_watchdog_single(void *unused) +{ + __get_cpu_var(wd_enabled) = 1; + atomic_inc(&nmi_active); + __acpi_nmi_enable(NULL); +} + +static void enable_ioapic_nmi_watchdog(void) +{ + on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1); + touch_nmi_watchdog(); +} + +static void disable_ioapic_nmi_watchdog(void) +{ + on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); +} + +static int __init setup_unknown_nmi_panic(char *str) +{ + unknown_nmi_panic = 1; + return 1; +} +__setup("unknown_nmi_panic", setup_unknown_nmi_panic); + +static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) +{ + unsigned char reason = get_nmi_reason(); + char buf[64]; + + sprintf(buf, "NMI received for unknown reason %02x\n", reason); + die_nmi(buf, regs, 1); /* Always panic here */ + return 0; +} + +/* + * proc handler for /proc/sys/kernel/nmi + */ +int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int old_state; + + nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ?
1 : 0; + old_state = nmi_watchdog_enabled; + proc_dointvec(table, write, file, buffer, length, ppos); + if (!!old_state == !!nmi_watchdog_enabled) + return 0; + + if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) { + printk(KERN_WARNING + "NMI watchdog is permanently disabled\n"); + return -EIO; + } + + if (nmi_watchdog == NMI_LOCAL_APIC) { + if (nmi_watchdog_enabled) + enable_lapic_nmi_watchdog(); + else + disable_lapic_nmi_watchdog(); + } else if (nmi_watchdog == NMI_IO_APIC) { + if (nmi_watchdog_enabled) + enable_ioapic_nmi_watchdog(); + else + disable_ioapic_nmi_watchdog(); + } else { + printk(KERN_WARNING + "NMI watchdog doesn't know what hardware to touch\n"); + return -EIO; + } + return 0; +} + +#endif /* CONFIG_SYSCTL */ + +int do_nmi_callback(struct pt_regs *regs, int cpu) +{ +#ifdef CONFIG_SYSCTL + if (unknown_nmi_panic) + return unknown_nmi_panic_callback(regs, cpu); +#endif + return 0; +} + +void __trigger_all_cpu_backtrace(void) +{ + int i; + + backtrace_mask = cpu_online_map; + /* Wait for up to 10 seconds for all CPUs to do the backtrace */ + for (i = 0; i < 10 * 1000; i++) { + if (cpus_empty(backtrace_mask)) + break; + mdelay(1); + } +} diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c new file mode 100644 index 00000000000..4e39d9ad4d5 --- /dev/null +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -0,0 +1,243 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); + +static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (cpu_has_x2apic) + return 1; + + return 0; +} + +/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ + +static const struct cpumask *x2apic_target_cpus(void) +{ + return cpumask_of(0); +} + +/* + * for now each logical cpu is in its own vector allocation domain. + */ +static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + +static void + __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) +{ + unsigned long cfg; + + cfg = __prepare_ICR(0, vector, dest); + + /* + * send the IPI. + */ + native_x2apic_icr_write(cfg, apicid); +} + +/* + * for now, we send the IPI's one by one in the cpumask. + * TBD: Based on the cpu mask, we can send the IPI's to the cluster group + * at once. We have 16 cpu's in a cluster. This will minimize IPI register + * writes. 
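
For illustration (a sketch of the TBD above, not part of the patch): in x2APIC cluster mode a logical destination encodes the cluster number in bits 31:16 and a one-bit-per-CPU position in bits 15:0, so requests for CPUs that share a cluster can be merged and delivered with a single ICR write. Assuming a hypothetical logical_apicid[] table and a printf stub send_ipi_logical() in place of the kernel's per-cpu data and ICR access, the grouping could look roughly like this:

	#include <stdio.h>

	#define MAX_SKETCH_CPUS 256

	/* Stub for one logical-mode ICR write: dest = (cluster << 16) | 16-bit target mask. */
	static void send_ipi_logical(unsigned int dest, int vector)
	{
		printf("IPI vector 0x%x -> cluster 0x%x, mask 0x%04x\n",
		       vector, dest >> 16, dest & 0xffffu);
	}

	/* Send 'vector' to every requested CPU, one ICR write per cluster instead of per CPU. */
	static void send_ipi_mask_grouped(const unsigned char *requested, int nr_cpus,
					  const unsigned int *logical_apicid, int vector)
	{
		unsigned char handled[MAX_SKETCH_CPUS] = { 0 };
		int cpu, other;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			unsigned int cluster, dest = 0;

			if (!requested[cpu] || handled[cpu])
				continue;

			cluster = logical_apicid[cpu] & 0xffff0000u;

			/* merge every requested CPU that lives in the same cluster */
			for (other = cpu; other < nr_cpus; other++) {
				if (requested[other] &&
				    (logical_apicid[other] & 0xffff0000u) == cluster) {
					dest |= logical_apicid[other] & 0xffffu;
					handled[other] = 1;
				}
			}

			send_ipi_logical(cluster | dest, vector);
		}
	}

	int main(void)
	{
		/* four CPUs: 0 and 1 in cluster 0, 2 and 3 in cluster 1 (one-hot low bits) */
		unsigned int logical_apicid[4] = { 0x00001, 0x00002, 0x10001, 0x10002 };
		unsigned char requested[4] = { 1, 1, 0, 1 };

		/* three requested CPUs end up as two ICR writes */
		send_ipi_mask_grouped(requested, 4, logical_apicid, 0xfd);
		return 0;
	}
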
+ */ +static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); + } + local_irq_restore(flags); +} + +static void + x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +{ + unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); + } + local_irq_restore(flags); +} + +static void x2apic_send_IPI_allbutself(int vector) +{ + unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_online_cpu(query_cpu) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, apic->dest_logical); + } + local_irq_restore(flags); +} + +static void x2apic_send_IPI_all(int vector) +{ + x2apic_send_IPI_mask(cpu_online_mask, vector); +} + +static int x2apic_apic_id_registered(void) +{ + return 1; +} + +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + /* + * We're using fixed IRQ delivery, can only return one logical APIC ID. + * May as well be the first. + */ + int cpu = cpumask_first(cpumask); + + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_logical_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one logical APIC ID. + * May as well be the first. 
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_logical_apicid, cpu); + + return BAD_APICID; +} + +static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x) +{ + unsigned int id; + + id = x; + return id; +} + +static unsigned long set_apic_id(unsigned int id) +{ + unsigned long x; + + x = id; + return x; +} + +static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) +{ + return current_cpu_data.initial_apicid >> index_msb; +} + +static void x2apic_send_IPI_self(int vector) +{ + apic_write(APIC_SELF_IPI, vector); +} + +static void init_x2apic_ldr(void) +{ + int cpu = smp_processor_id(); + + per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); +} + +struct apic apic_x2apic_cluster = { + + .name = "cluster x2apic", + .probe = NULL, + .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, + .apic_id_registered = x2apic_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + .irq_dest_mode = 1, /* logical */ + + .target_cpus = x2apic_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .vector_allocation_domain = x2apic_vector_allocation_domain, + .init_apic_ldr = init_x2apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = x2apic_cluster_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = x2apic_cluster_phys_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, + + .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, + .send_IPI_allbutself = x2apic_send_IPI_allbutself, + .send_IPI_all = x2apic_send_IPI_all, + .send_IPI_self = x2apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c new file mode 100644 index 00000000000..d2d52eb9f7e --- /dev/null +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -0,0 +1,229 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int x2apic_phys; + +static int set_x2apic_phys_mode(char *arg) +{ + x2apic_phys = 1; + return 0; +} +early_param("x2apic_phys", set_x2apic_phys_mode); + +static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (cpu_has_x2apic && x2apic_phys) + return 1; + + return 0; +} + +/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ + +static const struct cpumask *x2apic_target_cpus(void) +{ + return cpumask_of(0); +} + +static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + +static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, + unsigned int dest) +{ + unsigned long cfg; + + cfg = __prepare_ICR(0, vector, dest); + + /* + * send the IPI. + */ + native_x2apic_icr_write(cfg, apicid); +} + +static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) +{ + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +static void + x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +{ + unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_cpu(query_cpu, mask) { + if (query_cpu != this_cpu) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +static void x2apic_send_IPI_allbutself(int vector) +{ + unsigned long this_cpu = smp_processor_id(); + unsigned long query_cpu; + unsigned long flags; + + local_irq_save(flags); + for_each_online_cpu(query_cpu) { + if (query_cpu == this_cpu) + continue; + __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} + +static void x2apic_send_IPI_all(int vector) +{ + x2apic_send_IPI_mask(cpu_online_mask, vector); +} + +static int x2apic_apic_id_registered(void) +{ + return 1; +} + +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + int cpu = cpumask_first(cpumask); + + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + + return BAD_APICID; +} + +static unsigned int x2apic_phys_get_apic_id(unsigned long x) +{ + return x; +} + +static unsigned long set_apic_id(unsigned int id) +{ + return id; +} + +static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) +{ + return current_cpu_data.initial_apicid >> index_msb; +} + +static void x2apic_send_IPI_self(int vector) +{ + apic_write(APIC_SELF_IPI, vector); +} + +static void init_x2apic_ldr(void) +{ +} + +struct apic apic_x2apic_phys = { + + .name = "physical x2apic", + .probe = NULL, + .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, + .apic_id_registered = x2apic_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = 0, /* physical */ + + .target_cpus = x2apic_target_cpus, + .disable_esr = 0, + .dest_logical = 0, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .vector_allocation_domain = x2apic_vector_allocation_domain, + .init_apic_ldr = init_x2apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = x2apic_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = x2apic_phys_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, + + .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, + .send_IPI_allbutself = x2apic_send_IPI_allbutself, + .send_IPI_all = x2apic_send_IPI_all, + .send_IPI_self = x2apic_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c new file mode 100644 index 00000000000..20b4ad07c3a --- /dev/null +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -0,0 +1,643 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV APIC functions (note: not an Intel compatible APIC) + * + * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_PER_CPU(int, x2apic_extra_bits); + +static enum uv_system_type uv_system_type; + +static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (!strcmp(oem_id, "SGI")) { + if (!strcmp(oem_table_id, "UVL")) + uv_system_type = UV_LEGACY_APIC; + else if (!strcmp(oem_table_id, "UVX")) + uv_system_type = UV_X2APIC; + else if (!strcmp(oem_table_id, "UVH")) { + uv_system_type = UV_NON_UNIQUE_APIC; + return 1; + } + } + return 0; +} + +enum uv_system_type get_uv_system_type(void) +{ + return uv_system_type; +} + +int is_uv_system(void) +{ + return uv_system_type != UV_NONE; +} +EXPORT_SYMBOL_GPL(is_uv_system); + +DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); +EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); + +struct uv_blade_info *uv_blade_info; +EXPORT_SYMBOL_GPL(uv_blade_info); + +short *uv_node_to_blade; +EXPORT_SYMBOL_GPL(uv_node_to_blade); + +short *uv_cpu_to_blade; +EXPORT_SYMBOL_GPL(uv_cpu_to_blade); + +short uv_possible_blades; +EXPORT_SYMBOL_GPL(uv_possible_blades); + +unsigned long sn_rtc_cycles_per_second; +EXPORT_SYMBOL(sn_rtc_cycles_per_second); + +/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ + +static const struct cpumask *uv_target_cpus(void) +{ + return cpumask_of(0); +} + +static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + +int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) +{ + unsigned long val; + int pnode; + + pnode = uv_apicid_to_pnode(phys_apicid); + val = (1UL << UVH_IPI_INT_SEND_SHFT) | + (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | + (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | + APIC_DM_INIT; + uv_write_global_mmr64(pnode, UVH_IPI_INT, val); + mdelay(10); + + val = (1UL << UVH_IPI_INT_SEND_SHFT) | + (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | + (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | + APIC_DM_STARTUP; + uv_write_global_mmr64(pnode, UVH_IPI_INT, val); + return 0; +} + +static void uv_send_IPI_one(int cpu, int vector) +{ + unsigned long val, apicid; + int pnode; + + apicid = per_cpu(x86_cpu_to_apicid, cpu); + pnode = uv_apicid_to_pnode(apicid); + + val = (1UL << UVH_IPI_INT_SEND_SHFT) | + (apicid << UVH_IPI_INT_APIC_ID_SHFT) | + (vector << UVH_IPI_INT_VECTOR_SHFT); + + uv_write_global_mmr64(pnode, UVH_IPI_INT, val); +} + +static void uv_send_IPI_mask(const struct cpumask *mask, int vector) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) + uv_send_IPI_one(cpu, vector); +} + +static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if (cpu != this_cpu) + uv_send_IPI_one(cpu, vector); + } +} + +static void uv_send_IPI_allbutself(int vector) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + for_each_online_cpu(cpu) { + if (cpu != this_cpu) + uv_send_IPI_one(cpu, vector); + } +} + +static void uv_send_IPI_all(int vector) +{ + uv_send_IPI_mask(cpu_online_mask, vector); +} + +static int uv_apic_id_registered(void) +{ + return 1; +} + +static void uv_init_apic_ldr(void) +{ +} + +static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. 
+ * May as well be the first. + */ + int cpu = cpumask_first(cpumask); + + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + + return BAD_APICID; +} + +static unsigned int x2apic_get_apic_id(unsigned long x) +{ + unsigned int id; + + WARN_ON(preemptible() && num_online_cpus() > 1); + id = x | __get_cpu_var(x2apic_extra_bits); + + return id; +} + +static unsigned long set_apic_id(unsigned int id) +{ + unsigned long x; + + /* maskout x2apic_extra_bits ? */ + x = id; + return x; +} + +static unsigned int uv_read_apic_id(void) +{ + + return x2apic_get_apic_id(apic_read(APIC_ID)); +} + +static int uv_phys_pkg_id(int initial_apicid, int index_msb) +{ + return uv_read_apic_id() >> index_msb; +} + +static void uv_send_IPI_self(int vector) +{ + apic_write(APIC_SELF_IPI, vector); +} + +struct apic apic_x2apic_uv_x = { + + .name = "UV large system", + .probe = NULL, + .acpi_madt_oem_check = uv_acpi_madt_oem_check, + .apic_id_registered = uv_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + .irq_dest_mode = 1, /* logical */ + + .target_cpus = uv_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = NULL, + .check_apicid_present = NULL, + + .vector_allocation_domain = uv_vector_allocation_domain, + .init_apic_ldr = uv_init_apic_ldr, + + .ioapic_phys_id_map = NULL, + .setup_apic_routing = NULL, + .multi_timer_check = NULL, + .apicid_to_node = NULL, + .cpu_to_logical_apicid = NULL, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = NULL, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = uv_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = x2apic_get_apic_id, + .set_apic_id = set_apic_id, + .apic_id_mask = 0xFFFFFFFFu, + + .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, + + .send_IPI_mask = uv_send_IPI_mask, + .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, + .send_IPI_allbutself = uv_send_IPI_allbutself, + .send_IPI_all = uv_send_IPI_all, + .send_IPI_self = uv_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + .wait_for_init_deassert = NULL, + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = NULL, + + .read = native_apic_msr_read, + .write = native_apic_msr_write, + .icr_read = native_x2apic_icr_read, + .icr_write = native_x2apic_icr_write, + .wait_icr_idle = native_x2apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, +}; + +static __cpuinit void set_x2apic_extra_bits(int pnode) +{ + __get_cpu_var(x2apic_extra_bits) = (pnode << 6); +} + +/* + * Called on boot cpu. 
+ */ +static __init int boot_pnode_to_blade(int pnode) +{ + int blade; + + for (blade = 0; blade < uv_num_possible_blades(); blade++) + if (pnode == uv_blade_info[blade].pnode) + return blade; + BUG(); +} + +struct redir_addr { + unsigned long redirect; + unsigned long alias; +}; + +#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT + +static __initdata struct redir_addr redir_addrs[] = { + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, +}; + +static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) +{ + union uvh_si_alias0_overlay_config_u alias; + union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; + int i; + + for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { + alias.v = uv_read_local_mmr(redir_addrs[i].alias); + if (alias.s.base == 0) { + *size = (1UL << alias.s.m_alias); + redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); + *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; + return; + } + } + BUG(); +} + +static __init void map_low_mmrs(void) +{ + init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); + init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); +} + +enum map_type {map_wb, map_uc}; + +static __init void map_high(char *id, unsigned long base, int shift, + int max_pnode, enum map_type map_type) +{ + unsigned long bytes, paddr; + + paddr = base << shift; + bytes = (1UL << shift) * (max_pnode + 1); + printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, + paddr + bytes); + if (map_type == map_uc) + init_extra_mapping_uc(paddr, bytes); + else + init_extra_mapping_wb(paddr, bytes); + +} +static __init void map_gru_high(int max_pnode) +{ + union uvh_rh_gam_gru_overlay_config_mmr_u gru; + int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; + + gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); + if (gru.s.enable) + map_high("GRU", gru.s.base, shift, max_pnode, map_wb); +} + +static __init void map_config_high(int max_pnode) +{ + union uvh_rh_gam_cfg_overlay_config_mmr_u cfg; + int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT; + + cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); + if (cfg.s.enable) + map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc); +} + +static __init void map_mmr_high(int max_pnode) +{ + union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; + int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; + + mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); + if (mmr.s.enable) + map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); +} + +static __init void map_mmioh_high(int max_pnode) +{ + union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; + int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; + + mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); + if (mmioh.s.enable) + map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); +} + +static __init void uv_rtc_init(void) +{ + long status; + u64 ticks_per_sec; + + status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, + &ticks_per_sec); + if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { + printk(KERN_WARNING + "unable to determine platform RTC clock frequency, " + "guessing.\n"); + /* BIOS gives wrong value for clock freq. 
so guess */ + sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; + } else + sn_rtc_cycles_per_second = ticks_per_sec; +} + +/* + * percpu heartbeat timer + */ +static void uv_heartbeat(unsigned long ignored) +{ + struct timer_list *timer = &uv_hub_info->scir.timer; + unsigned char bits = uv_hub_info->scir.state; + + /* flip heartbeat bit */ + bits ^= SCIR_CPU_HEARTBEAT; + + /* is this cpu idle? */ + if (idle_cpu(raw_smp_processor_id())) + bits &= ~SCIR_CPU_ACTIVITY; + else + bits |= SCIR_CPU_ACTIVITY; + + /* update system controller interface reg */ + uv_set_scir_bits(bits); + + /* enable next timer period */ + mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); +} + +static void __cpuinit uv_heartbeat_enable(int cpu) +{ + if (!uv_cpu_hub_info(cpu)->scir.enabled) { + struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; + + uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); + setup_timer(timer, uv_heartbeat, cpu); + timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; + add_timer_on(timer, cpu); + uv_cpu_hub_info(cpu)->scir.enabled = 1; + } + + /* check boot cpu */ + if (!uv_cpu_hub_info(0)->scir.enabled) + uv_heartbeat_enable(0); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void __cpuinit uv_heartbeat_disable(int cpu) +{ + if (uv_cpu_hub_info(cpu)->scir.enabled) { + uv_cpu_hub_info(cpu)->scir.enabled = 0; + del_timer(&uv_cpu_hub_info(cpu)->scir.timer); + } + uv_set_cpu_scir_bits(cpu, 0xff); +} + +/* + * cpu hotplug notifier + */ +static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + + switch (action) { + case CPU_ONLINE: + uv_heartbeat_enable(cpu); + break; + case CPU_DOWN_PREPARE: + uv_heartbeat_disable(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static __init void uv_scir_register_cpu_notifier(void) +{ + hotcpu_notifier(uv_scir_cpu_notify, 0); +} + +#else /* !CONFIG_HOTPLUG_CPU */ + +static __init void uv_scir_register_cpu_notifier(void) +{ +} + +static __init int uv_init_heartbeat(void) +{ + int cpu; + + if (is_uv_system()) + for_each_online_cpu(cpu) + uv_heartbeat_enable(cpu); + return 0; +} + +late_initcall(uv_init_heartbeat); + +#endif /* !CONFIG_HOTPLUG_CPU */ + +/* + * Called on each cpu to initialize the per_cpu UV data area. + * ZZZ hotplug not supported yet + */ +void __cpuinit uv_cpu_init(void) +{ + /* CPU 0 initilization will be done via uv_system_init. 
*/ + if (!uv_blade_info) + return; + + uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; + + if (get_uv_system_type() == UV_NON_UNIQUE_APIC) + set_x2apic_extra_bits(uv_hub_info->pnode); +} + + +void __init uv_system_init(void) +{ + union uvh_si_addr_map_config_u m_n_config; + union uvh_node_id_u node_id; + unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; + int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; + int max_pnode = 0; + unsigned long mmr_base, present; + + map_low_mmrs(); + + m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); + m_val = m_n_config.s.m_skt; + n_val = m_n_config.s.n_skt; + mmr_base = + uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & + ~UV_MMR_ENABLE; + printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); + + for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) + uv_possible_blades += + hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); + printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); + + bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); + uv_blade_info = kmalloc(bytes, GFP_KERNEL); + + get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); + + bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); + uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); + memset(uv_node_to_blade, 255, bytes); + + bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); + uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); + memset(uv_cpu_to_blade, 255, bytes); + + blade = 0; + for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { + present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); + for (j = 0; j < 64; j++) { + if (!test_bit(j, &present)) + continue; + uv_blade_info[blade].pnode = (i * 64 + j); + uv_blade_info[blade].nr_possible_cpus = 0; + uv_blade_info[blade].nr_online_cpus = 0; + blade++; + } + } + + node_id.v = uv_read_local_mmr(UVH_NODE_ID); + gnode_upper = (((unsigned long)node_id.s.node_id) & + ~((1 << n_val) - 1)) << m_val; + + uv_bios_init(); + uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, + &sn_coherency_id, &sn_region_size); + uv_rtc_init(); + + for_each_present_cpu(cpu) { + nid = cpu_to_node(cpu); + pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); + blade = boot_pnode_to_blade(pnode); + lcpu = uv_blade_info[blade].nr_possible_cpus; + uv_blade_info[blade].nr_possible_cpus++; + + uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; + uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; + uv_cpu_hub_info(cpu)->m_val = m_val; + uv_cpu_hub_info(cpu)->n_val = m_val; + uv_cpu_hub_info(cpu)->numa_blade_id = blade; + uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; + uv_cpu_hub_info(cpu)->pnode = pnode; + uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1; + uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; + uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; + uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; + uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; + uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; + uv_node_to_blade[nid] = blade; + uv_cpu_to_blade[cpu] = blade; + max_pnode = max(pnode, max_pnode); + + printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " + "lcpu %d, blade %d\n", + cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid, + lcpu, blade); + } + + map_gru_high(max_pnode); + map_mmr_high(max_pnode); + map_config_high(max_pnode); + map_mmioh_high(max_pnode); + + uv_cpu_init(); + uv_scir_register_cpu_notifier(); + proc_mkdir("sgi_uv", NULL); +} diff --git 
a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c deleted file mode 100644 index 70935dd904d..00000000000 --- a/arch/x86/kernel/genapic_64.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Generic APIC sub-arch probe layer. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -extern struct apic apic_flat; -extern struct apic apic_physflat; -extern struct apic apic_x2xpic_uv_x; -extern struct apic apic_x2apic_phys; -extern struct apic apic_x2apic_cluster; - -struct apic __read_mostly *apic = &apic_flat; -EXPORT_SYMBOL_GPL(apic); - -static struct apic *apic_probe[] __initdata = { -#ifdef CONFIG_X86_UV - &apic_x2apic_uv_x, -#endif -#ifdef CONFIG_X86_X2APIC - &apic_x2apic_phys, - &apic_x2apic_cluster, -#endif - &apic_physflat, - NULL, -}; - -/* - * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. - */ -void __init default_setup_apic_routing(void) -{ -#ifdef CONFIG_X86_X2APIC - if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { - if (!intr_remapping_enabled) - apic = &apic_flat; - } -#endif - - if (apic == &apic_flat) { - if (max_physical_apicid >= 8) - apic = &apic_physflat; - printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); - } - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); -} - -/* Same for both flat and physical. */ - -void apic_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); -} - -int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { - apic = apic_probe[i]; - printk(KERN_INFO "Setting APIC routing to %s.\n", - apic->name); - return 1; - } - } - return 0; -} diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c deleted file mode 100644 index 3b002995e14..00000000000 --- a/arch/x86/kernel/genapic_flat_64.c +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Flat APIC subarch code. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_ACPI -#include -#endif - -static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 1; -} - -static const struct cpumask *flat_target_cpus(void) -{ - return cpu_online_mask; -} - -static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - cpumask_clear(retmask); - cpumask_bits(retmask)[0] = APIC_ALL_CPUS; -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. 
"AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static void flat_init_apic_ldr(void) -{ - unsigned long val; - unsigned long num, id; - - num = smp_processor_id(); - id = 1UL << num; - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(id); - apic_write(APIC_LDR, val); -} - -static inline void _flat_send_IPI_mask(unsigned long mask, int vector) -{ - unsigned long flags; - - local_irq_save(flags); - __default_send_IPI_dest_field(mask, vector, apic->dest_logical); - local_irq_restore(flags); -} - -static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - - _flat_send_IPI_mask(mask, vector); -} - -static void - flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - int cpu = smp_processor_id(); - - if (cpu < BITS_PER_LONG) - clear_bit(cpu, &mask); - - _flat_send_IPI_mask(mask, vector); -} - -static void flat_send_IPI_allbutself(int vector) -{ - int cpu = smp_processor_id(); -#ifdef CONFIG_HOTPLUG_CPU - int hotplug = 1; -#else - int hotplug = 0; -#endif - if (hotplug || vector == NMI_VECTOR) { - if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { - unsigned long mask = cpumask_bits(cpu_online_mask)[0]; - - if (cpu < BITS_PER_LONG) - clear_bit(cpu, &mask); - - _flat_send_IPI_mask(mask, vector); - } - } else if (num_online_cpus() > 1) { - __default_send_IPI_shortcut(APIC_DEST_ALLBUT, - vector, apic->dest_logical); - } -} - -static void flat_send_IPI_all(int vector) -{ - if (vector == NMI_VECTOR) { - flat_send_IPI_mask(cpu_online_mask, vector); - } else { - __default_send_IPI_shortcut(APIC_DEST_ALLINC, - vector, apic->dest_logical); - } -} - -static unsigned int flat_get_apic_id(unsigned long x) -{ - unsigned int id; - - id = (((x)>>24) & 0xFFu); - - return id; -} - -static unsigned long set_apic_id(unsigned int id) -{ - unsigned long x; - - x = ((id & 0xFFu)<<24); - return x; -} - -static unsigned int read_xapic_id(void) -{ - unsigned int id; - - id = flat_get_apic_id(apic_read(APIC_ID)); - return id; -} - -static int flat_apic_id_registered(void) -{ - return physid_isset(read_xapic_id(), phys_cpu_present_map); -} - -static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; -} - -static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; - unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; - - return mask1 & mask2; -} - -static int flat_phys_pkg_id(int initial_apic_id, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -struct apic apic_flat = { - .name = "flat", - .probe = NULL, - .acpi_madt_oem_check = flat_acpi_madt_oem_check, - .apic_id_registered = flat_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - .irq_dest_mode = 1, /* logical */ - - .target_cpus = flat_target_cpus, - .disable_esr = 0, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = flat_vector_allocation_domain, - .init_apic_ldr = flat_init_apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - 
.apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = flat_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = flat_get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFu << 24, - - .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, - - .send_IPI_mask = flat_send_IPI_mask, - .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, - .send_IPI_allbutself = flat_send_IPI_allbutself, - .send_IPI_all = flat_send_IPI_all, - .send_IPI_self = apic_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; - -/* - * Physflat mode is used when there are more than 8 CPUs on a AMD system. - * We cannot use logical delivery in this case because the mask - * overflows, so use physical mode. - */ -static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ -#ifdef CONFIG_ACPI - /* - * Quirk: some x86_64 machines can only use physical APIC mode - * regardless of how many processors are present (x86_64 ES7000 - * is an example). - */ - if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && - (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { - printk(KERN_DEBUG "system APIC only can use physical flat"); - return 1; - } -#endif - - return 0; -} - -static const struct cpumask *physflat_target_cpus(void) -{ - return cpu_online_mask; -} - -static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - cpumask_clear(retmask); - cpumask_set_cpu(cpu, retmask); -} - -static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) -{ - default_send_IPI_mask_sequence_phys(cpumask, vector); -} - -static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, - int vector) -{ - default_send_IPI_mask_allbutself_phys(cpumask, vector); -} - -static void physflat_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); -} - -static void physflat_send_IPI_all(int vector) -{ - physflat_send_IPI_mask(cpu_online_mask, vector); -} - -static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. - */ - cpu = cpumask_first(cpumask); - if ((unsigned)cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - else - return BAD_APICID; -} - -static unsigned int -physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - if (cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - - return BAD_APICID; -} - -struct apic apic_physflat = { - - .name = "physical flat", - .probe = NULL, - .acpi_madt_oem_check = physflat_acpi_madt_oem_check, - .apic_id_registered = flat_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = 0, /* physical */ - - .target_cpus = physflat_target_cpus, - .disable_esr = 0, - .dest_logical = 0, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = physflat_vector_allocation_domain, - /* not needed, but shouldn't hurt: */ - .init_apic_ldr = flat_init_apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = flat_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = flat_get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFu << 24, - - .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, - - .send_IPI_mask = physflat_send_IPI_mask, - .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, - .send_IPI_allbutself = physflat_send_IPI_allbutself, - .send_IPI_all = physflat_send_IPI_all, - .send_IPI_self = apic_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c deleted file mode 100644 index 4e39d9ad4d5..00000000000 --- a/arch/x86/kernel/genx2apic_cluster.c +++ /dev/null @@ -1,243 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); - -static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (cpu_has_x2apic) - return 1; - - return 0; -} - -/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ - -static const struct cpumask *x2apic_target_cpus(void) -{ - return cpumask_of(0); -} - -/* - * for now each logical cpu is in its own vector allocation domain. - */ -static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - cpumask_clear(retmask); - cpumask_set_cpu(cpu, retmask); -} - -static void - __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) -{ - unsigned long cfg; - - cfg = __prepare_ICR(0, vector, dest); - - /* - * send the IPI. - */ - native_x2apic_icr_write(cfg, apicid); -} - -/* - * for now, we send the IPI's one by one in the cpumask. - * TBD: Based on the cpu mask, we can send the IPI's to the cluster group - * at once. We have 16 cpu's in a cluster. This will minimize IPI register - * writes. 
- */ -static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) -{ - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - __x2apic_send_IPI_dest( - per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->dest_logical); - } - local_irq_restore(flags); -} - -static void - x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) -{ - unsigned long this_cpu = smp_processor_id(); - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __x2apic_send_IPI_dest( - per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->dest_logical); - } - local_irq_restore(flags); -} - -static void x2apic_send_IPI_allbutself(int vector) -{ - unsigned long this_cpu = smp_processor_id(); - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_online_cpu(query_cpu) { - if (query_cpu == this_cpu) - continue; - __x2apic_send_IPI_dest( - per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, apic->dest_logical); - } - local_irq_restore(flags); -} - -static void x2apic_send_IPI_all(int vector) -{ - x2apic_send_IPI_mask(cpu_online_mask, vector); -} - -static int x2apic_apic_id_registered(void) -{ - return 1; -} - -static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - /* - * We're using fixed IRQ delivery, can only return one logical APIC ID. - * May as well be the first. - */ - int cpu = cpumask_first(cpumask); - - if ((unsigned)cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_logical_apicid, cpu); - else - return BAD_APICID; -} - -static unsigned int -x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one logical APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - - if (cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_logical_apicid, cpu); - - return BAD_APICID; -} - -static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x) -{ - unsigned int id; - - id = x; - return id; -} - -static unsigned long set_apic_id(unsigned int id) -{ - unsigned long x; - - x = id; - return x; -} - -static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) -{ - return current_cpu_data.initial_apicid >> index_msb; -} - -static void x2apic_send_IPI_self(int vector) -{ - apic_write(APIC_SELF_IPI, vector); -} - -static void init_x2apic_ldr(void) -{ - int cpu = smp_processor_id(); - - per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); -} - -struct apic apic_x2apic_cluster = { - - .name = "cluster x2apic", - .probe = NULL, - .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, - .apic_id_registered = x2apic_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - .irq_dest_mode = 1, /* logical */ - - .target_cpus = x2apic_target_cpus, - .disable_esr = 0, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = x2apic_vector_allocation_domain, - .init_apic_ldr = init_x2apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = x2apic_cluster_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = x2apic_cluster_phys_get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFFFFFFFu, - - .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, - - .send_IPI_mask = x2apic_send_IPI_mask, - .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, - .send_IPI_allbutself = x2apic_send_IPI_allbutself, - .send_IPI_all = x2apic_send_IPI_all, - .send_IPI_self = x2apic_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, - - .read = native_apic_msr_read, - .write = native_apic_msr_write, - .icr_read = native_x2apic_icr_read, - .icr_write = native_x2apic_icr_write, - .wait_icr_idle = native_x2apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c deleted file mode 100644 index d2d52eb9f7e..00000000000 --- a/arch/x86/kernel/genx2apic_phys.c +++ /dev/null @@ -1,229 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static int x2apic_phys; - -static int set_x2apic_phys_mode(char *arg) -{ - x2apic_phys = 1; - return 0; -} -early_param("x2apic_phys", set_x2apic_phys_mode); - -static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (cpu_has_x2apic && x2apic_phys) - return 1; - - return 0; -} - -/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ - -static const struct cpumask *x2apic_target_cpus(void) -{ - return cpumask_of(0); -} - -static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - cpumask_clear(retmask); - cpumask_set_cpu(cpu, retmask); -} - -static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, - unsigned int dest) -{ - unsigned long cfg; - - cfg = __prepare_ICR(0, vector, dest); - - /* - * send the IPI. - */ - native_x2apic_icr_write(cfg, apicid); -} - -static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) -{ - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -static void - x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) -{ - unsigned long this_cpu = smp_processor_id(); - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu != this_cpu) - __x2apic_send_IPI_dest( - per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -static void x2apic_send_IPI_allbutself(int vector) -{ - unsigned long this_cpu = smp_processor_id(); - unsigned long query_cpu; - unsigned long flags; - - local_irq_save(flags); - for_each_online_cpu(query_cpu) { - if (query_cpu == this_cpu) - continue; - __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), - vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -static void x2apic_send_IPI_all(int vector) -{ - x2apic_send_IPI_mask(cpu_online_mask, vector); -} - -static int x2apic_apic_id_registered(void) -{ - return 1; -} - -static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. - */ - int cpu = cpumask_first(cpumask); - - if ((unsigned)cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - else - return BAD_APICID; -} - -static unsigned int -x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - - if (cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - - return BAD_APICID; -} - -static unsigned int x2apic_phys_get_apic_id(unsigned long x) -{ - return x; -} - -static unsigned long set_apic_id(unsigned int id) -{ - return id; -} - -static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) -{ - return current_cpu_data.initial_apicid >> index_msb; -} - -static void x2apic_send_IPI_self(int vector) -{ - apic_write(APIC_SELF_IPI, vector); -} - -static void init_x2apic_ldr(void) -{ -} - -struct apic apic_x2apic_phys = { - - .name = "physical x2apic", - .probe = NULL, - .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, - .apic_id_registered = x2apic_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = 0, /* physical */ - - .target_cpus = x2apic_target_cpus, - .disable_esr = 0, - .dest_logical = 0, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = x2apic_vector_allocation_domain, - .init_apic_ldr = init_x2apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = x2apic_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = x2apic_phys_get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFFFFFFFu, - - .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, - - .send_IPI_mask = x2apic_send_IPI_mask, - .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, - .send_IPI_allbutself = x2apic_send_IPI_allbutself, - .send_IPI_all = x2apic_send_IPI_all, - .send_IPI_self = x2apic_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, - - .read = native_apic_msr_read, - .write = native_apic_msr_write, - .icr_read = native_x2apic_icr_read, - .icr_write = native_x2apic_icr_write, - .wait_icr_idle = native_x2apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c deleted file mode 100644 index 20b4ad07c3a..00000000000 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ /dev/null @@ -1,643 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * SGI UV APIC functions (note: not an Intel compatible APIC) - * - * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -DEFINE_PER_CPU(int, x2apic_extra_bits); - -static enum uv_system_type uv_system_type; - -static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (!strcmp(oem_id, "SGI")) { - if (!strcmp(oem_table_id, "UVL")) - uv_system_type = UV_LEGACY_APIC; - else if (!strcmp(oem_table_id, "UVX")) - uv_system_type = UV_X2APIC; - else if (!strcmp(oem_table_id, "UVH")) { - uv_system_type = UV_NON_UNIQUE_APIC; - return 1; - } - } - return 0; -} - -enum uv_system_type get_uv_system_type(void) -{ - return uv_system_type; -} - -int is_uv_system(void) -{ - return uv_system_type != UV_NONE; -} -EXPORT_SYMBOL_GPL(is_uv_system); - -DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); -EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); - -struct uv_blade_info *uv_blade_info; -EXPORT_SYMBOL_GPL(uv_blade_info); - -short *uv_node_to_blade; -EXPORT_SYMBOL_GPL(uv_node_to_blade); - -short *uv_cpu_to_blade; -EXPORT_SYMBOL_GPL(uv_cpu_to_blade); - -short uv_possible_blades; -EXPORT_SYMBOL_GPL(uv_possible_blades); - -unsigned long sn_rtc_cycles_per_second; -EXPORT_SYMBOL(sn_rtc_cycles_per_second); - -/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ - -static const struct cpumask *uv_target_cpus(void) -{ - return cpumask_of(0); -} - -static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - cpumask_clear(retmask); - cpumask_set_cpu(cpu, retmask); -} - -int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) -{ - unsigned long val; - int pnode; - - pnode = uv_apicid_to_pnode(phys_apicid); - val = (1UL << UVH_IPI_INT_SEND_SHFT) | - (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | - (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | - APIC_DM_INIT; - uv_write_global_mmr64(pnode, UVH_IPI_INT, val); - mdelay(10); - - val = (1UL << UVH_IPI_INT_SEND_SHFT) | - (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | - (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | - APIC_DM_STARTUP; - uv_write_global_mmr64(pnode, UVH_IPI_INT, val); - return 0; -} - -static void uv_send_IPI_one(int cpu, int vector) -{ - unsigned long val, apicid; - int pnode; - - apicid = per_cpu(x86_cpu_to_apicid, cpu); - pnode = uv_apicid_to_pnode(apicid); - - val = (1UL << UVH_IPI_INT_SEND_SHFT) | - (apicid << UVH_IPI_INT_APIC_ID_SHFT) | - (vector << UVH_IPI_INT_VECTOR_SHFT); - - uv_write_global_mmr64(pnode, UVH_IPI_INT, val); -} - -static void uv_send_IPI_mask(const struct cpumask *mask, int vector) -{ - unsigned int cpu; - - for_each_cpu(cpu, mask) - uv_send_IPI_one(cpu, vector); -} - -static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int cpu; - - for_each_cpu(cpu, mask) { - if (cpu != this_cpu) - uv_send_IPI_one(cpu, vector); - } -} - -static void uv_send_IPI_allbutself(int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int cpu; - - for_each_online_cpu(cpu) { - if (cpu != this_cpu) - uv_send_IPI_one(cpu, vector); - } -} - -static void uv_send_IPI_all(int vector) -{ - uv_send_IPI_mask(cpu_online_mask, vector); -} - -static int uv_apic_id_registered(void) -{ - return 1; -} - -static void uv_init_apic_ldr(void) -{ -} - -static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) -{ - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. 
- * May as well be the first. - */ - int cpu = cpumask_first(cpumask); - - if ((unsigned)cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - else - return BAD_APICID; -} - -static unsigned int -uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. - */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - if (cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); - - return BAD_APICID; -} - -static unsigned int x2apic_get_apic_id(unsigned long x) -{ - unsigned int id; - - WARN_ON(preemptible() && num_online_cpus() > 1); - id = x | __get_cpu_var(x2apic_extra_bits); - - return id; -} - -static unsigned long set_apic_id(unsigned int id) -{ - unsigned long x; - - /* maskout x2apic_extra_bits ? */ - x = id; - return x; -} - -static unsigned int uv_read_apic_id(void) -{ - - return x2apic_get_apic_id(apic_read(APIC_ID)); -} - -static int uv_phys_pkg_id(int initial_apicid, int index_msb) -{ - return uv_read_apic_id() >> index_msb; -} - -static void uv_send_IPI_self(int vector) -{ - apic_write(APIC_SELF_IPI, vector); -} - -struct apic apic_x2apic_uv_x = { - - .name = "UV large system", - .probe = NULL, - .acpi_madt_oem_check = uv_acpi_madt_oem_check, - .apic_id_registered = uv_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - .irq_dest_mode = 1, /* logical */ - - .target_cpus = uv_target_cpus, - .disable_esr = 0, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = NULL, - .check_apicid_present = NULL, - - .vector_allocation_domain = uv_vector_allocation_domain, - .init_apic_ldr = uv_init_apic_ldr, - - .ioapic_phys_id_map = NULL, - .setup_apic_routing = NULL, - .multi_timer_check = NULL, - .apicid_to_node = NULL, - .cpu_to_logical_apicid = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = NULL, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = uv_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = x2apic_get_apic_id, - .set_apic_id = set_apic_id, - .apic_id_mask = 0xFFFFFFFFu, - - .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, - - .send_IPI_mask = uv_send_IPI_mask, - .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, - .send_IPI_allbutself = uv_send_IPI_allbutself, - .send_IPI_all = uv_send_IPI_all, - .send_IPI_self = uv_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - .wait_for_init_deassert = NULL, - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = NULL, - - .read = native_apic_msr_read, - .write = native_apic_msr_write, - .icr_read = native_x2apic_icr_read, - .icr_write = native_x2apic_icr_write, - .wait_icr_idle = native_x2apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, -}; - -static __cpuinit void set_x2apic_extra_bits(int pnode) -{ - __get_cpu_var(x2apic_extra_bits) = (pnode << 6); -} - -/* - * Called on boot cpu. 
- */ -static __init int boot_pnode_to_blade(int pnode) -{ - int blade; - - for (blade = 0; blade < uv_num_possible_blades(); blade++) - if (pnode == uv_blade_info[blade].pnode) - return blade; - BUG(); -} - -struct redir_addr { - unsigned long redirect; - unsigned long alias; -}; - -#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT - -static __initdata struct redir_addr redir_addrs[] = { - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, -}; - -static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) -{ - union uvh_si_alias0_overlay_config_u alias; - union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; - int i; - - for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { - alias.v = uv_read_local_mmr(redir_addrs[i].alias); - if (alias.s.base == 0) { - *size = (1UL << alias.s.m_alias); - redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); - *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; - return; - } - } - BUG(); -} - -static __init void map_low_mmrs(void) -{ - init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); - init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); -} - -enum map_type {map_wb, map_uc}; - -static __init void map_high(char *id, unsigned long base, int shift, - int max_pnode, enum map_type map_type) -{ - unsigned long bytes, paddr; - - paddr = base << shift; - bytes = (1UL << shift) * (max_pnode + 1); - printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, - paddr + bytes); - if (map_type == map_uc) - init_extra_mapping_uc(paddr, bytes); - else - init_extra_mapping_wb(paddr, bytes); - -} -static __init void map_gru_high(int max_pnode) -{ - union uvh_rh_gam_gru_overlay_config_mmr_u gru; - int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; - - gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); - if (gru.s.enable) - map_high("GRU", gru.s.base, shift, max_pnode, map_wb); -} - -static __init void map_config_high(int max_pnode) -{ - union uvh_rh_gam_cfg_overlay_config_mmr_u cfg; - int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT; - - cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); - if (cfg.s.enable) - map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc); -} - -static __init void map_mmr_high(int max_pnode) -{ - union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; - int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; - - mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); - if (mmr.s.enable) - map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); -} - -static __init void map_mmioh_high(int max_pnode) -{ - union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; - int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; - - mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); - if (mmioh.s.enable) - map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); -} - -static __init void uv_rtc_init(void) -{ - long status; - u64 ticks_per_sec; - - status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, - &ticks_per_sec); - if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { - printk(KERN_WARNING - "unable to determine platform RTC clock frequency, " - "guessing.\n"); - /* BIOS gives wrong value for clock freq. 
so guess */ - sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; - } else - sn_rtc_cycles_per_second = ticks_per_sec; -} - -/* - * percpu heartbeat timer - */ -static void uv_heartbeat(unsigned long ignored) -{ - struct timer_list *timer = &uv_hub_info->scir.timer; - unsigned char bits = uv_hub_info->scir.state; - - /* flip heartbeat bit */ - bits ^= SCIR_CPU_HEARTBEAT; - - /* is this cpu idle? */ - if (idle_cpu(raw_smp_processor_id())) - bits &= ~SCIR_CPU_ACTIVITY; - else - bits |= SCIR_CPU_ACTIVITY; - - /* update system controller interface reg */ - uv_set_scir_bits(bits); - - /* enable next timer period */ - mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); -} - -static void __cpuinit uv_heartbeat_enable(int cpu) -{ - if (!uv_cpu_hub_info(cpu)->scir.enabled) { - struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; - - uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); - setup_timer(timer, uv_heartbeat, cpu); - timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; - add_timer_on(timer, cpu); - uv_cpu_hub_info(cpu)->scir.enabled = 1; - } - - /* check boot cpu */ - if (!uv_cpu_hub_info(0)->scir.enabled) - uv_heartbeat_enable(0); -} - -#ifdef CONFIG_HOTPLUG_CPU -static void __cpuinit uv_heartbeat_disable(int cpu) -{ - if (uv_cpu_hub_info(cpu)->scir.enabled) { - uv_cpu_hub_info(cpu)->scir.enabled = 0; - del_timer(&uv_cpu_hub_info(cpu)->scir.timer); - } - uv_set_cpu_scir_bits(cpu, 0xff); -} - -/* - * cpu hotplug notifier - */ -static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - long cpu = (long)hcpu; - - switch (action) { - case CPU_ONLINE: - uv_heartbeat_enable(cpu); - break; - case CPU_DOWN_PREPARE: - uv_heartbeat_disable(cpu); - break; - default: - break; - } - return NOTIFY_OK; -} - -static __init void uv_scir_register_cpu_notifier(void) -{ - hotcpu_notifier(uv_scir_cpu_notify, 0); -} - -#else /* !CONFIG_HOTPLUG_CPU */ - -static __init void uv_scir_register_cpu_notifier(void) -{ -} - -static __init int uv_init_heartbeat(void) -{ - int cpu; - - if (is_uv_system()) - for_each_online_cpu(cpu) - uv_heartbeat_enable(cpu); - return 0; -} - -late_initcall(uv_init_heartbeat); - -#endif /* !CONFIG_HOTPLUG_CPU */ - -/* - * Called on each cpu to initialize the per_cpu UV data area. - * ZZZ hotplug not supported yet - */ -void __cpuinit uv_cpu_init(void) -{ - /* CPU 0 initilization will be done via uv_system_init. 
*/ - if (!uv_blade_info) - return; - - uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; - - if (get_uv_system_type() == UV_NON_UNIQUE_APIC) - set_x2apic_extra_bits(uv_hub_info->pnode); -} - - -void __init uv_system_init(void) -{ - union uvh_si_addr_map_config_u m_n_config; - union uvh_node_id_u node_id; - unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; - int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; - int max_pnode = 0; - unsigned long mmr_base, present; - - map_low_mmrs(); - - m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); - m_val = m_n_config.s.m_skt; - n_val = m_n_config.s.n_skt; - mmr_base = - uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & - ~UV_MMR_ENABLE; - printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); - - for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) - uv_possible_blades += - hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); - printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); - - bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); - uv_blade_info = kmalloc(bytes, GFP_KERNEL); - - get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); - - bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); - uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); - memset(uv_node_to_blade, 255, bytes); - - bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); - uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); - memset(uv_cpu_to_blade, 255, bytes); - - blade = 0; - for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { - present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); - for (j = 0; j < 64; j++) { - if (!test_bit(j, &present)) - continue; - uv_blade_info[blade].pnode = (i * 64 + j); - uv_blade_info[blade].nr_possible_cpus = 0; - uv_blade_info[blade].nr_online_cpus = 0; - blade++; - } - } - - node_id.v = uv_read_local_mmr(UVH_NODE_ID); - gnode_upper = (((unsigned long)node_id.s.node_id) & - ~((1 << n_val) - 1)) << m_val; - - uv_bios_init(); - uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, - &sn_coherency_id, &sn_region_size); - uv_rtc_init(); - - for_each_present_cpu(cpu) { - nid = cpu_to_node(cpu); - pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); - blade = boot_pnode_to_blade(pnode); - lcpu = uv_blade_info[blade].nr_possible_cpus; - uv_blade_info[blade].nr_possible_cpus++; - - uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; - uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; - uv_cpu_hub_info(cpu)->m_val = m_val; - uv_cpu_hub_info(cpu)->n_val = m_val; - uv_cpu_hub_info(cpu)->numa_blade_id = blade; - uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; - uv_cpu_hub_info(cpu)->pnode = pnode; - uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1; - uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; - uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; - uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; - uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; - uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; - uv_node_to_blade[nid] = blade; - uv_cpu_to_blade[cpu] = blade; - max_pnode = max(pnode, max_pnode); - - printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " - "lcpu %d, blade %d\n", - cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid, - lcpu, blade); - } - - map_gru_high(max_pnode); - map_mmr_high(max_pnode); - map_config_high(max_pnode); - map_mmioh_high(max_pnode); - - uv_cpu_init(); - uv_scir_register_cpu_notifier(); - proc_mkdir("sgi_uv", NULL); -} diff --git 
a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c deleted file mode 100644 index 00e6071cefc..00000000000 --- a/arch/x86/kernel/io_apic.c +++ /dev/null @@ -1,4160 +0,0 @@ -/* - * Intel IO-APIC support for multi-Pentium hosts. - * - * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo - * - * Many thanks to Stig Venaas for trying out countless experimental - * patches and reporting/debugging problems patiently! - * - * (c) 1999, Multiple IO-APIC support, developed by - * Ken-ichi Yaku and - * Hidemi Kishimoto , - * further tested and cleaned up by Zach Brown - * and Ingo Molnar - * - * Fixes - * Maciej W. Rozycki : Bits for genuine 82489DX APICs; - * thanks to Eric Gilmore - * and Rolf G. Tews - * for testing these extensively - * Paul Diefenbaugh : Added full ACPI support - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* time_after() */ -#ifdef CONFIG_ACPI -#include -#endif -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define __apicdebuginit(type) static type __init - -/* - * Is the SiS APIC rmw bug present ? - * -1 = don't know, 0 = no, 1 = yes - */ -int sis_apic_bug = -1; - -static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); - -/* - * # of IRQ routing registers - */ -int nr_ioapic_registers[MAX_IO_APICS]; - -/* I/O APIC entries */ -struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; -int nr_ioapics; - -/* MP IRQ source entries */ -struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* # of MP IRQ source entries */ -int mp_irq_entries; - -#if defined (CONFIG_MCA) || defined (CONFIG_EISA) -int mp_bus_id_to_type[MAX_MP_BUSSES]; -#endif - -DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); - -int skip_ioapic_setup; - -void arch_disable_smp_support(void) -{ -#ifdef CONFIG_PCI - noioapicquirk = 1; - noioapicreroute = -1; -#endif - skip_ioapic_setup = 1; -} - -static int __init parse_noapic(char *str) -{ - /* disable IO-APIC */ - arch_disable_smp_support(); - return 0; -} -early_param("noapic", parse_noapic); - -struct irq_pin_list; - -/* - * This is performance-critical, we want to do it O(1) - * - * the indexing order of this array favors 1:1 mappings - * between pins and IRQs. - */ - -struct irq_pin_list { - int apic, pin; - struct irq_pin_list *next; -}; - -static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) -{ - struct irq_pin_list *pin; - int node; - - node = cpu_to_node(cpu); - - pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); - - return pin; -} - -struct irq_cfg { - struct irq_pin_list *irq_2_pin; - cpumask_var_t domain; - cpumask_var_t old_domain; - unsigned move_cleanup_count; - u8 vector; - u8 move_in_progress : 1; -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC - u8 move_desc_pending : 1; -#endif -}; - -/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. 
*/ -#ifdef CONFIG_SPARSE_IRQ -static struct irq_cfg irq_cfgx[] = { -#else -static struct irq_cfg irq_cfgx[NR_IRQS] = { -#endif - [0] = { .vector = IRQ0_VECTOR, }, - [1] = { .vector = IRQ1_VECTOR, }, - [2] = { .vector = IRQ2_VECTOR, }, - [3] = { .vector = IRQ3_VECTOR, }, - [4] = { .vector = IRQ4_VECTOR, }, - [5] = { .vector = IRQ5_VECTOR, }, - [6] = { .vector = IRQ6_VECTOR, }, - [7] = { .vector = IRQ7_VECTOR, }, - [8] = { .vector = IRQ8_VECTOR, }, - [9] = { .vector = IRQ9_VECTOR, }, - [10] = { .vector = IRQ10_VECTOR, }, - [11] = { .vector = IRQ11_VECTOR, }, - [12] = { .vector = IRQ12_VECTOR, }, - [13] = { .vector = IRQ13_VECTOR, }, - [14] = { .vector = IRQ14_VECTOR, }, - [15] = { .vector = IRQ15_VECTOR, }, -}; - -int __init arch_early_irq_init(void) -{ - struct irq_cfg *cfg; - struct irq_desc *desc; - int count; - int i; - - cfg = irq_cfgx; - count = ARRAY_SIZE(irq_cfgx); - - for (i = 0; i < count; i++) { - desc = irq_to_desc(i); - desc->chip_data = &cfg[i]; - alloc_bootmem_cpumask_var(&cfg[i].domain); - alloc_bootmem_cpumask_var(&cfg[i].old_domain); - if (i < NR_IRQS_LEGACY) - cpumask_setall(cfg[i].domain); - } - - return 0; -} - -#ifdef CONFIG_SPARSE_IRQ -static struct irq_cfg *irq_cfg(unsigned int irq) -{ - struct irq_cfg *cfg = NULL; - struct irq_desc *desc; - - desc = irq_to_desc(irq); - if (desc) - cfg = desc->chip_data; - - return cfg; -} - -static struct irq_cfg *get_one_free_irq_cfg(int cpu) -{ - struct irq_cfg *cfg; - int node; - - node = cpu_to_node(cpu); - - cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); - if (cfg) { - if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { - kfree(cfg); - cfg = NULL; - } else if (!alloc_cpumask_var_node(&cfg->old_domain, - GFP_ATOMIC, node)) { - free_cpumask_var(cfg->domain); - kfree(cfg); - cfg = NULL; - } else { - cpumask_clear(cfg->domain); - cpumask_clear(cfg->old_domain); - } - } - - return cfg; -} - -int arch_init_chip_data(struct irq_desc *desc, int cpu) -{ - struct irq_cfg *cfg; - - cfg = desc->chip_data; - if (!cfg) { - desc->chip_data = get_one_free_irq_cfg(cpu); - if (!desc->chip_data) { - printk(KERN_ERR "can not alloc irq_cfg\n"); - BUG_ON(1); - } - } - - return 0; -} - -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC - -static void -init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) -{ - struct irq_pin_list *old_entry, *head, *tail, *entry; - - cfg->irq_2_pin = NULL; - old_entry = old_cfg->irq_2_pin; - if (!old_entry) - return; - - entry = get_one_free_irq_2_pin(cpu); - if (!entry) - return; - - entry->apic = old_entry->apic; - entry->pin = old_entry->pin; - head = entry; - tail = entry; - old_entry = old_entry->next; - while (old_entry) { - entry = get_one_free_irq_2_pin(cpu); - if (!entry) { - entry = head; - while (entry) { - head = entry->next; - kfree(entry); - entry = head; - } - /* still use the old one */ - return; - } - entry->apic = old_entry->apic; - entry->pin = old_entry->pin; - tail->next = entry; - tail = entry; - old_entry = old_entry->next; - } - - tail->next = NULL; - cfg->irq_2_pin = head; -} - -static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) -{ - struct irq_pin_list *entry, *next; - - if (old_cfg->irq_2_pin == cfg->irq_2_pin) - return; - - entry = old_cfg->irq_2_pin; - - while (entry) { - next = entry->next; - kfree(entry); - entry = next; - } - old_cfg->irq_2_pin = NULL; -} - -void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int cpu) -{ - struct irq_cfg *cfg; - struct irq_cfg *old_cfg; - - cfg = get_one_free_irq_cfg(cpu); - - if 
(!cfg) - return; - - desc->chip_data = cfg; - - old_cfg = old_desc->chip_data; - - memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); - - init_copy_irq_2_pin(old_cfg, cfg, cpu); -} - -static void free_irq_cfg(struct irq_cfg *old_cfg) -{ - kfree(old_cfg); -} - -void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) -{ - struct irq_cfg *old_cfg, *cfg; - - old_cfg = old_desc->chip_data; - cfg = desc->chip_data; - - if (old_cfg == cfg) - return; - - if (old_cfg) { - free_irq_2_pin(old_cfg, cfg); - free_irq_cfg(old_cfg); - old_desc->chip_data = NULL; - } -} - -static void -set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg = desc->chip_data; - - if (!cfg->move_in_progress) { - /* it means that domain is not changed */ - if (!cpumask_intersects(desc->affinity, mask)) - cfg->move_desc_pending = 1; - } -} -#endif - -#else -static struct irq_cfg *irq_cfg(unsigned int irq) -{ - return irq < nr_irqs ? irq_cfgx + irq : NULL; -} - -#endif - -#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC -static inline void -set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) -{ -} -#endif - -struct io_apic { - unsigned int index; - unsigned int unused[3]; - unsigned int data; -}; - -static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) -{ - return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) - + (mp_ioapics[idx].apicaddr & ~PAGE_MASK); -} - -static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) -{ - struct io_apic __iomem *io_apic = io_apic_base(apic); - writel(reg, &io_apic->index); - return readl(&io_apic->data); -} - -static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) -{ - struct io_apic __iomem *io_apic = io_apic_base(apic); - writel(reg, &io_apic->index); - writel(value, &io_apic->data); -} - -/* - * Re-write a value: to be used for read-modify-write - * cycles where the read already set up the index register. - * - * Older SiS APIC requires we rewrite the index register - */ -static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) -{ - struct io_apic __iomem *io_apic = io_apic_base(apic); - - if (sis_apic_bug) - writel(reg, &io_apic->index); - writel(value, &io_apic->data); -} - -static bool io_apic_level_ack_pending(struct irq_cfg *cfg) -{ - struct irq_pin_list *entry; - unsigned long flags; - - spin_lock_irqsave(&ioapic_lock, flags); - entry = cfg->irq_2_pin; - for (;;) { - unsigned int reg; - int pin; - - if (!entry) - break; - pin = entry->pin; - reg = io_apic_read(entry->apic, 0x10 + pin*2); - /* Is the remote IRR bit set? */ - if (reg & IO_APIC_REDIR_REMOTE_IRR) { - spin_unlock_irqrestore(&ioapic_lock, flags); - return true; - } - if (!entry->next) - break; - entry = entry->next; - } - spin_unlock_irqrestore(&ioapic_lock, flags); - - return false; -} - -union entry_union { - struct { u32 w1, w2; }; - struct IO_APIC_route_entry entry; -}; - -static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) -{ - union entry_union eu; - unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); - eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); - eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); - spin_unlock_irqrestore(&ioapic_lock, flags); - return eu.entry; -} - -/* - * When we write a new IO APIC routing entry, we need to write the high - * word first! If the mask bit in the low word is clear, we will enable - * the interrupt, and we need to make sure the entry is fully populated - * before that happens. 
- */ -static void -__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) -{ - union entry_union eu; - eu.entry = e; - io_apic_write(apic, 0x11 + 2*pin, eu.w2); - io_apic_write(apic, 0x10 + 2*pin, eu.w1); -} - -void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) -{ - unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); - __ioapic_write_entry(apic, pin, e); - spin_unlock_irqrestore(&ioapic_lock, flags); -} - -/* - * When we mask an IO APIC routing entry, we need to write the low - * word first, in order to set the mask bit before we change the - * high bits! - */ -static void ioapic_mask_entry(int apic, int pin) -{ - unsigned long flags; - union entry_union eu = { .entry.mask = 1 }; - - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic, 0x10 + 2*pin, eu.w1); - io_apic_write(apic, 0x11 + 2*pin, eu.w2); - spin_unlock_irqrestore(&ioapic_lock, flags); -} - -#ifdef CONFIG_SMP -static void send_cleanup_vector(struct irq_cfg *cfg) -{ - cpumask_var_t cleanup_mask; - - if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { - unsigned int i; - cfg->move_cleanup_count = 0; - for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) - cfg->move_cleanup_count++; - for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) - apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); - } else { - cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); - cfg->move_cleanup_count = cpumask_weight(cleanup_mask); - apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); - free_cpumask_var(cleanup_mask); - } - cfg->move_in_progress = 0; -} - -static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) -{ - int apic, pin; - struct irq_pin_list *entry; - u8 vector = cfg->vector; - - entry = cfg->irq_2_pin; - for (;;) { - unsigned int reg; - - if (!entry) - break; - - apic = entry->apic; - pin = entry->pin; -#ifdef CONFIG_INTR_REMAP - /* - * With interrupt-remapping, destination information comes - * from interrupt-remapping table entry. - */ - if (!irq_remapped(irq)) - io_apic_write(apic, 0x11 + pin*2, dest); -#else - io_apic_write(apic, 0x11 + pin*2, dest); -#endif - reg = io_apic_read(apic, 0x10 + pin*2); - reg &= ~IO_APIC_REDIR_VECTOR_MASK; - reg |= vector; - io_apic_modify(apic, 0x10 + pin*2, reg); - if (!entry->next) - break; - entry = entry->next; - } -} - -static int -assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); - -/* - * Either sets desc->affinity to a valid value, and returns - * ->cpu_mask_to_apicid of that, or returns BAD_APICID and - * leaves desc->affinity untouched. - */ -static unsigned int -set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg; - unsigned int irq; - - if (!cpumask_intersects(mask, cpu_online_mask)) - return BAD_APICID; - - irq = desc->irq; - cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return BAD_APICID; - - cpumask_and(desc->affinity, cfg->domain, mask); - set_extra_move_desc(desc, mask); - - return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask); -} - -static void -set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg; - unsigned long flags; - unsigned int dest; - unsigned int irq; - - irq = desc->irq; - cfg = desc->chip_data; - - spin_lock_irqsave(&ioapic_lock, flags); - dest = set_desc_affinity(desc, mask); - if (dest != BAD_APICID) { - /* Only the high 8 bits are valid. 
*/ - dest = SET_APIC_LOGICAL_ID(dest); - __target_IO_APIC_irq(irq, dest, cfg); - } - spin_unlock_irqrestore(&ioapic_lock, flags); -} - -static void -set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - set_ioapic_affinity_irq_desc(desc, mask); -} -#endif /* CONFIG_SMP */ - -/* - * The common case is 1:1 IRQ<->pin mappings. Sometimes there are - * shared ISA-space IRQs, so we have to support them. We are super - * fast in the common case, and fast for shared ISA-space IRQs. - */ -static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) -{ - struct irq_pin_list *entry; - - entry = cfg->irq_2_pin; - if (!entry) { - entry = get_one_free_irq_2_pin(cpu); - if (!entry) { - printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", - apic, pin); - return; - } - cfg->irq_2_pin = entry; - entry->apic = apic; - entry->pin = pin; - return; - } - - while (entry->next) { - /* not again, please */ - if (entry->apic == apic && entry->pin == pin) - return; - - entry = entry->next; - } - - entry->next = get_one_free_irq_2_pin(cpu); - entry = entry->next; - entry->apic = apic; - entry->pin = pin; -} - -/* - * Reroute an IRQ to a different pin. - */ -static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu, - int oldapic, int oldpin, - int newapic, int newpin) -{ - struct irq_pin_list *entry = cfg->irq_2_pin; - int replaced = 0; - - while (entry) { - if (entry->apic == oldapic && entry->pin == oldpin) { - entry->apic = newapic; - entry->pin = newpin; - replaced = 1; - /* every one is different, right? */ - break; - } - entry = entry->next; - } - - /* why? call replace before add? */ - if (!replaced) - add_pin_to_irq_cpu(cfg, cpu, newapic, newpin); -} - -static inline void io_apic_modify_irq(struct irq_cfg *cfg, - int mask_and, int mask_or, - void (*final)(struct irq_pin_list *entry)) -{ - int pin; - struct irq_pin_list *entry; - - for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { - unsigned int reg; - pin = entry->pin; - reg = io_apic_read(entry->apic, 0x10 + pin * 2); - reg &= mask_and; - reg |= mask_or; - io_apic_modify(entry->apic, 0x10 + pin * 2, reg); - if (final) - final(entry); - } -} - -static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); -} - -#ifdef CONFIG_X86_64 -static void io_apic_sync(struct irq_pin_list *entry) -{ - /* - * Synchronize the IO-APIC and the CPU by doing - * a dummy read from the IO-APIC - */ - struct io_apic __iomem *io_apic; - io_apic = io_apic_base(entry->apic); - readl(&io_apic->data); -} - -static void __mask_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); -} -#else /* CONFIG_X86_32 */ -static void __mask_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL); -} - -static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER, - IO_APIC_REDIR_MASKED, NULL); -} - -static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, - IO_APIC_REDIR_LEVEL_TRIGGER, NULL); -} -#endif /* CONFIG_X86_32 */ - -static void mask_IO_APIC_irq_desc(struct irq_desc *desc) -{ - struct irq_cfg *cfg = desc->chip_data; - unsigned long flags; - - BUG_ON(!cfg); - - spin_lock_irqsave(&ioapic_lock, flags); - __mask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); -} - -static void 
unmask_IO_APIC_irq_desc(struct irq_desc *desc) -{ - struct irq_cfg *cfg = desc->chip_data; - unsigned long flags; - - spin_lock_irqsave(&ioapic_lock, flags); - __unmask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); -} - -static void mask_IO_APIC_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - mask_IO_APIC_irq_desc(desc); -} -static void unmask_IO_APIC_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - unmask_IO_APIC_irq_desc(desc); -} - -static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) -{ - struct IO_APIC_route_entry entry; - - /* Check delivery_mode to be sure we're not clearing an SMI pin */ - entry = ioapic_read_entry(apic, pin); - if (entry.delivery_mode == dest_SMI) - return; - /* - * Disable it in the IO-APIC irq-routing table: - */ - ioapic_mask_entry(apic, pin); -} - -static void clear_IO_APIC (void) -{ - int apic, pin; - - for (apic = 0; apic < nr_ioapics; apic++) - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) - clear_IO_APIC_pin(apic, pin); -} - -#ifdef CONFIG_X86_32 -/* - * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to - * specific CPU-side IRQs. - */ - -#define MAX_PIRQS 8 -static int pirq_entries[MAX_PIRQS] = { - [0 ... MAX_PIRQS - 1] = -1 -}; - -static int __init ioapic_pirq_setup(char *str) -{ - int i, max; - int ints[MAX_PIRQS+1]; - - get_options(str, ARRAY_SIZE(ints), ints); - - apic_printk(APIC_VERBOSE, KERN_INFO - "PIRQ redirection, working around broken MP-BIOS.\n"); - max = MAX_PIRQS; - if (ints[0] < MAX_PIRQS) - max = ints[0]; - - for (i = 0; i < max; i++) { - apic_printk(APIC_VERBOSE, KERN_DEBUG - "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); - /* - * PIRQs are mapped upside down, usually. - */ - pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; - } - return 1; -} - -__setup("pirq=", ioapic_pirq_setup); -#endif /* CONFIG_X86_32 */ - -#ifdef CONFIG_INTR_REMAP -/* I/O APIC RTE contents at the OS boot up */ -static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; - -/* - * Saves and masks all the unmasked IO-APIC RTE's - */ -int save_mask_IO_APIC_setup(void) -{ - union IO_APIC_reg_01 reg_01; - unsigned long flags; - int apic, pin; - - /* - * The number of IO-APIC IRQ registers (== #pins): - */ - for (apic = 0; apic < nr_ioapics; apic++) { - spin_lock_irqsave(&ioapic_lock, flags); - reg_01.raw = io_apic_read(apic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); - nr_ioapic_registers[apic] = reg_01.bits.entries+1; - } - - for (apic = 0; apic < nr_ioapics; apic++) { - early_ioapic_entries[apic] = - kzalloc(sizeof(struct IO_APIC_route_entry) * - nr_ioapic_registers[apic], GFP_KERNEL); - if (!early_ioapic_entries[apic]) - goto nomem; - } - - for (apic = 0; apic < nr_ioapics; apic++) - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { - struct IO_APIC_route_entry entry; - - entry = early_ioapic_entries[apic][pin] = - ioapic_read_entry(apic, pin); - if (!entry.mask) { - entry.mask = 1; - ioapic_write_entry(apic, pin, entry); - } - } - - return 0; - -nomem: - while (apic >= 0) - kfree(early_ioapic_entries[apic--]); - memset(early_ioapic_entries, 0, - ARRAY_SIZE(early_ioapic_entries)); - - return -ENOMEM; -} - -void restore_IO_APIC_setup(void) -{ - int apic, pin; - - for (apic = 0; apic < nr_ioapics; apic++) { - if (!early_ioapic_entries[apic]) - break; - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) - ioapic_write_entry(apic, pin, - early_ioapic_entries[apic][pin]); - kfree(early_ioapic_entries[apic]); - early_ioapic_entries[apic] = NULL; - } -} - 
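The save/restore pair above is easiest to follow with a caller in view. The sketch below is hypothetical and only illustrative: save_mask_IO_APIC_setup() and restore_IO_APIC_setup() are quoted from the hunk above, while try_enable_intr_remapping() and arch_enable_remapping_hw() are made-up names standing in for the real x2apic/interrupt-remapping enable path, which lives outside this hunk.

/* Hypothetical caller sketch -- illustrative only, not part of this patch. */
static int __init try_enable_intr_remapping(void)
{
	int ret;

	/*
	 * Snapshot every RTE and mask the unmasked ones, so no IO-APIC
	 * interrupt can fire while the remapping hardware is brought up.
	 */
	ret = save_mask_IO_APIC_setup();
	if (ret)
		return ret;	/* -ENOMEM from the allocation above */

	if (arch_enable_remapping_hw()) {	/* assumed helper, not a real API */
		/* Failure: put the saved RTEs back exactly as they were. */
		restore_IO_APIC_setup();
		return -EIO;
	}

	/*
	 * Success: the RTEs are rebuilt later to point at remapping-table
	 * entries, as the comment in reinit_intr_remapped_IO_APIC() notes.
	 */
	return 0;
}
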
-void reinit_intr_remapped_IO_APIC(int intr_remapping) -{ - /* - * for now plain restore of previous settings. - * TBD: In the case of OS enabling interrupt-remapping, - * IO-APIC RTE's need to be setup to point to interrupt-remapping - * table entries. for now, do a plain restore, and wait for - * the setup_IO_APIC_irqs() to do proper initialization. - */ - restore_IO_APIC_setup(); -} -#endif - -/* - * Find the IRQ entry number of a certain pin. - */ -static int find_irq_entry(int apic, int pin, int type) -{ - int i; - - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].irqtype == type && - (mp_irqs[i].dstapic == mp_ioapics[apic].apicid || - mp_irqs[i].dstapic == MP_APIC_ALL) && - mp_irqs[i].dstirq == pin) - return i; - - return -1; -} - -/* - * Find the pin to which IRQ[irq] (ISA) is connected - */ -static int __init find_isa_irq_pin(int irq, int type) -{ - int i; - - for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].srcbus; - - if (test_bit(lbus, mp_bus_not_pci) && - (mp_irqs[i].irqtype == type) && - (mp_irqs[i].srcbusirq == irq)) - - return mp_irqs[i].dstirq; - } - return -1; -} - -static int __init find_isa_irq_apic(int irq, int type) -{ - int i; - - for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].srcbus; - - if (test_bit(lbus, mp_bus_not_pci) && - (mp_irqs[i].irqtype == type) && - (mp_irqs[i].srcbusirq == irq)) - break; - } - if (i < mp_irq_entries) { - int apic; - for(apic = 0; apic < nr_ioapics; apic++) { - if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic) - return apic; - } - } - - return -1; -} - -/* - * Find a specific PCI IRQ entry. - * Not an __init, possibly needed by modules - */ -static int pin_2_irq(int idx, int apic, int pin); - -int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) -{ - int apic, i, best_guess = -1; - - apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", - bus, slot, pin); - if (test_bit(bus, mp_bus_not_pci)) { - apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus); - return -1; - } - for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].srcbus; - - for (apic = 0; apic < nr_ioapics; apic++) - if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || - mp_irqs[i].dstapic == MP_APIC_ALL) - break; - - if (!test_bit(lbus, mp_bus_not_pci) && - !mp_irqs[i].irqtype && - (bus == lbus) && - (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { - int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); - - if (!(apic || IO_APIC_IRQ(irq))) - continue; - - if (pin == (mp_irqs[i].srcbusirq & 3)) - return irq; - /* - * Use the first all-but-pin matching entry as a - * best-guess fuzzy result for broken mptables. - */ - if (best_guess < 0) - best_guess = irq; - } - } - return best_guess; -} - -EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); - -#if defined(CONFIG_EISA) || defined(CONFIG_MCA) -/* - * EISA Edge/Level control register, ELCR - */ -static int EISA_ELCR(unsigned int irq) -{ - if (irq < NR_IRQS_LEGACY) { - unsigned int port = 0x4d0 + (irq >> 3); - return (inb(port) >> (irq & 7)) & 1; - } - apic_printk(APIC_VERBOSE, KERN_INFO - "Broken MPtable reports ISA irq %d\n", irq); - return 0; -} - -#endif - -/* ISA interrupts are always polarity zero edge triggered, - * when listed as conforming in the MP table. */ - -#define default_ISA_trigger(idx) (0) -#define default_ISA_polarity(idx) (0) - -/* EISA interrupts are always polarity zero and can be edge or level - * trigger depending on the ELCR value. 
If an interrupt is listed as - * EISA conforming in the MP table, that means its trigger type must - * be read in from the ELCR */ - -#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) -#define default_EISA_polarity(idx) default_ISA_polarity(idx) - -/* PCI interrupts are always polarity one level triggered, - * when listed as conforming in the MP table. */ - -#define default_PCI_trigger(idx) (1) -#define default_PCI_polarity(idx) (1) - -/* MCA interrupts are always polarity zero level triggered, - * when listed as conforming in the MP table. */ - -#define default_MCA_trigger(idx) (1) -#define default_MCA_polarity(idx) default_ISA_polarity(idx) - -static int MPBIOS_polarity(int idx) -{ - int bus = mp_irqs[idx].srcbus; - int polarity; - - /* - * Determine IRQ line polarity (high active or low active): - */ - switch (mp_irqs[idx].irqflag & 3) - { - case 0: /* conforms, ie. bus-type dependent polarity */ - if (test_bit(bus, mp_bus_not_pci)) - polarity = default_ISA_polarity(idx); - else - polarity = default_PCI_polarity(idx); - break; - case 1: /* high active */ - { - polarity = 0; - break; - } - case 2: /* reserved */ - { - printk(KERN_WARNING "broken BIOS!!\n"); - polarity = 1; - break; - } - case 3: /* low active */ - { - polarity = 1; - break; - } - default: /* invalid */ - { - printk(KERN_WARNING "broken BIOS!!\n"); - polarity = 1; - break; - } - } - return polarity; -} - -static int MPBIOS_trigger(int idx) -{ - int bus = mp_irqs[idx].srcbus; - int trigger; - - /* - * Determine IRQ trigger mode (edge or level sensitive): - */ - switch ((mp_irqs[idx].irqflag>>2) & 3) - { - case 0: /* conforms, ie. bus-type dependent */ - if (test_bit(bus, mp_bus_not_pci)) - trigger = default_ISA_trigger(idx); - else - trigger = default_PCI_trigger(idx); -#if defined(CONFIG_EISA) || defined(CONFIG_MCA) - switch (mp_bus_id_to_type[bus]) { - case MP_BUS_ISA: /* ISA pin */ - { - /* set before the switch */ - break; - } - case MP_BUS_EISA: /* EISA pin */ - { - trigger = default_EISA_trigger(idx); - break; - } - case MP_BUS_PCI: /* PCI pin */ - { - /* set before the switch */ - break; - } - case MP_BUS_MCA: /* MCA pin */ - { - trigger = default_MCA_trigger(idx); - break; - } - default: - { - printk(KERN_WARNING "broken BIOS!!\n"); - trigger = 1; - break; - } - } -#endif - break; - case 1: /* edge */ - { - trigger = 0; - break; - } - case 2: /* reserved */ - { - printk(KERN_WARNING "broken BIOS!!\n"); - trigger = 1; - break; - } - case 3: /* level */ - { - trigger = 1; - break; - } - default: /* invalid */ - { - printk(KERN_WARNING "broken BIOS!!\n"); - trigger = 0; - break; - } - } - return trigger; -} - -static inline int irq_polarity(int idx) -{ - return MPBIOS_polarity(idx); -} - -static inline int irq_trigger(int idx) -{ - return MPBIOS_trigger(idx); -} - -int (*ioapic_renumber_irq)(int ioapic, int irq); -static int pin_2_irq(int idx, int apic, int pin) -{ - int irq, i; - int bus = mp_irqs[idx].srcbus; - - /* - * Debugging check, we are in big trouble if this message pops up! - */ - if (mp_irqs[idx].dstirq != pin) - printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); - - if (test_bit(bus, mp_bus_not_pci)) { - irq = mp_irqs[idx].srcbusirq; - } else { - /* - * PCI IRQs are mapped in order - */ - i = irq = 0; - while (i < apic) - irq += nr_ioapic_registers[i++]; - irq += pin; - /* - * For MPS mode, so far only needed by ES7000 platform - */ - if (ioapic_renumber_irq) - irq = ioapic_renumber_irq(apic, irq); - } - -#ifdef CONFIG_X86_32 - /* - * PCI IRQ command line redirection. 
Yes, limits are hardcoded. - */ - if ((pin >= 16) && (pin <= 23)) { - if (pirq_entries[pin-16] != -1) { - if (!pirq_entries[pin-16]) { - apic_printk(APIC_VERBOSE, KERN_DEBUG - "disabling PIRQ%d\n", pin-16); - } else { - irq = pirq_entries[pin-16]; - apic_printk(APIC_VERBOSE, KERN_DEBUG - "using PIRQ%d -> IRQ %d\n", - pin-16, irq); - } - } - } -#endif - - return irq; -} - -void lock_vector_lock(void) -{ - /* Used to the online set of cpus does not change - * during assign_irq_vector. - */ - spin_lock(&vector_lock); -} - -void unlock_vector_lock(void) -{ - spin_unlock(&vector_lock); -} - -static int -__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) -{ - /* - * NOTE! The local APIC isn't very good at handling - * multiple interrupts at the same interrupt level. - * As the interrupt level is determined by taking the - * vector number and shifting that right by 4, we - * want to spread these out a bit so that they don't - * all fall in the same interrupt level. - * - * Also, we've got to be careful not to trash gate - * 0x80, because int 0x80 is hm, kind of importantish. ;) - */ - static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; - unsigned int old_vector; - int cpu, err; - cpumask_var_t tmp_mask; - - if ((cfg->move_in_progress) || cfg->move_cleanup_count) - return -EBUSY; - - if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) - return -ENOMEM; - - old_vector = cfg->vector; - if (old_vector) { - cpumask_and(tmp_mask, mask, cpu_online_mask); - cpumask_and(tmp_mask, cfg->domain, tmp_mask); - if (!cpumask_empty(tmp_mask)) { - free_cpumask_var(tmp_mask); - return 0; - } - } - - /* Only try and allocate irqs on cpus that are present */ - err = -ENOSPC; - for_each_cpu_and(cpu, mask, cpu_online_mask) { - int new_cpu; - int vector, offset; - - apic->vector_allocation_domain(cpu, tmp_mask); - - vector = current_vector; - offset = current_offset; -next: - vector += 8; - if (vector >= first_system_vector) { - /* If out of vectors on large boxen, must share them. */ - offset = (offset + 1) % 8; - vector = FIRST_DEVICE_VECTOR + offset; - } - if (unlikely(current_vector == vector)) - continue; - - if (test_bit(vector, used_vectors)) - goto next; - - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - if (per_cpu(vector_irq, new_cpu)[vector] != -1) - goto next; - /* Found one! 
*/ - current_vector = vector; - current_offset = offset; - if (old_vector) { - cfg->move_in_progress = 1; - cpumask_copy(cfg->old_domain, cfg->domain); - } - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - per_cpu(vector_irq, new_cpu)[vector] = irq; - cfg->vector = vector; - cpumask_copy(cfg->domain, tmp_mask); - err = 0; - break; - } - free_cpumask_var(tmp_mask); - return err; -} - -static int -assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) -{ - int err; - unsigned long flags; - - spin_lock_irqsave(&vector_lock, flags); - err = __assign_irq_vector(irq, cfg, mask); - spin_unlock_irqrestore(&vector_lock, flags); - return err; -} - -static void __clear_irq_vector(int irq, struct irq_cfg *cfg) -{ - int cpu, vector; - - BUG_ON(!cfg->vector); - - vector = cfg->vector; - for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) - per_cpu(vector_irq, cpu)[vector] = -1; - - cfg->vector = 0; - cpumask_clear(cfg->domain); - - if (likely(!cfg->move_in_progress)) - return; - for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; - vector++) { - if (per_cpu(vector_irq, cpu)[vector] != irq) - continue; - per_cpu(vector_irq, cpu)[vector] = -1; - break; - } - } - cfg->move_in_progress = 0; -} - -void __setup_vector_irq(int cpu) -{ - /* Initialize vector_irq on a new cpu */ - /* This function must be called with vector_lock held */ - int irq, vector; - struct irq_cfg *cfg; - struct irq_desc *desc; - - /* Mark the inuse vectors */ - for_each_irq_desc(irq, desc) { - cfg = desc->chip_data; - if (!cpumask_test_cpu(cpu, cfg->domain)) - continue; - vector = cfg->vector; - per_cpu(vector_irq, cpu)[vector] = irq; - } - /* Mark the free vectors */ - for (vector = 0; vector < NR_VECTORS; ++vector) { - irq = per_cpu(vector_irq, cpu)[vector]; - if (irq < 0) - continue; - - cfg = irq_cfg(irq); - if (!cpumask_test_cpu(cpu, cfg->domain)) - per_cpu(vector_irq, cpu)[vector] = -1; - } -} - -static struct irq_chip ioapic_chip; -#ifdef CONFIG_INTR_REMAP -static struct irq_chip ir_ioapic_chip; -#endif - -#define IOAPIC_AUTO -1 -#define IOAPIC_EDGE 0 -#define IOAPIC_LEVEL 1 - -#ifdef CONFIG_X86_32 -static inline int IO_APIC_irq_trigger(int irq) -{ - int apic, idx, pin; - - for (apic = 0; apic < nr_ioapics; apic++) { - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { - idx = find_irq_entry(apic, pin, mp_INT); - if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) - return irq_trigger(idx); - } - } - /* - * nonexistent IRQs are edge default - */ - return 0; -} -#else -static inline int IO_APIC_irq_trigger(int irq) -{ - return 1; -} -#endif - -static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) -{ - - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - desc->status |= IRQ_LEVEL; - else - desc->status &= ~IRQ_LEVEL; - -#ifdef CONFIG_INTR_REMAP - if (irq_remapped(irq)) { - desc->status |= IRQ_MOVE_PCNTXT; - if (trigger) - set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, - handle_fasteoi_irq, - "fasteoi"); - else - set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, - handle_edge_irq, "edge"); - return; - } -#endif - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_fasteoi_irq, - "fasteoi"); - else - set_irq_chip_and_handler_name(irq, &ioapic_chip, - handle_edge_irq, "edge"); -} - -int setup_ioapic_entry(int apic_id, int irq, - struct IO_APIC_route_entry *entry, 
- unsigned int destination, int trigger, - int polarity, int vector) -{ - /* - * add it to the IO-APIC irq-routing table: - */ - memset(entry,0,sizeof(*entry)); - -#ifdef CONFIG_INTR_REMAP - if (intr_remapping_enabled) { - struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); - struct irte irte; - struct IR_IO_APIC_route_entry *ir_entry = - (struct IR_IO_APIC_route_entry *) entry; - int index; - - if (!iommu) - panic("No mapping iommu for ioapic %d\n", apic_id); - - index = alloc_irte(iommu, irq, 1); - if (index < 0) - panic("Failed to allocate IRTE for ioapic %d\n", apic_id); - - memset(&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - irte.trigger_mode = trigger; - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = vector; - irte.dest_id = IRTE_DEST(destination); - - modify_irte(irq, &irte); - - ir_entry->index2 = (index >> 15) & 0x1; - ir_entry->zero = 0; - ir_entry->format = 1; - ir_entry->index = (index & 0x7fff); - } else -#endif - { - entry->delivery_mode = apic->irq_delivery_mode; - entry->dest_mode = apic->irq_dest_mode; - entry->dest = destination; - } - - entry->mask = 0; /* enable IRQ */ - entry->trigger = trigger; - entry->polarity = polarity; - entry->vector = vector; - - /* Mask level triggered irqs. - * Use IRQ_DELAYED_DISABLE for edge triggered irqs. - */ - if (trigger) - entry->mask = 1; - return 0; -} - -static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, - int trigger, int polarity) -{ - struct irq_cfg *cfg; - struct IO_APIC_route_entry entry; - unsigned int dest; - - if (!IO_APIC_IRQ(irq)) - return; - - cfg = desc->chip_data; - - if (assign_irq_vector(irq, cfg, apic->target_cpus())) - return; - - dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); - - apic_printk(APIC_VERBOSE,KERN_DEBUG - "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " - "IRQ %d Mode:%i Active:%i)\n", - apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector, - irq, trigger, polarity); - - - if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry, - dest, trigger, polarity, cfg->vector)) { - printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", - mp_ioapics[apic_id].apicid, pin); - __clear_irq_vector(irq, cfg); - return; - } - - ioapic_register_intr(irq, desc, trigger); - if (irq < NR_IRQS_LEGACY) - disable_8259A_irq(irq); - - ioapic_write_entry(apic_id, pin, entry); -} - -static void __init setup_IO_APIC_irqs(void) -{ - int apic_id, pin, idx, irq; - int notcon = 0; - struct irq_desc *desc; - struct irq_cfg *cfg; - int cpu = boot_cpu_id; - - apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); - - for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { - for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { - - idx = find_irq_entry(apic_id, pin, mp_INT); - if (idx == -1) { - if (!notcon) { - notcon = 1; - apic_printk(APIC_VERBOSE, - KERN_DEBUG " %d-%d", - mp_ioapics[apic_id].apicid, pin); - } else - apic_printk(APIC_VERBOSE, " %d-%d", - mp_ioapics[apic_id].apicid, pin); - continue; - } - if (notcon) { - apic_printk(APIC_VERBOSE, - " (apicid-pin) not connected\n"); - notcon = 0; - } - - irq = pin_2_irq(idx, apic_id, pin); - - /* - * Skip the timer IRQ if there's a quirk handler - * installed and if it returns 1: - */ - if (apic->multi_timer_check && - apic->multi_timer_check(apic_id, irq)) - continue; - - desc = irq_to_desc_alloc_cpu(irq, cpu); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); - continue; - } - cfg = desc->chip_data; - 
add_pin_to_irq_cpu(cfg, cpu, apic_id, pin); - - setup_IO_APIC_irq(apic_id, pin, irq, desc, - irq_trigger(idx), irq_polarity(idx)); - } - } - - if (notcon) - apic_printk(APIC_VERBOSE, - " (apicid-pin) not connected\n"); -} - -/* - * Set up the timer pin, possibly with the 8259A-master behind. - */ -static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, - int vector) -{ - struct IO_APIC_route_entry entry; - -#ifdef CONFIG_INTR_REMAP - if (intr_remapping_enabled) - return; -#endif - - memset(&entry, 0, sizeof(entry)); - - /* - * We use logical delivery to get the timer IRQ - * to the first CPU. - */ - entry.dest_mode = apic->irq_dest_mode; - entry.mask = 0; /* don't mask IRQ for edge */ - entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); - entry.delivery_mode = apic->irq_delivery_mode; - entry.polarity = 0; - entry.trigger = 0; - entry.vector = vector; - - /* - * The timer IRQ doesn't have to know that behind the - * scene we may have a 8259A-master in AEOI mode ... - */ - set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); - - /* - * Add it to the IO-APIC irq-routing table: - */ - ioapic_write_entry(apic_id, pin, entry); -} - - -__apicdebuginit(void) print_IO_APIC(void) -{ - int apic, i; - union IO_APIC_reg_00 reg_00; - union IO_APIC_reg_01 reg_01; - union IO_APIC_reg_02 reg_02; - union IO_APIC_reg_03 reg_03; - unsigned long flags; - struct irq_cfg *cfg; - struct irq_desc *desc; - unsigned int irq; - - if (apic_verbosity == APIC_QUIET) - return; - - printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); - for (i = 0; i < nr_ioapics; i++) - printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", - mp_ioapics[i].apicid, nr_ioapic_registers[i]); - - /* - * We are a bit conservative about what we expect. We have to - * know about every hardware change ASAP. - */ - printk(KERN_INFO "testing the IO APIC.......................\n"); - - for (apic = 0; apic < nr_ioapics; apic++) { - - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic, 0); - reg_01.raw = io_apic_read(apic, 1); - if (reg_01.bits.version >= 0x10) - reg_02.raw = io_apic_read(apic, 2); - if (reg_01.bits.version >= 0x20) - reg_03.raw = io_apic_read(apic, 3); - spin_unlock_irqrestore(&ioapic_lock, flags); - - printk("\n"); - printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); - printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); - printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); - printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); - printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); - - printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); - printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); - - printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); - printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); - - /* - * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, - * but the value of reg_02 is read as the previous read register - * value, so ignore it if reg_02 == reg_01. - */ - if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { - printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); - printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); - } - - /* - * Some Intel chipsets with IO APIC VERSION of 0x2? 
don't have reg_02 - * or reg_03, but the value of reg_0[23] is read as the previous read - * register value, so ignore it if reg_03 == reg_0[12]. - */ - if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && - reg_03.raw != reg_01.raw) { - printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); - printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); - } - - printk(KERN_DEBUG ".... IRQ redirection table:\n"); - - printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" - " Stat Dmod Deli Vect: \n"); - - for (i = 0; i <= reg_01.bits.entries; i++) { - struct IO_APIC_route_entry entry; - - entry = ioapic_read_entry(apic, i); - - printk(KERN_DEBUG " %02x %03X ", - i, - entry.dest - ); - - printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", - entry.mask, - entry.trigger, - entry.irr, - entry.polarity, - entry.delivery_status, - entry.dest_mode, - entry.delivery_mode, - entry.vector - ); - } - } - printk(KERN_DEBUG "IRQ to pin mappings:\n"); - for_each_irq_desc(irq, desc) { - struct irq_pin_list *entry; - - cfg = desc->chip_data; - entry = cfg->irq_2_pin; - if (!entry) - continue; - printk(KERN_DEBUG "IRQ%d ", irq); - for (;;) { - printk("-> %d:%d", entry->apic, entry->pin); - if (!entry->next) - break; - entry = entry->next; - } - printk("\n"); - } - - printk(KERN_INFO ".................................... done.\n"); - - return; -} - -__apicdebuginit(void) print_APIC_bitfield(int base) -{ - unsigned int v; - int i, j; - - if (apic_verbosity == APIC_QUIET) - return; - - printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG); - for (i = 0; i < 8; i++) { - v = apic_read(base + i*0x10); - for (j = 0; j < 32; j++) { - if (v & (1< 3) /* Due to the Pentium erratum 3AP. */ - apic_write(APIC_ESR, 0); - - v = apic_read(APIC_ESR); - printk(KERN_DEBUG "... APIC ESR: %08x\n", v); - } - - icr = apic_icr_read(); - printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); - printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); - - v = apic_read(APIC_LVTT); - printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); - - if (maxlvt > 3) { /* PC is LVT#4. */ - v = apic_read(APIC_LVTPC); - printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); - } - v = apic_read(APIC_LVT0); - printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); - v = apic_read(APIC_LVT1); - printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); - - if (maxlvt > 2) { /* ERR is LVT#3. */ - v = apic_read(APIC_LVTERR); - printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); - } - - v = apic_read(APIC_TMICT); - printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); - v = apic_read(APIC_TMCCT); - printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); - v = apic_read(APIC_TDCR); - printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); - printk("\n"); -} - -__apicdebuginit(void) print_all_local_APICs(void) -{ - int cpu; - - preempt_disable(); - for_each_online_cpu(cpu) - smp_call_function_single(cpu, print_local_APIC, NULL, 1); - preempt_enable(); -} - -__apicdebuginit(void) print_PIC(void) -{ - unsigned int v; - unsigned long flags; - - if (apic_verbosity == APIC_QUIET) - return; - - printk(KERN_DEBUG "\nprinting PIC contents\n"); - - spin_lock_irqsave(&i8259A_lock, flags); - - v = inb(0xa1) << 8 | inb(0x21); - printk(KERN_DEBUG "... PIC IMR: %04x\n", v); - - v = inb(0xa0) << 8 | inb(0x20); - printk(KERN_DEBUG "... PIC IRR: %04x\n", v); - - outb(0x0b,0xa0); - outb(0x0b,0x20); - v = inb(0xa0) << 8 | inb(0x20); - outb(0x0a,0xa0); - outb(0x0a,0x20); - - spin_unlock_irqrestore(&i8259A_lock, flags); - - printk(KERN_DEBUG "... 
PIC ISR: %04x\n", v); - - v = inb(0x4d1) << 8 | inb(0x4d0); - printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); -} - -__apicdebuginit(int) print_all_ICs(void) -{ - print_PIC(); - print_all_local_APICs(); - print_IO_APIC(); - - return 0; -} - -fs_initcall(print_all_ICs); - - -/* Where if anywhere is the i8259 connect in external int mode */ -static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; - -void __init enable_IO_APIC(void) -{ - union IO_APIC_reg_01 reg_01; - int i8259_apic, i8259_pin; - int apic; - unsigned long flags; - - /* - * The number of IO-APIC IRQ registers (== #pins): - */ - for (apic = 0; apic < nr_ioapics; apic++) { - spin_lock_irqsave(&ioapic_lock, flags); - reg_01.raw = io_apic_read(apic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); - nr_ioapic_registers[apic] = reg_01.bits.entries+1; - } - for(apic = 0; apic < nr_ioapics; apic++) { - int pin; - /* See if any of the pins is in ExtINT mode */ - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { - struct IO_APIC_route_entry entry; - entry = ioapic_read_entry(apic, pin); - - /* If the interrupt line is enabled and in ExtInt mode - * I have found the pin where the i8259 is connected. - */ - if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { - ioapic_i8259.apic = apic; - ioapic_i8259.pin = pin; - goto found_i8259; - } - } - } - found_i8259: - /* Look to see what if the MP table has reported the ExtINT */ - /* If we could not find the appropriate pin by looking at the ioapic - * the i8259 probably is not connected the ioapic but give the - * mptable a chance anyway. - */ - i8259_pin = find_isa_irq_pin(0, mp_ExtINT); - i8259_apic = find_isa_irq_apic(0, mp_ExtINT); - /* Trust the MP table if nothing is setup in the hardware */ - if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { - printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); - ioapic_i8259.pin = i8259_pin; - ioapic_i8259.apic = i8259_apic; - } - /* Complain if the MP table and the hardware disagree */ - if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && - (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) - { - printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); - } - - /* - * Do not trust the IO-APIC being empty at bootup - */ - clear_IO_APIC(); -} - -/* - * Not an __init, needed by the reboot code - */ -void disable_IO_APIC(void) -{ - /* - * Clear the IO-APIC before rebooting: - */ - clear_IO_APIC(); - - /* - * If the i8259 is routed through an IOAPIC - * Put that IOAPIC in virtual wire mode - * so legacy interrupts can be delivered. - */ - if (ioapic_i8259.pin != -1) { - struct IO_APIC_route_entry entry; - - memset(&entry, 0, sizeof(entry)); - entry.mask = 0; /* Enabled */ - entry.trigger = 0; /* Edge */ - entry.irr = 0; - entry.polarity = 0; /* High */ - entry.delivery_status = 0; - entry.dest_mode = 0; /* Physical */ - entry.delivery_mode = dest_ExtINT; /* ExtInt */ - entry.vector = 0; - entry.dest = read_apic_id(); - - /* - * Add it to the IO-APIC irq-routing table: - */ - ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); - } - - disconnect_bsp_APIC(ioapic_i8259.pin != -1); -} - -#ifdef CONFIG_X86_32 -/* - * function to set the IO-APIC physical IDs based on the - * values stored in the MPC table. 
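
Both default_EISA_trigger() earlier and the print_PIC() dump above consult the ELCR at ports 0x4d0/0x4d1, where a set bit marks the corresponding ISA/EISA IRQ as level-triggered. A small sketch of that decoding, using an assumed register value instead of real port I/O (not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Pretend ports 0x4d1 (IRQs 8-15) and 0x4d0 (IRQs 0-7) were already
	 * read; the code above does:  v = inb(0x4d1) << 8 | inb(0x4d0);   */
	unsigned int elcr = 0x0c20;	/* hypothetical value */
	int irq;

	for (irq = 0; irq < 16; irq++)
		printf("IRQ%-2d: %s\n", irq,
		       (elcr >> irq) & 1 ? "level" : "edge");
	return 0;
}
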
- * - * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 - */ - -static void __init setup_ioapic_ids_from_mpc(void) -{ - union IO_APIC_reg_00 reg_00; - physid_mask_t phys_id_present_map; - int apic_id; - int i; - unsigned char old_id; - unsigned long flags; - - if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) - return; - - /* - * Don't check I/O APIC IDs for xAPIC systems. They have - * no meaning without the serial APIC bus. - */ - if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return; - /* - * This is broken; anything with a real cpu count has to - * circumvent this idiocy regardless. - */ - phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map); - - /* - * Set the IOAPIC ID to the value stored in the MPC table. - */ - for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { - - /* Read the register 0 value */ - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic_id, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); - - old_id = mp_ioapics[apic_id].apicid; - - if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) { - printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", - apic_id, mp_ioapics[apic_id].apicid); - printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", - reg_00.bits.ID); - mp_ioapics[apic_id].apicid = reg_00.bits.ID; - } - - /* - * Sanity check, is the ID really free? Every APIC in a - * system must have a unique ID or we get lots of nice - * 'stuck on smp_invalidate_needed IPI wait' messages. - */ - if (apic->check_apicid_used(phys_id_present_map, - mp_ioapics[apic_id].apicid)) { - printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", - apic_id, mp_ioapics[apic_id].apicid); - for (i = 0; i < get_physical_broadcast(); i++) - if (!physid_isset(i, phys_id_present_map)) - break; - if (i >= get_physical_broadcast()) - panic("Max APIC ID exceeded!\n"); - printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", - i); - physid_set(i, phys_id_present_map); - mp_ioapics[apic_id].apicid = i; - } else { - physid_mask_t tmp; - tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid); - apic_printk(APIC_VERBOSE, "Setting %d in the " - "phys_id_present_map\n", - mp_ioapics[apic_id].apicid); - physids_or(phys_id_present_map, phys_id_present_map, tmp); - } - - - /* - * We need to adjust the IRQ routing table - * if the ID changed. - */ - if (old_id != mp_ioapics[apic_id].apicid) - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].dstapic == old_id) - mp_irqs[i].dstapic - = mp_ioapics[apic_id].apicid; - - /* - * Read the right value from the MPC table and - * write it into the ID register. 
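
The ID rewrite that follows only touches the APIC-ID field of IO-APIC register 0; on the 82093AA-style parts assumed here that field sits in bits 27:24. A tiny sketch of the read-modify-write, with invented values (illustration only, not the patch's code):

#include <stdio.h>

int main(void)
{
	unsigned int reg_00 = 0x08000000;	/* hypothetical raw value: ID = 8 */
	unsigned int new_id = 0x2;		/* ID taken from the MPC table    */

	printf("old ID: %u\n", (reg_00 >> 24) & 0xf);

	/* clear bits 27:24 and insert the new ID, leaving the rest intact */
	reg_00 = (reg_00 & ~(0xfu << 24)) | ((new_id & 0xfu) << 24);

	printf("new raw value: 0x%08x (ID now %u)\n",
	       reg_00, (reg_00 >> 24) & 0xf);
	return 0;
}
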
- */ - apic_printk(APIC_VERBOSE, KERN_INFO - "...changing IO-APIC physical APIC ID to %d ...", - mp_ioapics[apic_id].apicid); - - reg_00.bits.ID = mp_ioapics[apic_id].apicid; - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic_id, 0, reg_00.raw); - spin_unlock_irqrestore(&ioapic_lock, flags); - - /* - * Sanity check - */ - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic_id, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); - if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) - printk("could not set ID!\n"); - else - apic_printk(APIC_VERBOSE, " ok.\n"); - } -} -#endif - -int no_timer_check __initdata; - -static int __init notimercheck(char *s) -{ - no_timer_check = 1; - return 1; -} -__setup("no_timer_check", notimercheck); - -/* - * There is a nasty bug in some older SMP boards, their mptable lies - * about the timer IRQ. We do the following to work around the situation: - * - * - timer IRQ defaults to IO-APIC IRQ - * - if this function detects that timer IRQs are defunct, then we fall - * back to ISA timer IRQs - */ -static int __init timer_irq_works(void) -{ - unsigned long t1 = jiffies; - unsigned long flags; - - if (no_timer_check) - return 1; - - local_save_flags(flags); - local_irq_enable(); - /* Let ten ticks pass... */ - mdelay((10 * 1000) / HZ); - local_irq_restore(flags); - - /* - * Expect a few ticks at least, to be sure some possible - * glue logic does not lock up after one or two first - * ticks in a non-ExtINT mode. Also the local APIC - * might have cached one ExtINT interrupt. Finally, at - * least one tick may be lost due to delays. - */ - - /* jiffies wrap? */ - if (time_after(jiffies, t1 + 4)) - return 1; - return 0; -} - -/* - * In the SMP+IOAPIC case it might happen that there are an unspecified - * number of pending IRQ events unhandled. These cases are very rare, - * so we 'resend' these IRQs via IPIs, to the same CPU. It's much - * better to do it this way as thus we do not have to be aware of - * 'pending' interrupts in the IRQ path, except at this point. - */ -/* - * Edge triggered needs to resend any interrupt - * that was delayed but this is now handled in the device - * independent code. - */ - -/* - * Starting up a edge-triggered IO-APIC interrupt is - * nasty - we need to make sure that we get the edge. - * If it is already asserted for some reason, we need - * return 1 to indicate that is was pending. - * - * This is not complete - we should be able to fake - * an edge even if it isn't on the 8259A... - */ - -static unsigned int startup_ioapic_irq(unsigned int irq) -{ - int was_pending = 0; - unsigned long flags; - struct irq_cfg *cfg; - - spin_lock_irqsave(&ioapic_lock, flags); - if (irq < NR_IRQS_LEGACY) { - disable_8259A_irq(irq); - if (i8259A_irq_pending(irq)) - was_pending = 1; - } - cfg = irq_cfg(irq); - __unmask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); - - return was_pending; -} - -#ifdef CONFIG_X86_64 -static int ioapic_retrigger_irq(unsigned int irq) -{ - - struct irq_cfg *cfg = irq_cfg(irq); - unsigned long flags; - - spin_lock_irqsave(&vector_lock, flags); - apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); - spin_unlock_irqrestore(&vector_lock, flags); - - return 1; -} -#else -static int ioapic_retrigger_irq(unsigned int irq) -{ - apic->send_IPI_self(irq_cfg(irq)->vector); - - return 1; -} -#endif - -/* - * Level and edge triggered IO-APIC interrupts need different handling, - * so we use two separate IRQ descriptors. 
Edge triggered IRQs can be - * handled with the level-triggered descriptor, but that one has slightly - * more overhead. Level-triggered interrupts cannot be handled with the - * edge-triggered handler, without risking IRQ storms and other ugly - * races. - */ - -#ifdef CONFIG_SMP - -#ifdef CONFIG_INTR_REMAP -static void ir_irq_migration(struct work_struct *work); - -static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); - -/* - * Migrate the IO-APIC irq in the presence of intr-remapping. - * - * For edge triggered, irq migration is a simple atomic update(of vector - * and cpu destination) of IRTE and flush the hardware cache. - * - * For level triggered, we need to modify the io-apic RTE aswell with the update - * vector information, along with modifying IRTE with vector and destination. - * So irq migration for level triggered is little bit more complex compared to - * edge triggered migration. But the good news is, we use the same algorithm - * for level triggered migration as we have today, only difference being, - * we now initiate the irq migration from process context instead of the - * interrupt context. - * - * In future, when we do a directed EOI (combined with cpu EOI broadcast - * suppression) to the IO-APIC, level triggered irq migration will also be - * as simple as edge triggered migration and we can do the irq migration - * with a simple atomic update to IO-APIC RTE. - */ -static void -migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg; - struct irte irte; - int modify_ioapic_rte; - unsigned int dest; - unsigned long flags; - unsigned int irq; - - if (!cpumask_intersects(mask, cpu_online_mask)) - return; - - irq = desc->irq; - if (get_irte(irq, &irte)) - return; - - cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return; - - set_extra_move_desc(desc, mask); - - dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); - - modify_ioapic_rte = desc->status & IRQ_LEVEL; - if (modify_ioapic_rte) { - spin_lock_irqsave(&ioapic_lock, flags); - __target_IO_APIC_irq(irq, dest, cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); - } - - irte.vector = cfg->vector; - irte.dest_id = IRTE_DEST(dest); - - /* - * Modified the IRTE and flushes the Interrupt entry cache. - */ - modify_irte(irq, &irte); - - if (cfg->move_in_progress) - send_cleanup_vector(cfg); - - cpumask_copy(desc->affinity, mask); -} - -static int migrate_irq_remapped_level_desc(struct irq_desc *desc) -{ - int ret = -1; - struct irq_cfg *cfg = desc->chip_data; - - mask_IO_APIC_irq_desc(desc); - - if (io_apic_level_ack_pending(cfg)) { - /* - * Interrupt in progress. Migrating irq now will change the - * vector information in the IO-APIC RTE and that will confuse - * the EOI broadcast performed by cpu. - * So, delay the irq migration to the next instance. - */ - schedule_delayed_work(&ir_migration_work, 1); - goto unmask; - } - - /* everthing is clear. 
we have right of way */ - migrate_ioapic_irq_desc(desc, desc->pending_mask); - - ret = 0; - desc->status &= ~IRQ_MOVE_PENDING; - cpumask_clear(desc->pending_mask); - -unmask: - unmask_IO_APIC_irq_desc(desc); - - return ret; -} - -static void ir_irq_migration(struct work_struct *work) -{ - unsigned int irq; - struct irq_desc *desc; - - for_each_irq_desc(irq, desc) { - if (desc->status & IRQ_MOVE_PENDING) { - unsigned long flags; - - spin_lock_irqsave(&desc->lock, flags); - if (!desc->chip->set_affinity || - !(desc->status & IRQ_MOVE_PENDING)) { - desc->status &= ~IRQ_MOVE_PENDING; - spin_unlock_irqrestore(&desc->lock, flags); - continue; - } - - desc->chip->set_affinity(irq, desc->pending_mask); - spin_unlock_irqrestore(&desc->lock, flags); - } - } -} - -/* - * Migrates the IRQ destination in the process context. - */ -static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, - const struct cpumask *mask) -{ - if (desc->status & IRQ_LEVEL) { - desc->status |= IRQ_MOVE_PENDING; - cpumask_copy(desc->pending_mask, mask); - migrate_irq_remapped_level_desc(desc); - return; - } - - migrate_ioapic_irq_desc(desc, mask); -} -static void set_ir_ioapic_affinity_irq(unsigned int irq, - const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - - set_ir_ioapic_affinity_irq_desc(desc, mask); -} -#endif - -asmlinkage void smp_irq_move_cleanup_interrupt(void) -{ - unsigned vector, me; - - ack_APIC_irq(); - exit_idle(); - irq_enter(); - - me = smp_processor_id(); - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { - unsigned int irq; - struct irq_desc *desc; - struct irq_cfg *cfg; - irq = __get_cpu_var(vector_irq)[vector]; - - if (irq == -1) - continue; - - desc = irq_to_desc(irq); - if (!desc) - continue; - - cfg = irq_cfg(irq); - spin_lock(&desc->lock); - if (!cfg->move_cleanup_count) - goto unlock; - - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) - goto unlock; - - __get_cpu_var(vector_irq)[vector] = -1; - cfg->move_cleanup_count--; -unlock: - spin_unlock(&desc->lock); - } - - irq_exit(); -} - -static void irq_complete_move(struct irq_desc **descp) -{ - struct irq_desc *desc = *descp; - struct irq_cfg *cfg = desc->chip_data; - unsigned vector, me; - - if (likely(!cfg->move_in_progress)) { -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC - if (likely(!cfg->move_desc_pending)) - return; - - /* domain has not changed, but affinity did */ - me = smp_processor_id(); - if (cpumask_test_cpu(me, desc->affinity)) { - *descp = desc = move_irq_desc(desc, me); - /* get the new one */ - cfg = desc->chip_data; - cfg->move_desc_pending = 0; - } -#endif - return; - } - - vector = ~get_irq_regs()->orig_ax; - me = smp_processor_id(); - - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC - *descp = desc = move_irq_desc(desc, me); - /* get the new one */ - cfg = desc->chip_data; -#endif - send_cleanup_vector(cfg); - } -} -#else -static inline void irq_complete_move(struct irq_desc **descp) {} -#endif - -#ifdef CONFIG_INTR_REMAP -static void ack_x2apic_level(unsigned int irq) -{ - ack_x2APIC_irq(); -} - -static void ack_x2apic_edge(unsigned int irq) -{ - ack_x2APIC_irq(); -} - -#endif - -static void ack_apic_edge(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - irq_complete_move(&desc); - move_native_irq(irq); - ack_APIC_irq(); -} - -atomic_t irq_mis_count; - -static void ack_apic_level(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - -#ifdef CONFIG_X86_32 - unsigned long v; - int 
i; -#endif - struct irq_cfg *cfg; - int do_unmask_irq = 0; - - irq_complete_move(&desc); -#ifdef CONFIG_GENERIC_PENDING_IRQ - /* If we are moving the irq we need to mask it */ - if (unlikely(desc->status & IRQ_MOVE_PENDING)) { - do_unmask_irq = 1; - mask_IO_APIC_irq_desc(desc); - } -#endif - -#ifdef CONFIG_X86_32 - /* - * It appears there is an erratum which affects at least version 0x11 - * of I/O APIC (that's the 82093AA and cores integrated into various - * chipsets). Under certain conditions a level-triggered interrupt is - * erroneously delivered as edge-triggered one but the respective IRR - * bit gets set nevertheless. As a result the I/O unit expects an EOI - * message but it will never arrive and further interrupts are blocked - * from the source. The exact reason is so far unknown, but the - * phenomenon was observed when two consecutive interrupt requests - * from a given source get delivered to the same CPU and the source is - * temporarily disabled in between. - * - * A workaround is to simulate an EOI message manually. We achieve it - * by setting the trigger mode to edge and then to level when the edge - * trigger mode gets detected in the TMR of a local APIC for a - * level-triggered interrupt. We mask the source for the time of the - * operation to prevent an edge-triggered interrupt escaping meanwhile. - * The idea is from Manfred Spraul. --macro - */ - cfg = desc->chip_data; - i = cfg->vector; - - v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); -#endif - - /* - * We must acknowledge the irq before we move it or the acknowledge will - * not propagate properly. - */ - ack_APIC_irq(); - - /* Now we can move and renable the irq */ - if (unlikely(do_unmask_irq)) { - /* Only migrate the irq if the ack has been received. - * - * On rare occasions the broadcast level triggered ack gets - * delayed going to ioapics, and if we reprogram the - * vector while Remote IRR is still set the irq will never - * fire again. - * - * To prevent this scenario we read the Remote IRR bit - * of the ioapic. This has two effects. - * - On any sane system the read of the ioapic will - * flush writes (and acks) going to the ioapic from - * this cpu. - * - We get to see if the ACK has actually been delivered. - * - * Based on failed experiments of reprogramming the - * ioapic entry from outside of irq context starting - * with masking the ioapic entry and then polling until - * Remote IRR was clear before reprogramming the - * ioapic I don't trust the Remote IRR bit to be - * completey accurate. - * - * However there appears to be no other way to plug - * this race, so if the Remote IRR bit is not - * accurate and is causing problems then it is a hardware bug - * and you can go talk to the chipset vendor about it. 
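
The erratum workaround above tests the vector's bit in the local APIC TMR, which is laid out as eight 32-bit registers spaced 0x10 apart. The expression APIC_TMR + ((i & ~0x1f) >> 1) selects the register and (i & 0x1f) the bit; a quick stand-alone check of that arithmetic (sketch only, with example vectors):

#include <stdio.h>

#define APIC_TMR 0x180	/* TMR base offset used by the kernel */

int main(void)
{
	int vectors[] = { 0x31, 0x59, 0xef };
	unsigned int i;

	for (i = 0; i < sizeof(vectors) / sizeof(vectors[0]); i++) {
		int v = vectors[i];
		/* one 32-bit word per 0x10 bytes of offset; bit = low 5 bits */
		int reg = APIC_TMR + ((v & ~0x1f) >> 1);
		int bit = v & 0x1f;

		printf("vector 0x%02x -> register 0x%03x, bit %d\n",
		       v, reg, bit);
	}
	return 0;
}
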
- */ - cfg = desc->chip_data; - if (!io_apic_level_ack_pending(cfg)) - move_masked_irq(irq); - unmask_IO_APIC_irq_desc(desc); - } - -#ifdef CONFIG_X86_32 - if (!(v & (1 << (i & 0x1f)))) { - atomic_inc(&irq_mis_count); - spin_lock(&ioapic_lock); - __mask_and_edge_IO_APIC_irq(cfg); - __unmask_and_level_IO_APIC_irq(cfg); - spin_unlock(&ioapic_lock); - } -#endif -} - -static struct irq_chip ioapic_chip __read_mostly = { - .name = "IO-APIC", - .startup = startup_ioapic_irq, - .mask = mask_IO_APIC_irq, - .unmask = unmask_IO_APIC_irq, - .ack = ack_apic_edge, - .eoi = ack_apic_level, -#ifdef CONFIG_SMP - .set_affinity = set_ioapic_affinity_irq, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -#ifdef CONFIG_INTR_REMAP -static struct irq_chip ir_ioapic_chip __read_mostly = { - .name = "IR-IO-APIC", - .startup = startup_ioapic_irq, - .mask = mask_IO_APIC_irq, - .unmask = unmask_IO_APIC_irq, - .ack = ack_x2apic_edge, - .eoi = ack_x2apic_level, -#ifdef CONFIG_SMP - .set_affinity = set_ir_ioapic_affinity_irq, -#endif - .retrigger = ioapic_retrigger_irq, -}; -#endif - -static inline void init_IO_APIC_traps(void) -{ - int irq; - struct irq_desc *desc; - struct irq_cfg *cfg; - - /* - * NOTE! The local APIC isn't very good at handling - * multiple interrupts at the same interrupt level. - * As the interrupt level is determined by taking the - * vector number and shifting that right by 4, we - * want to spread these out a bit so that they don't - * all fall in the same interrupt level. - * - * Also, we've got to be careful not to trash gate - * 0x80, because int 0x80 is hm, kind of importantish. ;) - */ - for_each_irq_desc(irq, desc) { - cfg = desc->chip_data; - if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { - /* - * Hmm.. We don't have an entry for this, - * so default to an old-fashioned 8259 - * interrupt if we can.. - */ - if (irq < NR_IRQS_LEGACY) - make_8259A_irq(irq); - else - /* Strange. Oh, well.. */ - desc->chip = &no_irq_chip; - } - } -} - -/* - * The local APIC irq-chip implementation: - */ - -static void mask_lapic_irq(unsigned int irq) -{ - unsigned long v; - - v = apic_read(APIC_LVT0); - apic_write(APIC_LVT0, v | APIC_LVT_MASKED); -} - -static void unmask_lapic_irq(unsigned int irq) -{ - unsigned long v; - - v = apic_read(APIC_LVT0); - apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); -} - -static void ack_lapic_irq(unsigned int irq) -{ - ack_APIC_irq(); -} - -static struct irq_chip lapic_chip __read_mostly = { - .name = "local-APIC", - .mask = mask_lapic_irq, - .unmask = unmask_lapic_irq, - .ack = ack_lapic_irq, -}; - -static void lapic_register_intr(int irq, struct irq_desc *desc) -{ - desc->status &= ~IRQ_LEVEL; - set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, - "edge"); -} - -static void __init setup_nmi(void) -{ - /* - * Dirty trick to enable the NMI watchdog ... - * We put the 8259A master into AEOI mode and - * unmask on all local APICs LVT0 as NMI. - * - * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') - * is from Maciej W. Rozycki - so we do not have to EOI from - * the NMI handler or the timer interrupt. - */ - apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); - - enable_NMI_through_LVT0(); - - apic_printk(APIC_VERBOSE, " done.\n"); -} - -/* - * This looks a bit hackish but it's about the only one way of sending - * a few INTA cycles to 8259As and any associated glue logic. ICR does - * not support the ExtINT mode, unfortunately. 
We need to send these - * cycles as some i82489DX-based boards have glue logic that keeps the - * 8259A interrupt line asserted until INTA. --macro - */ -static inline void __init unlock_ExtINT_logic(void) -{ - int apic, pin, i; - struct IO_APIC_route_entry entry0, entry1; - unsigned char save_control, save_freq_select; - - pin = find_isa_irq_pin(8, mp_INT); - if (pin == -1) { - WARN_ON_ONCE(1); - return; - } - apic = find_isa_irq_apic(8, mp_INT); - if (apic == -1) { - WARN_ON_ONCE(1); - return; - } - - entry0 = ioapic_read_entry(apic, pin); - clear_IO_APIC_pin(apic, pin); - - memset(&entry1, 0, sizeof(entry1)); - - entry1.dest_mode = 0; /* physical delivery */ - entry1.mask = 0; /* unmask IRQ now */ - entry1.dest = hard_smp_processor_id(); - entry1.delivery_mode = dest_ExtINT; - entry1.polarity = entry0.polarity; - entry1.trigger = 0; - entry1.vector = 0; - - ioapic_write_entry(apic, pin, entry1); - - save_control = CMOS_READ(RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, - RTC_FREQ_SELECT); - CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); - - i = 100; - while (i-- > 0) { - mdelay(10); - if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) - i -= 10; - } - - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - clear_IO_APIC_pin(apic, pin); - - ioapic_write_entry(apic, pin, entry0); -} - -static int disable_timer_pin_1 __initdata; -/* Actually the next is obsolete, but keep it for paranoid reasons -AK */ -static int __init disable_timer_pin_setup(char *arg) -{ - disable_timer_pin_1 = 1; - return 0; -} -early_param("disable_timer_pin_1", disable_timer_pin_setup); - -int timer_through_8259 __initdata; - -/* - * This code may look a bit paranoid, but it's supposed to cooperate with - * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ - * is so screwy. Thanks to Brian Perkins for testing/hacking this beast - * fanatically on his truly buggy board. - * - * FIXME: really need to revamp this for all platforms. - */ -static inline void __init check_timer(void) -{ - struct irq_desc *desc = irq_to_desc(0); - struct irq_cfg *cfg = desc->chip_data; - int cpu = boot_cpu_id; - int apic1, pin1, apic2, pin2; - unsigned long flags; - int no_pin1 = 0; - - local_irq_save(flags); - - /* - * get/set the timer IRQ vector: - */ - disable_8259A_irq(0); - assign_irq_vector(0, cfg, apic->target_cpus()); - - /* - * As IRQ0 is to be enabled in the 8259A, the virtual - * wire has to be disabled in the local APIC. Also - * timer interrupts need to be acknowledged manually in - * the 8259A for the i82489DX when using the NMI - * watchdog as that APIC treats NMIs as level-triggered. - * The AEOI mode will finish them in the 8259A - * automatically. - */ - apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); - init_8259A(1); -#ifdef CONFIG_X86_32 - { - unsigned int ver; - - ver = apic_read(APIC_LVR); - ver = GET_APIC_VERSION(ver); - timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); - } -#endif - - pin1 = find_isa_irq_pin(0, mp_INT); - apic1 = find_isa_irq_apic(0, mp_INT); - pin2 = ioapic_i8259.pin; - apic2 = ioapic_i8259.apic; - - apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " - "apic1=%d pin1=%d apic2=%d pin2=%d\n", - cfg->vector, apic1, pin1, apic2, pin2); - - /* - * Some BIOS writers are clueless and report the ExtINTA - * I/O APIC input from the cascaded 8259A as the timer - * interrupt input. 
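
unlock_ExtINT_logic() above briefly enables the RTC's periodic interrupt by writing rate 0x6 into RTC_FREQ_SELECT and setting RTC_PIE. On a standard MC146818, a rate value r >= 3 gives a periodic frequency of 32768 >> (r - 1); a little arithmetic sketch under that assumption (not part of the patch):

#include <stdio.h>

int main(void)
{
	int rate;

	/* MC146818 periodic-interrupt rates 3..15: freq = 32768 >> (rate - 1) */
	for (rate = 3; rate <= 15; rate++)
		printf("rate %2d -> %5d Hz%s\n", rate, 32768 >> (rate - 1),
		       rate == 6 ? "   <- value programmed above" : "");
	return 0;
}
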
So just in case, if only one pin - * was found above, try it both directly and through the - * 8259A. - */ - if (pin1 == -1) { -#ifdef CONFIG_INTR_REMAP - if (intr_remapping_enabled) - panic("BIOS bug: timer not connected to IO-APIC"); -#endif - pin1 = pin2; - apic1 = apic2; - no_pin1 = 1; - } else if (pin2 == -1) { - pin2 = pin1; - apic2 = apic1; - } - - if (pin1 != -1) { - /* - * Ok, does IRQ0 through the IOAPIC work? - */ - if (no_pin1) { - add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); - setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); - } else { - /* for edge trigger, setup_IO_APIC_irq already - * leave it unmasked. - * so only need to unmask if it is level-trigger - * do we really have level trigger timer? - */ - int idx; - idx = find_irq_entry(apic1, pin1, mp_INT); - if (idx != -1 && irq_trigger(idx)) - unmask_IO_APIC_irq_desc(desc); - } - if (timer_irq_works()) { - if (nmi_watchdog == NMI_IO_APIC) { - setup_nmi(); - enable_8259A_irq(0); - } - if (disable_timer_pin_1 > 0) - clear_IO_APIC_pin(0, pin1); - goto out; - } -#ifdef CONFIG_INTR_REMAP - if (intr_remapping_enabled) - panic("timer doesn't work through Interrupt-remapped IO-APIC"); -#endif - local_irq_disable(); - clear_IO_APIC_pin(apic1, pin1); - if (!no_pin1) - apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " - "8254 timer not connected to IO-APIC\n"); - - apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " - "(IRQ0) through the 8259A ...\n"); - apic_printk(APIC_QUIET, KERN_INFO - "..... (found apic %d pin %d) ...\n", apic2, pin2); - /* - * legacy devices should be connected to IO APIC #0 - */ - replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); - setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); - enable_8259A_irq(0); - if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); - timer_through_8259 = 1; - if (nmi_watchdog == NMI_IO_APIC) { - disable_8259A_irq(0); - setup_nmi(); - enable_8259A_irq(0); - } - goto out; - } - /* - * Cleanup, just in case ... - */ - local_irq_disable(); - disable_8259A_irq(0); - clear_IO_APIC_pin(apic2, pin2); - apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); - } - - if (nmi_watchdog == NMI_IO_APIC) { - apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " - "through the IO-APIC - disabling NMI Watchdog!\n"); - nmi_watchdog = NMI_NONE; - } -#ifdef CONFIG_X86_32 - timer_ack = 0; -#endif - - apic_printk(APIC_QUIET, KERN_INFO - "...trying to set up timer as Virtual Wire IRQ...\n"); - - lapic_register_intr(0, desc); - apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ - enable_8259A_irq(0); - - if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); - goto out; - } - local_irq_disable(); - disable_8259A_irq(0); - apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); - apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); - - apic_printk(APIC_QUIET, KERN_INFO - "...trying to set up timer as ExtINT IRQ...\n"); - - init_8259A(0); - make_8259A_irq(0); - apic_write(APIC_LVT0, APIC_DM_EXTINT); - - unlock_ExtINT_logic(); - - if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); - goto out; - } - local_irq_disable(); - apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); - panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " - "report. Then try booting with the 'noapic' option.\n"); -out: - local_irq_restore(flags); -} - -/* - * Traditionally ISA IRQ2 is the cascade IRQ, and is not available - * to devices. 
However there may be an I/O APIC pin available for - * this interrupt regardless. The pin may be left unconnected, but - * typically it will be reused as an ExtINT cascade interrupt for - * the master 8259A. In the MPS case such a pin will normally be - * reported as an ExtINT interrupt in the MP table. With ACPI - * there is no provision for ExtINT interrupts, and in the absence - * of an override it would be treated as an ordinary ISA I/O APIC - * interrupt, that is edge-triggered and unmasked by default. We - * used to do this, but it caused problems on some systems because - * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using - * the same ExtINT cascade interrupt to drive the local APIC of the - * bootstrap processor. Therefore we refrain from routing IRQ2 to - * the I/O APIC in all cases now. No actual device should request - * it anyway. --macro - */ -#define PIC_IRQS (1 << PIC_CASCADE_IR) - -void __init setup_IO_APIC(void) -{ - - /* - * calling enable_IO_APIC() is moved to setup_local_APIC for BP - */ - - io_apic_irqs = ~PIC_IRQS; - - apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); - /* - * Set up IO-APIC IRQ routing. - */ -#ifdef CONFIG_X86_32 - if (!acpi_ioapic) - setup_ioapic_ids_from_mpc(); -#endif - sync_Arb_IDs(); - setup_IO_APIC_irqs(); - init_IO_APIC_traps(); - check_timer(); -} - -/* - * Called after all the initialization is done. If we didnt find any - * APIC bugs then we can allow the modify fast path - */ - -static int __init io_apic_bug_finalize(void) -{ - if (sis_apic_bug == -1) - sis_apic_bug = 0; - return 0; -} - -late_initcall(io_apic_bug_finalize); - -struct sysfs_ioapic_data { - struct sys_device dev; - struct IO_APIC_route_entry entry[0]; -}; -static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; - -static int ioapic_suspend(struct sys_device *dev, pm_message_t state) -{ - struct IO_APIC_route_entry *entry; - struct sysfs_ioapic_data *data; - int i; - - data = container_of(dev, struct sysfs_ioapic_data, dev); - entry = data->entry; - for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) - *entry = ioapic_read_entry(dev->id, i); - - return 0; -} - -static int ioapic_resume(struct sys_device *dev) -{ - struct IO_APIC_route_entry *entry; - struct sysfs_ioapic_data *data; - unsigned long flags; - union IO_APIC_reg_00 reg_00; - int i; - - data = container_of(dev, struct sysfs_ioapic_data, dev); - entry = data->entry; - - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(dev->id, 0); - if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { - reg_00.bits.ID = mp_ioapics[dev->id].apicid; - io_apic_write(dev->id, 0, reg_00.raw); - } - spin_unlock_irqrestore(&ioapic_lock, flags); - for (i = 0; i < nr_ioapic_registers[dev->id]; i++) - ioapic_write_entry(dev->id, i, entry[i]); - - return 0; -} - -static struct sysdev_class ioapic_sysdev_class = { - .name = "ioapic", - .suspend = ioapic_suspend, - .resume = ioapic_resume, -}; - -static int __init ioapic_init_sysfs(void) -{ - struct sys_device * dev; - int i, size, error; - - error = sysdev_class_register(&ioapic_sysdev_class); - if (error) - return error; - - for (i = 0; i < nr_ioapics; i++ ) { - size = sizeof(struct sys_device) + nr_ioapic_registers[i] - * sizeof(struct IO_APIC_route_entry); - mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); - if (!mp_ioapic_data[i]) { - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); - continue; - } - dev = &mp_ioapic_data[i]->dev; - dev->id = i; - dev->cls = &ioapic_sysdev_class; - error = sysdev_register(dev); - if (error) { - 
kfree(mp_ioapic_data[i]); - mp_ioapic_data[i] = NULL; - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); - continue; - } - } - - return 0; -} - -device_initcall(ioapic_init_sysfs); - -static int nr_irqs_gsi = NR_IRQS_LEGACY; -/* - * Dynamic irq allocate and deallocation - */ -unsigned int create_irq_nr(unsigned int irq_want) -{ - /* Allocate an unused irq */ - unsigned int irq; - unsigned int new; - unsigned long flags; - struct irq_cfg *cfg_new = NULL; - int cpu = boot_cpu_id; - struct irq_desc *desc_new = NULL; - - irq = 0; - if (irq_want < nr_irqs_gsi) - irq_want = nr_irqs_gsi; - - spin_lock_irqsave(&vector_lock, flags); - for (new = irq_want; new < nr_irqs; new++) { - desc_new = irq_to_desc_alloc_cpu(new, cpu); - if (!desc_new) { - printk(KERN_INFO "can not get irq_desc for %d\n", new); - continue; - } - cfg_new = desc_new->chip_data; - - if (cfg_new->vector != 0) - continue; - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) - irq = new; - break; - } - spin_unlock_irqrestore(&vector_lock, flags); - - if (irq > 0) { - dynamic_irq_init(irq); - /* restore it, in case dynamic_irq_init clear it */ - if (desc_new) - desc_new->chip_data = cfg_new; - } - return irq; -} - -int create_irq(void) -{ - unsigned int irq_want; - int irq; - - irq_want = nr_irqs_gsi; - irq = create_irq_nr(irq_want); - - if (irq == 0) - irq = -1; - - return irq; -} - -void destroy_irq(unsigned int irq) -{ - unsigned long flags; - struct irq_cfg *cfg; - struct irq_desc *desc; - - /* store it, in case dynamic_irq_cleanup clear it */ - desc = irq_to_desc(irq); - cfg = desc->chip_data; - dynamic_irq_cleanup(irq); - /* connect back irq_cfg */ - if (desc) - desc->chip_data = cfg; - -#ifdef CONFIG_INTR_REMAP - free_irte(irq); -#endif - spin_lock_irqsave(&vector_lock, flags); - __clear_irq_vector(irq, cfg); - spin_unlock_irqrestore(&vector_lock, flags); -} - -/* - * MSI message composition - */ -#ifdef CONFIG_PCI_MSI -static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) -{ - struct irq_cfg *cfg; - int err; - unsigned dest; - - if (disable_apic) - return -ENXIO; - - cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, apic->target_cpus()); - if (err) - return err; - - dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); - -#ifdef CONFIG_INTR_REMAP - if (irq_remapped(irq)) { - struct irte irte; - int ir_index; - u16 sub_handle; - - ir_index = map_irq_to_irte_handle(irq, &sub_handle); - BUG_ON(ir_index == -1); - - memset (&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - irte.trigger_mode = 0; /* edge */ - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = cfg->vector; - irte.dest_id = IRTE_DEST(dest); - - modify_irte(irq, &irte); - - msg->address_hi = MSI_ADDR_BASE_HI; - msg->data = sub_handle; - msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | - MSI_ADDR_IR_SHV | - MSI_ADDR_IR_INDEX1(ir_index) | - MSI_ADDR_IR_INDEX2(ir_index); - } else -#endif - { - msg->address_hi = MSI_ADDR_BASE_HI; - msg->address_lo = - MSI_ADDR_BASE_LO | - ((apic->irq_dest_mode == 0) ? - MSI_ADDR_DEST_MODE_PHYSICAL: - MSI_ADDR_DEST_MODE_LOGICAL) | - ((apic->irq_delivery_mode != dest_LowestPrio) ? - MSI_ADDR_REDIRECTION_CPU: - MSI_ADDR_REDIRECTION_LOWPRI) | - MSI_ADDR_DEST_ID(dest); - - msg->data = - MSI_DATA_TRIGGER_EDGE | - MSI_DATA_LEVEL_ASSERT | - ((apic->irq_delivery_mode != dest_LowestPrio) ? 
- MSI_DATA_DELIVERY_FIXED: - MSI_DATA_DELIVERY_LOWPRI) | - MSI_DATA_VECTOR(cfg->vector); - } - return err; -} - -#ifdef CONFIG_SMP -static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - struct msi_msg msg; - unsigned int dest; - - dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID) - return; - - cfg = desc->chip_data; - - read_msi_msg_desc(desc, &msg); - - msg.data &= ~MSI_DATA_VECTOR_MASK; - msg.data |= MSI_DATA_VECTOR(cfg->vector); - msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; - msg.address_lo |= MSI_ADDR_DEST_ID(dest); - - write_msi_msg_desc(desc, &msg); -} -#ifdef CONFIG_INTR_REMAP -/* - * Migrate the MSI irq to another cpumask. This migration is - * done in the process context using interrupt-remapping hardware. - */ -static void -ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg = desc->chip_data; - unsigned int dest; - struct irte irte; - - if (get_irte(irq, &irte)) - return; - - dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID) - return; - - irte.vector = cfg->vector; - irte.dest_id = IRTE_DEST(dest); - - /* - * atomically update the IRTE with the new destination and vector. - */ - modify_irte(irq, &irte); - - /* - * After this point, all the interrupts will start arriving - * at the new destination. So, time to cleanup the previous - * vector allocation. - */ - if (cfg->move_in_progress) - send_cleanup_vector(cfg); -} - -#endif -#endif /* CONFIG_SMP */ - -/* - * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, - * which implement the MSI or MSI-X Capability Structure. - */ -static struct irq_chip msi_chip = { - .name = "PCI-MSI", - .unmask = unmask_msi_irq, - .mask = mask_msi_irq, - .ack = ack_apic_edge, -#ifdef CONFIG_SMP - .set_affinity = set_msi_irq_affinity, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -#ifdef CONFIG_INTR_REMAP -static struct irq_chip msi_ir_chip = { - .name = "IR-PCI-MSI", - .unmask = unmask_msi_irq, - .mask = mask_msi_irq, - .ack = ack_x2apic_edge, -#ifdef CONFIG_SMP - .set_affinity = ir_set_msi_irq_affinity, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -/* - * Map the PCI dev to the corresponding remapping hardware unit - * and allocate 'nvec' consecutive interrupt-remapping table entries - * in it. 
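
In the non-remapped branch of msi_compose_msg() above, the MSI address encodes the destination APIC ID (bits 19:12 of the 0xFEExxxxx doorbell address) together with the destination/redirection mode bits, and the data word carries the vector and delivery mode. A simplified sketch of that packing, using illustrative constants rather than the kernel's msidef.h macros (physical destination, fixed delivery, edge trigger assumed):

#include <stdio.h>

#define MSI_ADDR_BASE		0xfee00000u
#define MSI_ADDR_DEST_ID(d)	(((d) & 0xffu) << 12)
#define MSI_DATA_VECTOR(v)	((v) & 0xffu)

int main(void)
{
	unsigned int apic_id = 0x05;	/* hypothetical destination CPU */
	unsigned int vector  = 0x61;	/* hypothetical assigned vector */

	unsigned int address_lo = MSI_ADDR_BASE | MSI_ADDR_DEST_ID(apic_id);
	unsigned int data       = MSI_DATA_VECTOR(vector);

	printf("MSI address_lo = 0x%08x, data = 0x%04x\n", address_lo, data);
	return 0;
}
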
- */ -static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) -{ - struct intel_iommu *iommu; - int index; - - iommu = map_dev_to_ir(dev); - if (!iommu) { - printk(KERN_ERR - "Unable to map PCI %s to iommu\n", pci_name(dev)); - return -ENOENT; - } - - index = alloc_irte(iommu, irq, nvec); - if (index < 0) { - printk(KERN_ERR - "Unable to allocate %d IRTE for PCI %s\n", nvec, - pci_name(dev)); - return -ENOSPC; - } - return index; -} -#endif - -static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) -{ - int ret; - struct msi_msg msg; - - ret = msi_compose_msg(dev, irq, &msg); - if (ret < 0) - return ret; - - set_irq_msi(irq, msidesc); - write_msi_msg(irq, &msg); - -#ifdef CONFIG_INTR_REMAP - if (irq_remapped(irq)) { - struct irq_desc *desc = irq_to_desc(irq); - /* - * irq migration in process context - */ - desc->status |= IRQ_MOVE_PCNTXT; - set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); - } else -#endif - set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); - - dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); - - return 0; -} - -int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - unsigned int irq; - int ret, sub_handle; - struct msi_desc *msidesc; - unsigned int irq_want; - -#ifdef CONFIG_INTR_REMAP - struct intel_iommu *iommu = 0; - int index = 0; -#endif - - irq_want = nr_irqs_gsi; - sub_handle = 0; - list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = create_irq_nr(irq_want); - if (irq == 0) - return -1; - irq_want = irq + 1; -#ifdef CONFIG_INTR_REMAP - if (!intr_remapping_enabled) - goto no_ir; - - if (!sub_handle) { - /* - * allocate the consecutive block of IRTE's - * for 'nvec' - */ - index = msi_alloc_irte(dev, irq, nvec); - if (index < 0) { - ret = index; - goto error; - } - } else { - iommu = map_dev_to_ir(dev); - if (!iommu) { - ret = -ENOENT; - goto error; - } - /* - * setup the mapping between the irq and the IRTE - * base index, the sub_handle pointing to the - * appropriate interrupt remap table entry. 
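
With interrupt remapping, the routing entry no longer carries a destination directly; it carries an index into the remap table. setup_ioapic_entry() earlier splits that handle into a 15-bit 'index' field plus a 1-bit 'index2', and arch_setup_msi_irqs() above hands consecutive sub_handles of one block allocation to the individual vectors of a device. A sketch of the handle split and reassembly, with an assumed handle value (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int handle = 0x8123;	/* hypothetical index from alloc_irte() */

	/* split exactly as the IO-APIC RTE fields above are filled in */
	unsigned int index  = handle & 0x7fff;		/* low 15 bits */
	unsigned int index2 = (handle >> 15) & 0x1;	/* 16th bit    */

	printf("handle 0x%04x -> index 0x%04x, index2 %u\n",
	       handle, index, index2);
	printf("reassembled: 0x%04x\n", (index2 << 15) | index);
	return 0;
}
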
- */ - set_irte_irq(irq, iommu, index, sub_handle); - } -no_ir: -#endif - ret = setup_msi_irq(dev, msidesc, irq); - if (ret < 0) - goto error; - sub_handle++; - } - return 0; - -error: - destroy_irq(irq); - return ret; -} - -void arch_teardown_msi_irq(unsigned int irq) -{ - destroy_irq(irq); -} - -#ifdef CONFIG_DMAR -#ifdef CONFIG_SMP -static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - struct msi_msg msg; - unsigned int dest; - - dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID) - return; - - cfg = desc->chip_data; - - dmar_msi_read(irq, &msg); - - msg.data &= ~MSI_DATA_VECTOR_MASK; - msg.data |= MSI_DATA_VECTOR(cfg->vector); - msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; - msg.address_lo |= MSI_ADDR_DEST_ID(dest); - - dmar_msi_write(irq, &msg); -} - -#endif /* CONFIG_SMP */ - -struct irq_chip dmar_msi_type = { - .name = "DMAR_MSI", - .unmask = dmar_msi_unmask, - .mask = dmar_msi_mask, - .ack = ack_apic_edge, -#ifdef CONFIG_SMP - .set_affinity = dmar_msi_set_affinity, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -int arch_setup_dmar_msi(unsigned int irq) -{ - int ret; - struct msi_msg msg; - - ret = msi_compose_msg(NULL, irq, &msg); - if (ret < 0) - return ret; - dmar_msi_write(irq, &msg); - set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, - "edge"); - return 0; -} -#endif - -#ifdef CONFIG_HPET_TIMER - -#ifdef CONFIG_SMP -static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - struct msi_msg msg; - unsigned int dest; - - dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID) - return; - - cfg = desc->chip_data; - - hpet_msi_read(irq, &msg); - - msg.data &= ~MSI_DATA_VECTOR_MASK; - msg.data |= MSI_DATA_VECTOR(cfg->vector); - msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; - msg.address_lo |= MSI_ADDR_DEST_ID(dest); - - hpet_msi_write(irq, &msg); -} - -#endif /* CONFIG_SMP */ - -struct irq_chip hpet_msi_type = { - .name = "HPET_MSI", - .unmask = hpet_msi_unmask, - .mask = hpet_msi_mask, - .ack = ack_apic_edge, -#ifdef CONFIG_SMP - .set_affinity = hpet_msi_set_affinity, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -int arch_setup_hpet_msi(unsigned int irq) -{ - int ret; - struct msi_msg msg; - - ret = msi_compose_msg(NULL, irq, &msg); - if (ret < 0) - return ret; - - hpet_msi_write(irq, &msg); - set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, - "edge"); - - return 0; -} -#endif - -#endif /* CONFIG_PCI_MSI */ -/* - * Hypertransport interrupt support - */ -#ifdef CONFIG_HT_IRQ - -#ifdef CONFIG_SMP - -static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) -{ - struct ht_irq_msg msg; - fetch_ht_irq_msg(irq, &msg); - - msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); - msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); - - msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); - msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); - - write_ht_irq_msg(irq, &msg); -} - -static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - unsigned int dest; - - dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID) - return; - - cfg = desc->chip_data; - - target_ht_irq(irq, dest, cfg->vector); -} - -#endif - -static struct irq_chip ht_irq_chip = { - .name = "PCI-HT", - .mask = mask_ht_irq, - .unmask = 
unmask_ht_irq, - .ack = ack_apic_edge, -#ifdef CONFIG_SMP - .set_affinity = set_ht_irq_affinity, -#endif - .retrigger = ioapic_retrigger_irq, -}; - -int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) -{ - struct irq_cfg *cfg; - int err; - - if (disable_apic) - return -ENXIO; - - cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, apic->target_cpus()); - if (!err) { - struct ht_irq_msg msg; - unsigned dest; - - dest = apic->cpu_mask_to_apicid_and(cfg->domain, - apic->target_cpus()); - - msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); - - msg.address_lo = - HT_IRQ_LOW_BASE | - HT_IRQ_LOW_DEST_ID(dest) | - HT_IRQ_LOW_VECTOR(cfg->vector) | - ((apic->irq_dest_mode == 0) ? - HT_IRQ_LOW_DM_PHYSICAL : - HT_IRQ_LOW_DM_LOGICAL) | - HT_IRQ_LOW_RQEOI_EDGE | - ((apic->irq_delivery_mode != dest_LowestPrio) ? - HT_IRQ_LOW_MT_FIXED : - HT_IRQ_LOW_MT_ARBITRATED) | - HT_IRQ_LOW_IRQ_MASKED; - - write_ht_irq_msg(irq, &msg); - - set_irq_chip_and_handler_name(irq, &ht_irq_chip, - handle_edge_irq, "edge"); - - dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); - } - return err; -} -#endif /* CONFIG_HT_IRQ */ - -#ifdef CONFIG_X86_UV -/* - * Re-target the irq to the specified CPU and enable the specified MMR located - * on the specified blade to allow the sending of MSIs to the specified CPU. - */ -int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, - unsigned long mmr_offset) -{ - const struct cpumask *eligible_cpu = cpumask_of(cpu); - struct irq_cfg *cfg; - int mmr_pnode; - unsigned long mmr_value; - struct uv_IO_APIC_route_entry *entry; - unsigned long flags; - int err; - - cfg = irq_cfg(irq); - - err = assign_irq_vector(irq, cfg, eligible_cpu); - if (err != 0) - return err; - - spin_lock_irqsave(&vector_lock, flags); - set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, - irq_name); - spin_unlock_irqrestore(&vector_lock, flags); - - mmr_value = 0; - entry = (struct uv_IO_APIC_route_entry *)&mmr_value; - BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - - entry->vector = cfg->vector; - entry->delivery_mode = apic->irq_delivery_mode; - entry->dest_mode = apic->irq_dest_mode; - entry->polarity = 0; - entry->trigger = 0; - entry->mask = 0; - entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); - - mmr_pnode = uv_blade_to_pnode(mmr_blade); - uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); - - return irq; -} - -/* - * Disable the specified MMR located on the specified blade so that MSIs are - * longer allowed to be sent. 
- */ -void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) -{ - unsigned long mmr_value; - struct uv_IO_APIC_route_entry *entry; - int mmr_pnode; - - mmr_value = 0; - entry = (struct uv_IO_APIC_route_entry *)&mmr_value; - BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - - entry->mask = 1; - - mmr_pnode = uv_blade_to_pnode(mmr_blade); - uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); -} -#endif /* CONFIG_X86_64 */ - -int __init io_apic_get_redir_entries (int ioapic) -{ - union IO_APIC_reg_01 reg_01; - unsigned long flags; - - spin_lock_irqsave(&ioapic_lock, flags); - reg_01.raw = io_apic_read(ioapic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); - - return reg_01.bits.entries; -} - -void __init probe_nr_irqs_gsi(void) -{ - int nr = 0; - - nr = acpi_probe_gsi(); - if (nr > nr_irqs_gsi) { - nr_irqs_gsi = nr; - } else { - /* for acpi=off or acpi is not compiled in */ - int idx; - - nr = 0; - for (idx = 0; idx < nr_ioapics; idx++) - nr += io_apic_get_redir_entries(idx) + 1; - - if (nr > nr_irqs_gsi) - nr_irqs_gsi = nr; - } - - printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); -} - -#ifdef CONFIG_SPARSE_IRQ -int __init arch_probe_nr_irqs(void) -{ - int nr; - - if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) - nr_irqs = NR_VECTORS * nr_cpu_ids; - - nr = nr_irqs_gsi + 8 * nr_cpu_ids; -#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) - /* - * for MSI and HT dyn irq - */ - nr += nr_irqs_gsi * 16; -#endif - if (nr < nr_irqs) - nr_irqs = nr; - - return 0; -} -#endif - -/* -------------------------------------------------------------------------- - ACPI-based IOAPIC Configuration - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI - -#ifdef CONFIG_X86_32 -int __init io_apic_get_unique_id(int ioapic, int apic_id) -{ - union IO_APIC_reg_00 reg_00; - static physid_mask_t apic_id_map = PHYSID_MASK_NONE; - physid_mask_t tmp; - unsigned long flags; - int i = 0; - - /* - * The P4 platform supports up to 256 APIC IDs on two separate APIC - * buses (one for LAPICs, one for IOAPICs), where predecessors only - * supports up to 16 on one shared APIC bus. - * - * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full - * advantage of new APIC bus architecture. - */ - - if (physids_empty(apic_id_map)) - apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); - - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(ioapic, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); - - if (apic_id >= get_physical_broadcast()) { - printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " - "%d\n", ioapic, apic_id, reg_00.bits.ID); - apic_id = reg_00.bits.ID; - } - - /* - * Every APIC in a system must have a unique ID or we get lots of nice - * 'stuck on smp_invalidate_needed IPI wait' messages. 
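
[To make the IRQ-count sizing in arch_probe_nr_irqs() above concrete, a worked example with made-up but typical numbers — two 24-entry IO-APICs, 8 possible CPUs, ACPI reporting the same GSI count:

	nr_irqs_gsi = 24 + 24              =  48
	nr          = nr_irqs_gsi + 8 * 8  = 112
	nr         += nr_irqs_gsi * 16     = 880	/* MSI/HT dynamic IRQs */

so nr_irqs is lowered to 880 if the generic default was larger, after first being capped at NR_VECTORS * nr_cpu_ids.]
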
- */ - if (apic->check_apicid_used(apic_id_map, apic_id)) { - - for (i = 0; i < get_physical_broadcast(); i++) { - if (!apic->check_apicid_used(apic_id_map, i)) - break; - } - - if (i == get_physical_broadcast()) - panic("Max apic_id exceeded!\n"); - - printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " - "trying %d\n", ioapic, apic_id, i); - - apic_id = i; - } - - tmp = apic->apicid_to_cpu_present(apic_id); - physids_or(apic_id_map, apic_id_map, tmp); - - if (reg_00.bits.ID != apic_id) { - reg_00.bits.ID = apic_id; - - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(ioapic, 0, reg_00.raw); - reg_00.raw = io_apic_read(ioapic, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); - - /* Sanity check */ - if (reg_00.bits.ID != apic_id) { - printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); - return -1; - } - } - - apic_printk(APIC_VERBOSE, KERN_INFO - "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); - - return apic_id; -} - -int __init io_apic_get_version(int ioapic) -{ - union IO_APIC_reg_01 reg_01; - unsigned long flags; - - spin_lock_irqsave(&ioapic_lock, flags); - reg_01.raw = io_apic_read(ioapic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); - - return reg_01.bits.version; -} -#endif - -int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) -{ - struct irq_desc *desc; - struct irq_cfg *cfg; - int cpu = boot_cpu_id; - - if (!IO_APIC_IRQ(irq)) { - apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", - ioapic); - return -EINVAL; - } - - desc = irq_to_desc_alloc_cpu(irq, cpu); - if (!desc) { - printk(KERN_INFO "can not get irq_desc %d\n", irq); - return 0; - } - - /* - * IRQs < 16 are already in the irq_2_pin[] map - */ - if (irq >= NR_IRQS_LEGACY) { - cfg = desc->chip_data; - add_pin_to_irq_cpu(cfg, cpu, ioapic, pin); - } - - setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity); - - return 0; -} - - -int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) -{ - int i; - - if (skip_ioapic_setup) - return -1; - - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].irqtype == mp_INT && - mp_irqs[i].srcbusirq == bus_irq) - break; - if (i >= mp_irq_entries) - return -1; - - *trigger = irq_trigger(i); - *polarity = irq_polarity(i); - return 0; -} - -#endif /* CONFIG_ACPI */ - -/* - * This function currently is only a helper for the i386 smp boot process where - * we need to reprogram the ioredtbls to cater for the cpus which have come online - * so mask in all cases should simply be apic->target_cpus() - */ -#ifdef CONFIG_SMP -void __init setup_ioapic_dest(void) -{ - int pin, ioapic, irq, irq_entry; - struct irq_desc *desc; - struct irq_cfg *cfg; - const struct cpumask *mask; - - if (skip_ioapic_setup == 1) - return; - - for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { - for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { - irq_entry = find_irq_entry(ioapic, pin, mp_INT); - if (irq_entry == -1) - continue; - irq = pin_2_irq(irq_entry, ioapic, pin); - - /* setup_IO_APIC_irqs could fail to get vector for some device - * when you have too many devices, because at that time only boot - * cpu is online. 
- */ - desc = irq_to_desc(irq); - cfg = desc->chip_data; - if (!cfg->vector) { - setup_IO_APIC_irq(ioapic, pin, irq, desc, - irq_trigger(irq_entry), - irq_polarity(irq_entry)); - continue; - - } - - /* - * Honour affinities which have been set in early boot - */ - if (desc->status & - (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->affinity; - else - mask = apic->target_cpus(); - -#ifdef CONFIG_INTR_REMAP - if (intr_remapping_enabled) - set_ir_ioapic_affinity_irq_desc(desc, mask); - else -#endif - set_ioapic_affinity_irq_desc(desc, mask); - } - - } -} -#endif - -#define IOAPIC_RESOURCE_NAME_SIZE 11 - -static struct resource *ioapic_resources; - -static struct resource * __init ioapic_setup_resources(void) -{ - unsigned long n; - struct resource *res; - char *mem; - int i; - - if (nr_ioapics <= 0) - return NULL; - - n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); - n *= nr_ioapics; - - mem = alloc_bootmem(n); - res = (void *)mem; - - if (mem != NULL) { - mem += sizeof(struct resource) * nr_ioapics; - - for (i = 0; i < nr_ioapics; i++) { - res[i].name = mem; - res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; - sprintf(mem, "IOAPIC %u", i); - mem += IOAPIC_RESOURCE_NAME_SIZE; - } - } - - ioapic_resources = res; - - return res; -} - -void __init ioapic_init_mappings(void) -{ - unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; - struct resource *ioapic_res; - int i; - - ioapic_res = ioapic_setup_resources(); - for (i = 0; i < nr_ioapics; i++) { - if (smp_found_config) { - ioapic_phys = mp_ioapics[i].apicaddr; -#ifdef CONFIG_X86_32 - if (!ioapic_phys) { - printk(KERN_ERR - "WARNING: bogus zero IO-APIC " - "address found in MPTABLE, " - "disabling IO/APIC support!\n"); - smp_found_config = 0; - skip_ioapic_setup = 1; - goto fake_ioapic_page; - } -#endif - } else { -#ifdef CONFIG_X86_32 -fake_ioapic_page: -#endif - ioapic_phys = (unsigned long) - alloc_bootmem_pages(PAGE_SIZE); - ioapic_phys = __pa(ioapic_phys); - } - set_fixmap_nocache(idx, ioapic_phys); - apic_printk(APIC_VERBOSE, - "mapped IOAPIC to %08lx (%08lx)\n", - __fix_to_virt(idx), ioapic_phys); - idx++; - - if (ioapic_res != NULL) { - ioapic_res->start = ioapic_phys; - ioapic_res->end = ioapic_phys + (4 * 1024) - 1; - ioapic_res++; - } - } -} - -static int __init ioapic_insert_resources(void) -{ - int i; - struct resource *r = ioapic_resources; - - if (!r) { - printk(KERN_ERR - "IO APIC resources could be not be allocated.\n"); - return -1; - } - - for (i = 0; i < nr_ioapics; i++) { - insert_resource(&iomem_resource, r); - r++; - } - - return 0; -} - -/* Insert the IO APIC resources after PCI initialization has occured to handle - * IO APICS that are mapped in on a BAR in PCI space. */ -late_initcall(ioapic_insert_resources); diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c deleted file mode 100644 index dbf5445727a..00000000000 --- a/arch/x86/kernel/ipi.c +++ /dev/null @@ -1,164 +0,0 @@ -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) -{ - unsigned long query_cpu; - unsigned long flags; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicast to each CPU instead. 
- * - mbligh - */ - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, - query_cpu), vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, - int vector) -{ - unsigned int this_cpu = smp_processor_id(); - unsigned int query_cpu; - unsigned long flags; - - /* See Hack comment above */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, - query_cpu), vector, APIC_DEST_PHYSICAL); - } - local_irq_restore(flags); -} - -void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, - int vector) -{ - unsigned long flags; - unsigned int query_cpu; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicasts to each CPU instead. This - * should be modified to do 1 message per cluster ID - mbligh - */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) - __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); - local_irq_restore(flags); -} - -void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, - int vector) -{ - unsigned long flags; - unsigned int query_cpu; - unsigned int this_cpu = smp_processor_id(); - - /* See Hack comment above */ - - local_irq_save(flags); - for_each_cpu(query_cpu, mask) { - if (query_cpu == this_cpu) - continue; - __default_send_IPI_dest_field( - apic->cpu_to_logical_apicid(query_cpu), vector, - apic->dest_logical); - } - local_irq_restore(flags); -} - -#ifdef CONFIG_X86_32 - -/* - * This is only used on smaller machines. - */ -void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - unsigned long flags; - - local_irq_save(flags); - WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); - __default_send_IPI_dest_field(mask, vector, apic->dest_logical); - local_irq_restore(flags); -} - -void default_send_IPI_allbutself(int vector) -{ - /* - * if there are no other CPUs in the system then we get an APIC send - * error if we try to broadcast, thus avoid sending IPIs in this case. - */ - if (!(num_online_cpus() > 1)) - return; - - __default_local_send_IPI_allbutself(vector); -} - -void default_send_IPI_all(int vector) -{ - __default_local_send_IPI_all(vector); -} - -void default_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); -} - -/* must come after the send_IPI functions above for inlining */ -static int convert_apicid_to_cpu(int apic_id) -{ - int i; - - for_each_possible_cpu(i) { - if (per_cpu(x86_cpu_to_apicid, i) == apic_id) - return i; - } - return -1; -} - -int safe_smp_processor_id(void) -{ - int apicid, cpuid; - - if (!boot_cpu_has(X86_FEATURE_APIC)) - return 0; - - apicid = hard_smp_processor_id(); - if (apicid == BAD_APICID) - return 0; - - cpuid = convert_apicid_to_cpu(apicid); - - return cpuid >= 0 ? cpuid : 0; -} -#endif diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c deleted file mode 100644 index bdfad80c3cf..00000000000 --- a/arch/x86/kernel/nmi.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * NMI watchdog support on APIC systems - * - * Started by Ingo Molnar - * - * Fixes: - * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog. - * Mikael Pettersson : Power Management for local APIC NMI watchdog. 
- * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog. - * Pavel Machek and - * Mikael Pettersson : PM converted to driver model. Disable/enable API. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -#include - -int unknown_nmi_panic; -int nmi_watchdog_enabled; - -static cpumask_t backtrace_mask = CPU_MASK_NONE; - -/* nmi_active: - * >0: the lapic NMI watchdog is active, but can be disabled - * <0: the lapic NMI watchdog has not been set up, and cannot - * be enabled - * 0: the lapic NMI watchdog is disabled, but can be enabled - */ -atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ -EXPORT_SYMBOL(nmi_active); - -unsigned int nmi_watchdog = NMI_NONE; -EXPORT_SYMBOL(nmi_watchdog); - -static int panic_on_timeout; - -static unsigned int nmi_hz = HZ; -static DEFINE_PER_CPU(short, wd_enabled); -static int endflag __initdata; - -static inline unsigned int get_nmi_count(int cpu) -{ - return per_cpu(irq_stat, cpu).__nmi_count; -} - -static inline int mce_in_progress(void) -{ -#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) - return atomic_read(&mce_entry) > 0; -#endif - return 0; -} - -/* - * Take the local apic timer and PIT/HPET into account. We don't - * know which one is active, when we have highres/dyntick on - */ -static inline unsigned int get_timer_irqs(int cpu) -{ - return per_cpu(irq_stat, cpu).apic_timer_irqs + - per_cpu(irq_stat, cpu).irq0_irqs; -} - -#ifdef CONFIG_SMP -/* - * The performance counters used by NMI_LOCAL_APIC don't trigger when - * the CPU is idle. To make sure the NMI watchdog really ticks on all - * CPUs during the test make them busy. - */ -static __init void nmi_cpu_busy(void *data) -{ - local_irq_enable_in_hardirq(); - /* - * Intentionally don't use cpu_relax here. This is - * to make sure that the performance counter really ticks, - * even if there is a simulator or similar that catches the - * pause instruction. On a real HT machine this is fine because - * all other CPUs are busy with "useless" delay loops and don't - * care if they get somewhat less cycles. - */ - while (endflag == 0) - mb(); -} -#endif - -static void report_broken_nmi(int cpu, int *prev_nmi_count) -{ - printk(KERN_CONT "\n"); - - printk(KERN_WARNING - "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", - cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); - - printk(KERN_WARNING - "Please report this to bugzilla.kernel.org,\n"); - printk(KERN_WARNING - "and attach the output of the 'dmesg' command.\n"); - - per_cpu(wd_enabled, cpu) = 0; - atomic_dec(&nmi_active); -} - -static void __acpi_nmi_disable(void *__unused) -{ - apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); -} - -int __init check_nmi_watchdog(void) -{ - unsigned int *prev_nmi_count; - int cpu; - - if (!nmi_watchdog_active() || !atomic_read(&nmi_active)) - return 0; - - prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); - if (!prev_nmi_count) - goto error; - - printk(KERN_INFO "Testing NMI watchdog ... 
"); - -#ifdef CONFIG_SMP - if (nmi_watchdog == NMI_LOCAL_APIC) - smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); -#endif - - for_each_possible_cpu(cpu) - prev_nmi_count[cpu] = get_nmi_count(cpu); - local_irq_enable(); - mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ - - for_each_online_cpu(cpu) { - if (!per_cpu(wd_enabled, cpu)) - continue; - if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) - report_broken_nmi(cpu, prev_nmi_count); - } - endflag = 1; - if (!atomic_read(&nmi_active)) { - kfree(prev_nmi_count); - atomic_set(&nmi_active, -1); - goto error; - } - printk("OK.\n"); - - /* - * now that we know it works we can reduce NMI frequency to - * something more reasonable; makes a difference in some configs - */ - if (nmi_watchdog == NMI_LOCAL_APIC) - nmi_hz = lapic_adjust_nmi_hz(1); - - kfree(prev_nmi_count); - return 0; -error: - if (nmi_watchdog == NMI_IO_APIC) { - if (!timer_through_8259) - disable_8259A_irq(0); - on_each_cpu(__acpi_nmi_disable, NULL, 1); - } - -#ifdef CONFIG_X86_32 - timer_ack = 0; -#endif - return -1; -} - -static int __init setup_nmi_watchdog(char *str) -{ - unsigned int nmi; - - if (!strncmp(str, "panic", 5)) { - panic_on_timeout = 1; - str = strchr(str, ','); - if (!str) - return 1; - ++str; - } - - if (!strncmp(str, "lapic", 5)) - nmi_watchdog = NMI_LOCAL_APIC; - else if (!strncmp(str, "ioapic", 6)) - nmi_watchdog = NMI_IO_APIC; - else { - get_option(&str, &nmi); - if (nmi >= NMI_INVALID) - return 0; - nmi_watchdog = nmi; - } - - return 1; -} -__setup("nmi_watchdog=", setup_nmi_watchdog); - -/* - * Suspend/resume support - */ -#ifdef CONFIG_PM - -static int nmi_pm_active; /* nmi_active before suspend */ - -static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) -{ - /* only CPU0 goes here, other CPUs should be offline */ - nmi_pm_active = atomic_read(&nmi_active); - stop_apic_nmi_watchdog(NULL); - BUG_ON(atomic_read(&nmi_active) != 0); - return 0; -} - -static int lapic_nmi_resume(struct sys_device *dev) -{ - /* only CPU0 goes here, other CPUs should be offline */ - if (nmi_pm_active > 0) { - setup_apic_nmi_watchdog(NULL); - touch_nmi_watchdog(); - } - return 0; -} - -static struct sysdev_class nmi_sysclass = { - .name = "lapic_nmi", - .resume = lapic_nmi_resume, - .suspend = lapic_nmi_suspend, -}; - -static struct sys_device device_lapic_nmi = { - .id = 0, - .cls = &nmi_sysclass, -}; - -static int __init init_lapic_nmi_sysfs(void) -{ - int error; - - /* - * should really be a BUG_ON but b/c this is an - * init call, it just doesn't work. 
-dcz - */ - if (nmi_watchdog != NMI_LOCAL_APIC) - return 0; - - if (atomic_read(&nmi_active) < 0) - return 0; - - error = sysdev_class_register(&nmi_sysclass); - if (!error) - error = sysdev_register(&device_lapic_nmi); - return error; -} - -/* must come after the local APIC's device_initcall() */ -late_initcall(init_lapic_nmi_sysfs); - -#endif /* CONFIG_PM */ - -static void __acpi_nmi_enable(void *__unused) -{ - apic_write(APIC_LVT0, APIC_DM_NMI); -} - -/* - * Enable timer based NMIs on all CPUs: - */ -void acpi_nmi_enable(void) -{ - if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_enable, NULL, 1); -} - -/* - * Disable timer based NMIs on all CPUs: - */ -void acpi_nmi_disable(void) -{ - if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_disable, NULL, 1); -} - -/* - * This function is called as soon the LAPIC NMI watchdog driver has everything - * in place and it's ready to check if the NMIs belong to the NMI watchdog - */ -void cpu_nmi_set_wd_enabled(void) -{ - __get_cpu_var(wd_enabled) = 1; -} - -void setup_apic_nmi_watchdog(void *unused) -{ - if (__get_cpu_var(wd_enabled)) - return; - - /* cheap hack to support suspend/resume */ - /* if cpu0 is not active neither should the other cpus */ - if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0) - return; - - switch (nmi_watchdog) { - case NMI_LOCAL_APIC: - if (lapic_watchdog_init(nmi_hz) < 0) { - __get_cpu_var(wd_enabled) = 0; - return; - } - /* FALL THROUGH */ - case NMI_IO_APIC: - __get_cpu_var(wd_enabled) = 1; - atomic_inc(&nmi_active); - } -} - -void stop_apic_nmi_watchdog(void *unused) -{ - /* only support LOCAL and IO APICs for now */ - if (!nmi_watchdog_active()) - return; - if (__get_cpu_var(wd_enabled) == 0) - return; - if (nmi_watchdog == NMI_LOCAL_APIC) - lapic_watchdog_stop(); - else - __acpi_nmi_disable(NULL); - __get_cpu_var(wd_enabled) = 0; - atomic_dec(&nmi_active); -} - -/* - * the best way to detect whether a CPU has a 'hard lockup' problem - * is to check it's local APIC timer IRQ counts. If they are not - * changing then that CPU has some problem. - * - * as these watchdog NMI IRQs are generated on every CPU, we only - * have to check the current processor. - * - * since NMIs don't listen to _any_ locks, we have to be extremely - * careful not to rely on unsafe variables. The printk might lock - * up though, so we have to break up any console locks first ... - * [when there will be more tty-related locks, break them up here too!] - */ - -static DEFINE_PER_CPU(unsigned, last_irq_sum); -static DEFINE_PER_CPU(local_t, alert_counter); -static DEFINE_PER_CPU(int, nmi_touch); - -void touch_nmi_watchdog(void) -{ - if (nmi_watchdog_active()) { - unsigned cpu; - - /* - * Tell other CPUs to reset their alert counters. We cannot - * do it ourselves because the alert count increase is not - * atomic. - */ - for_each_present_cpu(cpu) { - if (per_cpu(nmi_touch, cpu) != 1) - per_cpu(nmi_touch, cpu) = 1; - } - } - - /* - * Tickle the softlockup detector too: - */ - touch_softlockup_watchdog(); -} -EXPORT_SYMBOL(touch_nmi_watchdog); - -notrace __kprobes int -nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) -{ - /* - * Since current_thread_info()-> is always on the stack, and we - * always switch the stack NMI-atomically, it's safe to use - * smp_processor_id(). 
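
[The lockup check in nmi_watchdog_tick() below boils down to the following condensed sketch (the notifier and backtrace handling of the real function are omitted). Since the watchdog fires at nmi_hz, a counter of 5 * nmi_hz corresponds to roughly five seconds without any timer interrupt:

	unsigned int sum = get_timer_irqs(cpu);	/* LAPIC timer + IRQ0 counts */

	if (!touched && sum == __get_cpu_var(last_irq_sum)) {
		/* no timer interrupt since the last watchdog NMI: suspect a lockup */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		/* forward progress seen: restart the ~5 second window */
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
]
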
- */ - unsigned int sum; - int touched = 0; - int cpu = smp_processor_id(); - int rc = 0; - - /* check for other users first */ - if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) - == NOTIFY_STOP) { - rc = 1; - touched = 1; - } - - sum = get_timer_irqs(cpu); - - if (__get_cpu_var(nmi_touch)) { - __get_cpu_var(nmi_touch) = 0; - touched = 1; - } - - if (cpu_isset(cpu, backtrace_mask)) { - static DEFINE_SPINLOCK(lock); /* Serialise the printks */ - - spin_lock(&lock); - printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); - dump_stack(); - spin_unlock(&lock); - cpu_clear(cpu, backtrace_mask); - } - - /* Could check oops_in_progress here too, but it's safer not to */ - if (mce_in_progress()) - touched = 1; - - /* if the none of the timers isn't firing, this cpu isn't doing much */ - if (!touched && __get_cpu_var(last_irq_sum) == sum) { - /* - * Ayiee, looks like this CPU is stuck ... - * wait a few IRQs (5 seconds) before doing the oops ... - */ - local_inc(&__get_cpu_var(alert_counter)); - if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) - /* - * die_nmi will return ONLY if NOTIFY_STOP happens.. - */ - die_nmi("BUG: NMI Watchdog detected LOCKUP", - regs, panic_on_timeout); - } else { - __get_cpu_var(last_irq_sum) = sum; - local_set(&__get_cpu_var(alert_counter), 0); - } - - /* see if the nmi watchdog went off */ - if (!__get_cpu_var(wd_enabled)) - return rc; - switch (nmi_watchdog) { - case NMI_LOCAL_APIC: - rc |= lapic_wd_event(nmi_hz); - break; - case NMI_IO_APIC: - /* - * don't know how to accurately check for this. - * just assume it was a watchdog timer interrupt - * This matches the old behaviour. - */ - rc = 1; - break; - } - return rc; -} - -#ifdef CONFIG_SYSCTL - -static void enable_ioapic_nmi_watchdog_single(void *unused) -{ - __get_cpu_var(wd_enabled) = 1; - atomic_inc(&nmi_active); - __acpi_nmi_enable(NULL); -} - -static void enable_ioapic_nmi_watchdog(void) -{ - on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1); - touch_nmi_watchdog(); -} - -static void disable_ioapic_nmi_watchdog(void) -{ - on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); -} - -static int __init setup_unknown_nmi_panic(char *str) -{ - unknown_nmi_panic = 1; - return 1; -} -__setup("unknown_nmi_panic", setup_unknown_nmi_panic); - -static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) -{ - unsigned char reason = get_nmi_reason(); - char buf[64]; - - sprintf(buf, "NMI received for unknown reason %02x\n", reason); - die_nmi(buf, regs, 1); /* Always panic here */ - return 0; -} - -/* - * proc handler for /proc/sys/kernel/nmi - */ -int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, - void __user *buffer, size_t *length, loff_t *ppos) -{ - int old_state; - - nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 
1 : 0; - old_state = nmi_watchdog_enabled; - proc_dointvec(table, write, file, buffer, length, ppos); - if (!!old_state == !!nmi_watchdog_enabled) - return 0; - - if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) { - printk(KERN_WARNING - "NMI watchdog is permanently disabled\n"); - return -EIO; - } - - if (nmi_watchdog == NMI_LOCAL_APIC) { - if (nmi_watchdog_enabled) - enable_lapic_nmi_watchdog(); - else - disable_lapic_nmi_watchdog(); - } else if (nmi_watchdog == NMI_IO_APIC) { - if (nmi_watchdog_enabled) - enable_ioapic_nmi_watchdog(); - else - disable_ioapic_nmi_watchdog(); - } else { - printk(KERN_WARNING - "NMI watchdog doesn't know what hardware to touch\n"); - return -EIO; - } - return 0; -} - -#endif /* CONFIG_SYSCTL */ - -int do_nmi_callback(struct pt_regs *regs, int cpu) -{ -#ifdef CONFIG_SYSCTL - if (unknown_nmi_panic) - return unknown_nmi_panic_callback(regs, cpu); -#endif - return 0; -} - -void __trigger_all_cpu_backtrace(void) -{ - int i; - - backtrace_mask = cpu_online_map; - /* Wait for up to 10 seconds for all CPUs to do the backtrace */ - for (i = 0; i < 10 * 1000; i++) { - if (cpus_empty(backtrace_mask)) - break; - mdelay(1); - } -} -- cgit v1.2.3 From 2a05180fe2e5b414f0cb2ccfc80e6c90563e3c67 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 20:35:16 +0100 Subject: x86, apic: move remaining APIC drivers to arch/x86/kernel/apic/* Move the 32-bit extended-arch APIC drivers to arch/x86/kernel/apic/ too, and rename apic_64.c to probe_64.c. Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 6 +- arch/x86/kernel/apic/Makefile | 9 +- arch/x86/kernel/apic/apic_64.c | 89 ----- arch/x86/kernel/apic/bigsmp_32.c | 274 ++++++++++++++ arch/x86/kernel/apic/es7000_32.c | 757 +++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/apic/numaq_32.c | 565 +++++++++++++++++++++++++++++ arch/x86/kernel/apic/probe_32.c | 424 ++++++++++++++++++++++ arch/x86/kernel/apic/probe_64.c | 89 +++++ arch/x86/kernel/apic/summit_32.c | 601 +++++++++++++++++++++++++++++++ arch/x86/kernel/bigsmp_32.c | 274 -------------- arch/x86/kernel/es7000_32.c | 757 --------------------------------------- arch/x86/kernel/numaq_32.c | 565 ----------------------------- arch/x86/kernel/probe_32.c | 424 ---------------------- arch/x86/kernel/summit_32.c | 601 ------------------------------- 14 files changed, 2718 insertions(+), 2717 deletions(-) delete mode 100644 arch/x86/kernel/apic/apic_64.c create mode 100644 arch/x86/kernel/apic/bigsmp_32.c create mode 100644 arch/x86/kernel/apic/es7000_32.c create mode 100644 arch/x86/kernel/apic/numaq_32.c create mode 100644 arch/x86/kernel/apic/probe_32.c create mode 100644 arch/x86/kernel/apic/probe_64.c create mode 100644 arch/x86/kernel/apic/summit_32.c delete mode 100644 arch/x86/kernel/bigsmp_32.c delete mode 100644 arch/x86/kernel/es7000_32.c delete mode 100644 arch/x86/kernel/numaq_32.c delete mode 100644 arch/x86/kernel/probe_32.c delete mode 100644 arch/x86/kernel/summit_32.c (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9139ff69471..c70537d8c15 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -30,7 +30,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o obj-y += setup.o i8259.o irqinit_$(BITS).o obj-$(CONFIG_X86_VISWS) += visws_quirks.o -obj-$(CONFIG_X86_32) += probe_32.o probe_roms_32.o +obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_64) += 
sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o @@ -70,10 +70,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o -obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o -obj-$(CONFIG_X86_NUMAQ) += numaq_32.o -obj-$(CONFIG_X86_ES7000) += es7000_32.o -obj-$(CONFIG_X86_SUMMIT) += summit_32.o obj-y += vsmp_64.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module_$(BITS).o diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile index da20b70c400..97f558db5c3 100644 --- a/arch/x86/kernel/apic/Makefile +++ b/arch/x86/kernel/apic/Makefile @@ -2,14 +2,19 @@ # Makefile for local APIC drivers and for the IO-APIC code # -obj-y := apic.o ipi.o nmi.o +obj-y := apic.o probe_$(BITS).o ipi.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_SMP) += ipi.o +obj-$ ifeq ($(CONFIG_X86_64),y) -obj-y += apic_64.o apic_flat_64.o +obj-y += apic_flat_64.o obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o obj-$(CONFIG_X86_UV) += x2apic_uv_x.o endif +obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o +obj-$(CONFIG_X86_NUMAQ) += numaq_32.o +obj-$(CONFIG_X86_ES7000) += es7000_32.o +obj-$(CONFIG_X86_SUMMIT) += summit_32.o diff --git a/arch/x86/kernel/apic/apic_64.c b/arch/x86/kernel/apic/apic_64.c deleted file mode 100644 index 70935dd904d..00000000000 --- a/arch/x86/kernel/apic/apic_64.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Generic APIC sub-arch probe layer. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -extern struct apic apic_flat; -extern struct apic apic_physflat; -extern struct apic apic_x2xpic_uv_x; -extern struct apic apic_x2apic_phys; -extern struct apic apic_x2apic_cluster; - -struct apic __read_mostly *apic = &apic_flat; -EXPORT_SYMBOL_GPL(apic); - -static struct apic *apic_probe[] __initdata = { -#ifdef CONFIG_X86_UV - &apic_x2apic_uv_x, -#endif -#ifdef CONFIG_X86_X2APIC - &apic_x2apic_phys, - &apic_x2apic_cluster, -#endif - &apic_physflat, - NULL, -}; - -/* - * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. - */ -void __init default_setup_apic_routing(void) -{ -#ifdef CONFIG_X86_X2APIC - if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { - if (!intr_remapping_enabled) - apic = &apic_flat; - } -#endif - - if (apic == &apic_flat) { - if (max_physical_apicid >= 8) - apic = &apic_physflat; - printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); - } - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); -} - -/* Same for both flat and physical. 
*/ - -void apic_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); -} - -int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { - apic = apic_probe[i]; - printk(KERN_INFO "Setting APIC routing to %s.\n", - apic->name); - return 1; - } - } - return 0; -} diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c new file mode 100644 index 00000000000..0b1093394fd --- /dev/null +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -0,0 +1,274 @@ +/* + * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. + * + * Drives the local APIC in "clustered mode". + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static inline unsigned bigsmp_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +static inline int bigsmp_apic_id_registered(void) +{ + return 1; +} + +static inline const cpumask_t *bigsmp_target_cpus(void) +{ +#ifdef CONFIG_SMP + return &cpu_online_map; +#else + return &cpumask_of_cpu(0); +#endif +} + +static inline unsigned long +bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} + +static inline unsigned long bigsmp_check_apicid_present(int bit) +{ + return 1; +} + +static inline unsigned long calculate_ldr(int cpu) +{ + unsigned long val, id; + + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + id = per_cpu(x86_bios_cpu_apicid, cpu); + val |= SET_APIC_LOGICAL_ID(id); + + return val; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +static inline void bigsmp_init_apic_ldr(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_FLAT); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static inline void bigsmp_setup_apic_routing(void) +{ + printk(KERN_INFO + "Enabling APIC mode: Physflat. 
Using %d I/O APICs\n", + nr_ioapics); +} + +static inline int bigsmp_apicid_to_node(int logical_apicid) +{ + return apicid_2_node[hard_smp_processor_id()]; +} + +static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids) + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); + + return BAD_APICID; +} + +static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) +{ + return physid_mask_of_physid(phys_apicid); +} + +/* Mapping from cpu number to logical apicid */ +static inline int bigsmp_cpu_to_logical_apicid(int cpu) +{ + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return cpu_physical_id(cpu); +} + +static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0xFFL); +} + +static inline void bigsmp_setup_portio_remap(void) +{ +} + +static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +/* As we are using single CPU as destination, pick only one CPU here */ +static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); +} + +static inline unsigned int +bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + if (cpu < nr_cpu_ids) + return bigsmp_cpu_to_logical_apicid(cpu); + + return BAD_APICID; +} + +static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence_phys(mask, vector); +} + +static inline void bigsmp_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); +} + +static inline void bigsmp_send_IPI_all(int vector) +{ + bigsmp_send_IPI_mask(cpu_online_mask, vector); +} + +static int dmi_bigsmp; /* can be set by dmi scanners */ + +static int hp_ht_bigsmp(const struct dmi_system_id *d) +{ + printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); + dmi_bigsmp = 1; + + return 0; +} + + +static const struct dmi_system_id bigsmp_dmi_table[] = { + { hp_ht_bigsmp, "HP ProLiant DL760 G2", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P44-"), + } + }, + + { hp_ht_bigsmp, "HP ProLiant DL740", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P47-"), + } + }, + { } /* NULL entry stops DMI scanning */ +}; + +static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + cpus_clear(*retmask); + cpu_set(cpu, *retmask); +} + +static int probe_bigsmp(void) +{ + if (def_to_bigsmp) + dmi_bigsmp = 1; + else + dmi_check_system(bigsmp_dmi_table); + + return dmi_bigsmp; +} + +struct apic apic_bigsmp = { + + .name = "bigsmp", + .probe = probe_bigsmp, + .acpi_madt_oem_check = NULL, + .apic_id_registered = bigsmp_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPU: */ + .irq_dest_mode = 0, + + .target_cpus = bigsmp_target_cpus, + .disable_esr = 1, + .dest_logical = 0, + .check_apicid_used = bigsmp_check_apicid_used, + .check_apicid_present = bigsmp_check_apicid_present, + + .vector_allocation_domain = bigsmp_vector_allocation_domain, + 
.init_apic_ldr = bigsmp_init_apic_ldr, + + .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, + .setup_apic_routing = bigsmp_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = bigsmp_apicid_to_node, + .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, + .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, + .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = bigsmp_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = bigsmp_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = bigsmp_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, + + .send_IPI_mask = bigsmp_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = bigsmp_send_IPI_allbutself, + .send_IPI_all = bigsmp_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c new file mode 100644 index 00000000000..320f2d2e4e5 --- /dev/null +++ b/arch/x86/kernel/apic/es7000_32.c @@ -0,0 +1,757 @@ +/* + * Written by: Garry Forsgren, Unisys Corporation + * Natalie Protasevich, Unisys Corporation + * + * This file contains the code to configure and interface + * with Unisys ES7000 series hardware system manager. + * + * Copyright (c) 2003 Unisys Corporation. + * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar + * + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Unisys Corporation, Township Line & Union Meeting + * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or: + * + * http://www.unisys.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * ES7000 chipsets + */ + +#define NON_UNISYS 0 +#define ES7000_CLASSIC 1 +#define ES7000_ZORRO 2 + +#define MIP_REG 1 +#define MIP_PSAI_REG 4 + +#define MIP_BUSY 1 +#define MIP_SPIN 0xf0000 +#define MIP_VALID 0x0100000000000000ULL +#define MIP_SW_APIC 0x1020b + +#define MIP_PORT(val) ((val >> 32) & 0xffff) + +#define MIP_RD_LO(val) (val & 0xffffffff) + +struct mip_reg { + unsigned long long off_0x00; + unsigned long long off_0x08; + unsigned long long off_0x10; + unsigned long long off_0x18; + unsigned long long off_0x20; + unsigned long long off_0x28; + unsigned long long off_0x30; + unsigned long long off_0x38; +}; + +struct mip_reg_info { + unsigned long long mip_info; + unsigned long long delivery_info; + unsigned long long host_reg; + unsigned long long mip_reg; +}; + +struct psai { + unsigned long long entry_type; + unsigned long long addr; + unsigned long long bep_addr; +}; + +#ifdef CONFIG_ACPI + +struct es7000_oem_table { + struct acpi_table_header Header; + u32 OEMTableAddr; + u32 OEMTableSize; +}; + +static unsigned long oem_addrX; +static unsigned long oem_size; + +#endif + +/* + * ES7000 Globals + */ + +static volatile unsigned long *psai; +static struct mip_reg *mip_reg; +static struct mip_reg *host_reg; +static int mip_port; +static unsigned long mip_addr; +static unsigned long host_addr; + +int es7000_plat; + +/* + * GSI override for ES7000 platforms. + */ + +static unsigned int base; + +static int +es7000_rename_gsi(int ioapic, int gsi) +{ + if (es7000_plat == ES7000_ZORRO) + return gsi; + + if (!base) { + int i; + for (i = 0; i < nr_ioapics; i++) + base += nr_ioapic_registers[i]; + } + + if (!ioapic && (gsi < 16)) + gsi += base; + + return gsi; +} + +static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) +{ + unsigned long vect = 0, psaival = 0; + + if (psai == NULL) + return -1; + + vect = ((unsigned long)__pa(eip)/0x1000) << 16; + psaival = (0x1000000 | vect | cpu); + + while (*psai & 0x1000000) + ; + + *psai = psaival; + + return 0; +} + +static int __init es7000_update_apic(void) +{ + apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + + /* MPENTIUMIII */ + if (boot_cpu_data.x86 == 6 && + (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { + es7000_update_apic_to_cluster(); + apic->wait_for_init_deassert = NULL; + apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + } + + return 0; +} + +static void __init setup_unisys(void) +{ + /* + * Determine the generation of the ES7000 currently running. 
+ * + * es7000_plat = 1 if the machine is a 5xx ES7000 box + * es7000_plat = 2 if the machine is a x86_64 ES7000 box + * + */ + if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) + es7000_plat = ES7000_ZORRO; + else + es7000_plat = ES7000_CLASSIC; + ioapic_renumber_irq = es7000_rename_gsi; + + x86_quirks->update_apic = es7000_update_apic; +} + +/* + * Parse the OEM Table: + */ +static int __init parse_unisys_oem(char *oemptr) +{ + int i; + int success = 0; + unsigned char type, size; + unsigned long val; + char *tp = NULL; + struct psai *psaip = NULL; + struct mip_reg_info *mi; + struct mip_reg *host, *mip; + + tp = oemptr; + + tp += 8; + + for (i = 0; i <= 6; i++) { + type = *tp++; + size = *tp++; + tp -= 2; + switch (type) { + case MIP_REG: + mi = (struct mip_reg_info *)tp; + val = MIP_RD_LO(mi->host_reg); + host_addr = val; + host = (struct mip_reg *)val; + host_reg = __va(host); + val = MIP_RD_LO(mi->mip_reg); + mip_port = MIP_PORT(mi->mip_info); + mip_addr = val; + mip = (struct mip_reg *)val; + mip_reg = __va(mip); + pr_debug("es7000_mipcfg: host_reg = 0x%lx \n", + (unsigned long)host_reg); + pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n", + (unsigned long)mip_reg); + success++; + break; + case MIP_PSAI_REG: + psaip = (struct psai *)tp; + if (tp != NULL) { + if (psaip->addr) + psai = __va(psaip->addr); + else + psai = NULL; + success++; + } + break; + default: + break; + } + tp += size; + } + + if (success < 2) + es7000_plat = NON_UNISYS; + else + setup_unisys(); + + return es7000_plat; +} + +#ifdef CONFIG_ACPI +static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) +{ + struct acpi_table_header *header = NULL; + struct es7000_oem_table *table; + acpi_size tbl_size; + acpi_status ret; + int i = 0; + + for (;;) { + ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size); + if (!ACPI_SUCCESS(ret)) + return -1; + + if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) + break; + + early_acpi_os_unmap_memory(header, tbl_size); + } + + table = (void *)header; + + oem_addrX = table->OEMTableAddr; + oem_size = table->OEMTableSize; + + early_acpi_os_unmap_memory(header, tbl_size); + + *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); + + return 0; +} + +static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) +{ + if (!oem_addr) + return; + + __acpi_unmap_table((char *)oem_addr, oem_size); +} + +static int es7000_check_dsdt(void) +{ + struct acpi_table_header header; + + if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && + !strncmp(header.oem_id, "UNISYS", 6)) + return 1; + return 0; +} + +/* Hook from generic ACPI tables.c */ +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + unsigned long oem_addr = 0; + int check_dsdt; + int ret = 0; + + /* check dsdt at first to avoid clear fix_map for oem_addr */ + check_dsdt = es7000_check_dsdt(); + + if (!find_unisys_acpi_oem_table(&oem_addr)) { + if (check_dsdt) { + ret = parse_unisys_oem((char *)oem_addr); + } else { + setup_unisys(); + ret = 1; + } + /* + * we need to unmap it + */ + unmap_unisys_acpi_oem_table(oem_addr); + } + return ret; +} +#else /* !CONFIG_ACPI: */ +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 0; +} +#endif /* !CONFIG_ACPI */ + +static void es7000_spin(int n) +{ + int i = 0; + + while (i++ < n) + rep_nop(); +} + +static int __init +es7000_mip_write(struct mip_reg *mip_reg) +{ + int status = 0; + int spin; + + spin = MIP_SPIN; + while ((host_reg->off_0x38 & MIP_VALID) != 
0) { + if (--spin <= 0) { + WARN(1, "Timeout waiting for Host Valid Flag\n"); + return -1; + } + es7000_spin(MIP_SPIN); + } + + memcpy(host_reg, mip_reg, sizeof(struct mip_reg)); + outb(1, mip_port); + + spin = MIP_SPIN; + + while ((mip_reg->off_0x38 & MIP_VALID) == 0) { + if (--spin <= 0) { + WARN(1, "Timeout waiting for MIP Valid Flag\n"); + return -1; + } + es7000_spin(MIP_SPIN); + } + + status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48; + mip_reg->off_0x38 &= ~MIP_VALID; + + return status; +} + +static void __init es7000_enable_apic_mode(void) +{ + struct mip_reg es7000_mip_reg; + int mip_status; + + if (!es7000_plat) + return; + + printk(KERN_INFO "ES7000: Enabling APIC mode.\n"); + memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); + es7000_mip_reg.off_0x00 = MIP_SW_APIC; + es7000_mip_reg.off_0x38 = MIP_VALID; + + while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) + WARN(1, "Command failed, status = %x\n", mip_status); +} + +static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + + +static void es7000_wait_for_init_deassert(atomic_t *deassert) +{ +#ifndef CONFIG_ES7000_CLUSTERED_APIC + while (!atomic_read(deassert)) + cpu_relax(); +#endif + return; +} + +static unsigned int es7000_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence_phys(mask, vector); +} + +static void es7000_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); +} + +static void es7000_send_IPI_all(int vector) +{ + es7000_send_IPI_mask(cpu_online_mask, vector); +} + +static int es7000_apic_id_registered(void) +{ + return 1; +} + +static const cpumask_t *target_cpus_cluster(void) +{ + return &CPU_MASK_ALL; +} + +static const cpumask_t *es7000_target_cpus(void) +{ + return &cpumask_of_cpu(smp_processor_id()); +} + +static unsigned long +es7000_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} +static unsigned long es7000_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static unsigned long calculate_ldr(int cpu) +{ + unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); + + return SET_APIC_LOGICAL_ID(id); +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LdR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +static void es7000_init_apic_ldr_cluster(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_CLUSTER); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_init_apic_ldr(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_FLAT); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + +static void es7000_setup_apic_routing(void) +{ + int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); + + printk(KERN_INFO + "Enabling APIC mode: %s. 
Using %d I/O APICs, target cpus %lx\n", + (apic_version[apic] == 0x14) ? + "Physical Cluster" : "Logical Cluster", + nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); +} + +static int es7000_apicid_to_node(int logical_apicid) +{ + return 0; +} + + +static int es7000_cpu_present_to_apicid(int mps_cpu) +{ + if (!mps_cpu) + return boot_cpu_physical_apicid; + else if (mps_cpu < nr_cpu_ids) + return per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static int cpu_id; + +static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) +{ + physid_mask_t mask; + + mask = physid_mask_of_physid(cpu_id); + ++cpu_id; + + return mask; +} + +/* Mapping from cpu number to logical apicid */ +static int es7000_cpu_to_logical_apicid(int cpu) +{ +#ifdef CONFIG_SMP + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif +} + +static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0xff); +} + +static int es7000_check_phys_apicid_present(int cpu_physical_apicid) +{ + boot_cpu_physical_apicid = read_apic_id(); + return 1; +} + +static unsigned int +es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpumask_weight(cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = cpumask_first(cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + + while (cpus_found < num_bits_set) { + if (cpumask_test_cpu(cpu, cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { + WARN(1, "Not a valid mask!"); + + return 0xFF; + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpus_weight(*cpumask); + /* Return id to all */ + if (num_bits_set == nr_cpu_ids) + return es7000_cpu_to_logical_apicid(0); + /* + * The cpus in the mask must all be on the apic cluster. 
If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = first_cpu(*cpumask); + apicid = es7000_cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask)) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); + + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { + printk("%s: Not a valid mask!\n", __func__); + + return es7000_cpu_to_logical_apicid(0); + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static unsigned int +es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) +{ + int apicid = es7000_cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = es7000_cpu_mask_to_apicid(cpumask); + + free_cpumask_var(cpumask); + + return apicid; +} + +static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +void __init es7000_update_apic_to_cluster(void) +{ + apic->target_cpus = target_cpus_cluster; + apic->irq_delivery_mode = dest_LowestPrio; + /* logical delivery broadcast to all procs: */ + apic->irq_dest_mode = 1; + + apic->init_apic_ldr = es7000_init_apic_ldr_cluster; + + apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; +} + +static int probe_es7000(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +static __init int +es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + if (mpc->oemptr) { + struct mpc_oemtable *oem_table = + (struct mpc_oemtable *)mpc->oemptr; + + if (!strncmp(oem, "UNISYS", 6)) + return parse_unisys_oem((char *)oem_table); + } + return 0; +} + + +struct apic apic_es7000 = { + + .name = "es7000", + .probe = probe_es7000, + .acpi_madt_oem_check = es7000_acpi_madt_oem_check, + .apic_id_registered = es7000_apic_id_registered, + + .irq_delivery_mode = dest_Fixed, + /* phys delivery to target CPUs: */ + .irq_dest_mode = 0, + + .target_cpus = es7000_target_cpus, + .disable_esr = 1, + .dest_logical = 0, + .check_apicid_used = es7000_check_apicid_used, + .check_apicid_present = es7000_check_apicid_present, + + .vector_allocation_domain = es7000_vector_allocation_domain, + .init_apic_ldr = es7000_init_apic_ldr, + + .ioapic_phys_id_map = es7000_ioapic_phys_id_map, + .setup_apic_routing = es7000_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = es7000_apicid_to_node, + .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, + .cpu_present_to_apicid = es7000_cpu_present_to_apicid, + .apicid_to_cpu_present = es7000_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = es7000_check_phys_apicid_present, + .enable_apic_mode = es7000_enable_apic_mode, + .phys_pkg_id = es7000_phys_pkg_id, + .mps_oem_check = es7000_mps_oem_check, + + .get_apic_id = es7000_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, + + .send_IPI_mask = es7000_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = es7000_send_IPI_allbutself, + .send_IPI_all = es7000_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = NULL, + + .trampoline_phys_low = 0x467, + .trampoline_phys_high = 0x469, + + .wait_for_init_deassert = es7000_wait_for_init_deassert, + + /* Nothing to do for most platforms, 
since cleared by the INIT cycle: */ + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c new file mode 100644 index 00000000000..d9d6d61eed8 --- /dev/null +++ b/arch/x86/kernel/apic/numaq_32.c @@ -0,0 +1,565 @@ +/* + * Written by: Patricia Gaughen, IBM Corporation + * + * Copyright (C) 2002, IBM Corp. + * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) + +int found_numaq; + +/* + * Have to match translation table entries to main table entries by counter + * hence the mpc_record variable .... can't see a less disgusting way of + * doing this .... + */ +struct mpc_trans { + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; +}; + +/* x86_quirks member */ +static int mpc_record; + +static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; + +int mp_bus_id_to_node[MAX_MP_BUSSES]; +int mp_bus_id_to_local[MAX_MP_BUSSES]; +int quad_local_to_mp_bus_id[NR_CPUS/4][4]; + + +static inline void numaq_register_node(int node, struct sys_cfg_data *scd) +{ + struct eachquadmem *eq = scd->eq + node; + + node_set_online(node); + + /* Convert to pages */ + node_start_pfn[node] = + MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size); + + node_end_pfn[node] = + MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); + + e820_register_active_regions(node, node_start_pfn[node], + node_end_pfn[node]); + + memory_present(node, node_start_pfn[node], node_end_pfn[node]); + + node_remap_size[node] = node_memmap_size_bytes(node, + node_start_pfn[node], + node_end_pfn[node]); +} + +/* + * Function: smp_dump_qct() + * + * Description: gets memory layout from the quad config table. This + * function also updates node_online_map with the nodes (quads) present. 
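For readers following the quad registration above: MB_TO_PAGES() converts a megabyte count into a page-frame count by shifting left by (20 - PAGE_SHIFT), i.e. by 8 with 4 KiB pages, and the sys_cfg_data fields appear to be expressed in MiB. A minimal worked example, with invented values rather than anything read from a real quad config table:

        /*
         * Hypothetical quad: 512 MiB of private memory sitting below a
         * shared window that starts at 1024 MiB and is 2048 MiB long.
         *
         *   eq->priv_mem_size     = 512   (MiB)
         *   eq->hi_shrd_mem_start = 1024  (MiB)
         *   eq->hi_shrd_mem_size  = 2048  (MiB)
         *
         * With PAGE_SHIFT == 12, MB_TO_PAGES(x) == x << 8, so:
         *   node_start_pfn = MB_TO_PAGES(1024 - 512)  = 131072
         *   node_end_pfn   = MB_TO_PAGES(1024 + 2048) = 786432
         */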
+ */ +static void __init smp_dump_qct(void) +{ + struct sys_cfg_data *scd; + int node; + + scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); + + nodes_clear(node_online_map); + for_each_node(node) { + if (scd->quads_present31_0 & (1 << node)) + numaq_register_node(node, scd); + } +} + +void __cpuinit numaq_tsc_disable(void) +{ + if (!found_numaq) + return; + + if (num_online_nodes() > 1) { + printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); + setup_clear_cpu_cap(X86_FEATURE_TSC); + } +} + +static int __init numaq_pre_time_init(void) +{ + numaq_tsc_disable(); + return 0; +} + +static inline int generate_logical_apicid(int quad, int phys_apicid) +{ + return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1); +} + +/* x86_quirks member */ +static int mpc_apic_id(struct mpc_cpu *m) +{ + int quad = translation_table[mpc_record]->trans_quad; + int logical_apicid = generate_logical_apicid(quad, m->apicid); + + printk(KERN_DEBUG + "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", + m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, + (m->cpufeature & CPU_MODEL_MASK) >> 4, + m->apicver, quad, logical_apicid); + + return logical_apicid; +} + +/* x86_quirks member */ +static void mpc_oem_bus_info(struct mpc_bus *m, char *name) +{ + int quad = translation_table[mpc_record]->trans_quad; + int local = translation_table[mpc_record]->trans_local; + + mp_bus_id_to_node[m->busid] = quad; + mp_bus_id_to_local[m->busid] = local; + + printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad); +} + +/* x86_quirks member */ +static void mpc_oem_pci_bus(struct mpc_bus *m) +{ + int quad = translation_table[mpc_record]->trans_quad; + int local = translation_table[mpc_record]->trans_local; + + quad_local_to_mp_bus_id[quad][local] = m->busid; +} + +static void __init MP_translation_info(struct mpc_trans *m) +{ + printk(KERN_INFO + "Translation: record %d, type %d, quad %d, global %d, local %d\n", + mpc_record, m->trans_type, m->trans_quad, m->trans_global, + m->trans_local); + + if (mpc_record >= MAX_MPC_ENTRY) + printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); + else + translation_table[mpc_record] = m; /* stash this for later */ + + if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) + node_set_online(m->trans_quad); +} + +static int __init mpf_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return sum & 0xFF; +} + +/* + * Read/parse the MPC oem tables + */ +static void __init + smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize) +{ + int count = sizeof(*oemtable); /* the header size */ + unsigned char *oemptr = ((unsigned char *)oemtable) + count; + + mpc_record = 0; + printk(KERN_INFO + "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); + + if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) { + printk(KERN_WARNING + "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", + oemtable->signature[0], oemtable->signature[1], + oemtable->signature[2], oemtable->signature[3]); + return; + } + + if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) { + printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); + return; + } + + while (count < oemtable->length) { + switch (*oemptr) { + case MP_TRANSLATION: + { + struct mpc_trans *m = (void *)oemptr; + + MP_translation_info(m); + oemptr += sizeof(*m); + count += sizeof(*m); + ++mpc_record; + break; + } + default: + printk(KERN_WARNING + "Unrecognised OEM table entry type! 
- %d\n", + (int)*oemptr); + return; + } + } +} + +static int __init numaq_setup_ioapic_ids(void) +{ + /* so can skip it */ + return 1; +} + +static int __init numaq_update_apic(void) +{ + apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; + + return 0; +} + +static struct x86_quirks numaq_x86_quirks __initdata = { + .arch_pre_time_init = numaq_pre_time_init, + .arch_time_init = NULL, + .arch_pre_intr_init = NULL, + .arch_memory_setup = NULL, + .arch_intr_init = NULL, + .arch_trap_init = NULL, + .mach_get_smp_config = NULL, + .mach_find_smp_config = NULL, + .mpc_record = &mpc_record, + .mpc_apic_id = mpc_apic_id, + .mpc_oem_bus_info = mpc_oem_bus_info, + .mpc_oem_pci_bus = mpc_oem_pci_bus, + .smp_read_mpc_oem = smp_read_mpc_oem, + .setup_ioapic_ids = numaq_setup_ioapic_ids, + .update_apic = numaq_update_apic, +}; + +static __init void early_check_numaq(void) +{ + /* + * Find possible boot-time SMP configuration: + */ + early_find_smp_config(); + + /* + * get boot-time SMP configuration: + */ + if (smp_found_config) + early_get_smp_config(); + + if (found_numaq) + x86_quirks = &numaq_x86_quirks; +} + +int __init get_memcfg_numaq(void) +{ + early_check_numaq(); + if (!found_numaq) + return 0; + smp_dump_qct(); + + return 1; +} + +#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) + +static inline unsigned int numaq_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0x0F; +} + +static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) +{ + default_send_IPI_mask_sequence_logical(mask, vector); +} + +static inline void numaq_send_IPI_allbutself(int vector) +{ + default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector); +} + +static inline void numaq_send_IPI_all(int vector) +{ + numaq_send_IPI_mask(cpu_online_mask, vector); +} + +#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) +#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) + +/* + * Because we use NMIs rather than the INIT-STARTUP sequence to + * bootstrap the CPUs, the APIC may be in a weird state. Kick it: + */ +static inline void numaq_smp_callin_clear_local_apic(void) +{ + clear_local_APIC(); +} + +static inline const cpumask_t *numaq_target_cpus(void) +{ + return &CPU_MASK_ALL; +} + +static inline unsigned long +numaq_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long numaq_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline int numaq_apic_id_registered(void) +{ + return 1; +} + +static inline void numaq_init_apic_ldr(void) +{ + /* Already done in NUMA-Q firmware */ +} + +static inline void numaq_setup_apic_routing(void) +{ + printk(KERN_INFO + "Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n", + nr_ioapics); +} + +/* + * Skip adding the timer int on secondary nodes, which causes + * a small but painful rift in the time-space continuum. + */ +static inline int numaq_multi_timer_check(int apic, int irq) +{ + return apic != 0 && irq == 0; +} + +static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) +{ + /* We don't have a good way to do this yet - hack */ + return physids_promote(0xFUL); +} + +static inline int numaq_cpu_to_logical_apicid(int cpu) +{ + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return cpu_2_logical_apicid[cpu]; +} + +/* + * Supporting over 60 cpus on NUMA-Q requires a locality-dependent + * cpu to APIC ID relation to properly interact with the intelligent + * mode of the cluster controller. 
+ */ +static inline int numaq_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < 60) + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + else + return BAD_APICID; +} + +static inline int numaq_apicid_to_node(int logical_apicid) +{ + return logical_apicid >> 4; +} + +static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) +{ + int node = numaq_apicid_to_node(logical_apicid); + int cpu = __ffs(logical_apicid & 0xf); + + return physid_mask_of_physid(cpu + 4*node); +} + +/* Where the IO area was mapped on multiquad, always 0 otherwise */ +void *xquad_portio; + +static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +/* + * We use physical apicids here, not logical, so just return the default + * physical broadcast to stop people from breaking us + */ +static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + return 0x0F; +} + +static inline unsigned int +numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + return 0x0F; +} + +/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ +static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +static int +numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + if (strncmp(oem, "IBM NUMA", 8)) + printk(KERN_ERR "Warning! Not a NUMA-Q system!\n"); + else + found_numaq = 1; + + return found_numaq; +} + +static int probe_numaq(void) +{ + /* already know from get_memcfg_numaq() */ + return found_numaq; +} + +static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. 
+ */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + +static void numaq_setup_portio_remap(void) +{ + int num_quads = num_online_nodes(); + + if (num_quads <= 1) + return; + + printk(KERN_INFO + "Remapping cross-quad port I/O for %d quads\n", num_quads); + + xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); + + printk(KERN_INFO + "xquad_portio vaddr 0x%08lx, len %08lx\n", + (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); +} + +struct apic apic_numaq = { + + .name = "NUMAQ", + .probe = probe_numaq, + .acpi_madt_oem_check = NULL, + .apic_id_registered = numaq_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* physical delivery on LOCAL quad: */ + .irq_dest_mode = 0, + + .target_cpus = numaq_target_cpus, + .disable_esr = 1, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = numaq_check_apicid_used, + .check_apicid_present = numaq_check_apicid_present, + + .vector_allocation_domain = numaq_vector_allocation_domain, + .init_apic_ldr = numaq_init_apic_ldr, + + .ioapic_phys_id_map = numaq_ioapic_phys_id_map, + .setup_apic_routing = numaq_setup_apic_routing, + .multi_timer_check = numaq_multi_timer_check, + .apicid_to_node = numaq_apicid_to_node, + .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, + .cpu_present_to_apicid = numaq_cpu_present_to_apicid, + .apicid_to_cpu_present = numaq_apicid_to_cpu_present, + .setup_portio_remap = numaq_setup_portio_remap, + .check_phys_apicid_present = numaq_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = numaq_phys_pkg_id, + .mps_oem_check = numaq_mps_oem_check, + + .get_apic_id = numaq_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0x0F << 24, + + .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, + + .send_IPI_mask = numaq_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = numaq_send_IPI_allbutself, + .send_IPI_all = numaq_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, + + /* We don't do anything here because we use NMI's to boot instead */ + .wait_for_init_deassert = NULL, + + .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, + .inquire_remote_apic = NULL, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c new file mode 100644 index 00000000000..5fa48332c5c --- /dev/null +++ b/arch/x86/kernel/apic/probe_32.c @@ -0,0 +1,424 @@ +/* + * Default generic APIC driver. This handles up to 8 CPUs. + * + * Copyright 2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + * + * Generic x86 APIC driver probe layer. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HOTPLUG_CPU +#define DEFAULT_SEND_IPI (1) +#else +#define DEFAULT_SEND_IPI (0) +#endif + +int no_broadcast = DEFAULT_SEND_IPI; + +#ifdef CONFIG_X86_LOCAL_APIC + +void default_setup_apic_routing(void) +{ +#ifdef CONFIG_X86_IO_APIC + printk(KERN_INFO + "Enabling APIC mode: Flat. Using %d I/O APICs\n", + nr_ioapics); +#endif +} + +static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* + * Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; +} + +/* should be called last. */ +static int probe_default(void) +{ + return 1; +} + +struct apic apic_default = { + + .name = "default", + .probe = probe_default, + .acpi_madt_oem_check = NULL, + .apic_id_registered = default_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, + + .target_cpus = default_target_cpus, + .disable_esr = 0, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = default_check_apicid_used, + .check_apicid_present = default_check_apicid_present, + + .vector_allocation_domain = default_vector_allocation_domain, + .init_apic_ldr = default_init_apic_ldr, + + .ioapic_phys_id_map = default_ioapic_phys_id_map, + .setup_apic_routing = default_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = default_apicid_to_node, + .cpu_to_logical_apicid = default_cpu_to_logical_apicid, + .cpu_present_to_apicid = default_cpu_present_to_apicid, + .apicid_to_cpu_present = default_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = default_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = default_phys_pkg_id, + .mps_oem_check = NULL, + + .get_apic_id = default_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0x0F << 24, + + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + + .send_IPI_mask = default_send_IPI_mask_logical, + .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, + .send_IPI_allbutself = default_send_IPI_allbutself, + .send_IPI_all = default_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; + +extern struct apic apic_numaq; +extern struct apic apic_summit; +extern struct apic apic_bigsmp; +extern struct apic apic_es7000; +extern struct apic apic_default; + +struct apic 
*apic = &apic_default; +EXPORT_SYMBOL_GPL(apic); + +static struct apic *apic_probe[] __initdata = { +#ifdef CONFIG_X86_NUMAQ + &apic_numaq, +#endif +#ifdef CONFIG_X86_SUMMIT + &apic_summit, +#endif +#ifdef CONFIG_X86_BIGSMP + &apic_bigsmp, +#endif +#ifdef CONFIG_X86_ES7000 + &apic_es7000, +#endif + &apic_default, /* must be last */ + NULL, +}; + +static int cmdline_apic __initdata; +static int __init parse_apic(char *arg) +{ + int i; + + if (!arg) + return -EINVAL; + + for (i = 0; apic_probe[i]; i++) { + if (!strcmp(apic_probe[i]->name, arg)) { + apic = apic_probe[i]; + cmdline_apic = 1; + return 0; + } + } + + if (x86_quirks->update_apic) + x86_quirks->update_apic(); + + /* Parsed again by __setup for debug/verbose */ + return 0; +} +early_param("apic", parse_apic); + +void __init generic_bigsmp_probe(void) +{ +#ifdef CONFIG_X86_BIGSMP + /* + * This routine is used to switch to bigsmp mode when + * - There is no apic= option specified by the user + * - generic_apic_probe() has chosen apic_default as the sub_arch + * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support + */ + + if (!cmdline_apic && apic == &apic_default) { + if (apic_bigsmp.probe()) { + apic = &apic_bigsmp; + if (x86_quirks->update_apic) + x86_quirks->update_apic(); + printk(KERN_INFO "Overriding APIC driver with %s\n", + apic->name); + } + } +#endif +} + +void __init generic_apic_probe(void) +{ + if (!cmdline_apic) { + int i; + for (i = 0; apic_probe[i]; i++) { + if (apic_probe[i]->probe()) { + apic = apic_probe[i]; + break; + } + } + /* Not visible without early console */ + if (!apic_probe[i]) + panic("Didn't find an APIC driver"); + + if (x86_quirks->update_apic) + x86_quirks->update_apic(); + } + printk(KERN_INFO "Using APIC driver %s\n", apic->name); +} + +/* These functions can switch the APIC even after the initial ->probe() */ + +int __init +generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (!apic_probe[i]->mps_oem_check) + continue; + if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_apic) + x86_quirks->update_apic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); + } + return 1; + } + return 0; +} + +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (!apic_probe[i]->acpi_madt_oem_check) + continue; + if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) + continue; + + if (!cmdline_apic) { + apic = apic_probe[i]; + if (x86_quirks->update_apic) + x86_quirks->update_apic(); + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + apic->name); + } + return 1; + } + return 0; +} + +#endif /* CONFIG_X86_LOCAL_APIC */ + +/** + * pre_intr_init_hook - initialisation prior to setting up interrupt vectors + * + * Description: + * Perform any necessary interrupt initialisation prior to setting up + * the "ordinary" interrupt call gates. For legacy reasons, the ISA + * interrupts should be initialised here if the machine emulates a PC + * in any way. + **/ +void __init pre_intr_init_hook(void) +{ + if (x86_quirks->arch_pre_intr_init) { + if (x86_quirks->arch_pre_intr_init()) + return; + } + init_ISA_irqs(); +} + +/** + * intr_init_hook - post gate setup interrupt initialisation + * + * Description: + * Fill in any interrupts that may have been left out by the general + * init_IRQ() routine. 
interrupts having to do with the machine rather + * than the devices on the I/O bus (like APIC interrupts in intel MP + * systems) are started here. + **/ +void __init intr_init_hook(void) +{ + if (x86_quirks->arch_intr_init) { + if (x86_quirks->arch_intr_init()) + return; + } +} + +/** + * pre_setup_arch_hook - hook called prior to any setup_arch() execution + * + * Description: + * generally used to activate any machine specific identification + * routines that may be needed before setup_arch() runs. On Voyager + * this is used to get the board revision and type. + **/ +void __init pre_setup_arch_hook(void) +{ +} + +/** + * trap_init_hook - initialise system specific traps + * + * Description: + * Called as the final act of trap_init(). Used in VISWS to initialise + * the various board specific APIC traps. + **/ +void __init trap_init_hook(void) +{ + if (x86_quirks->arch_trap_init) { + if (x86_quirks->arch_trap_init()) + return; + } +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, + .mask = CPU_MASK_NONE, + .name = "timer" +}; + +/** + * pre_time_init_hook - do any specific initialisations before. + * + **/ +void __init pre_time_init_hook(void) +{ + if (x86_quirks->arch_pre_time_init) + x86_quirks->arch_pre_time_init(); +} + +/** + * time_init_hook - do any specific initialisations for the system timer. + * + * Description: + * Must plug the system timer interrupt source at HZ into the IRQ listed + * in irq_vectors.h:TIMER_IRQ + **/ +void __init time_init_hook(void) +{ + if (x86_quirks->arch_time_init) { + /* + * A nonzero return code does not mean failure, it means + * that the architecture quirk does not want any + * generic (timer) setup to be performed after this: + */ + if (x86_quirks->arch_time_init()) + return; + } + + irq0.mask = cpumask_of_cpu(0); + setup_irq(0, &irq0); +} + +#ifdef CONFIG_MCA +/** + * mca_nmi_hook - hook into MCA specific NMI chain + * + * Description: + * The MCA (Microchannel Architecture) has an NMI chain for NMI sources + * along the MCA bus. Use this to hook into that chain if you will need + * it. + **/ +void mca_nmi_hook(void) +{ + /* + * If I recall correctly, there's a whole bunch of other things that + * we can do to check for NMI problems, but that's all I know about + * at the moment. + */ + pr_warning("NMI generated from unknown source!\n"); +} +#endif + +static __init int no_ipi_broadcast(char *str) +{ + get_option(&str, &no_broadcast); + pr_info("Using %s mode\n", + no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); + return 1; +} +__setup("no_ipi_broadcast=", no_ipi_broadcast); + +static int __init print_ipi_mode(void) +{ + pr_info("Using IPI %s mode\n", + no_broadcast ? "No-Shortcut" : "Shortcut"); + return 0; +} + +late_initcall(print_ipi_mode); + diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c new file mode 100644 index 00000000000..70935dd904d --- /dev/null +++ b/arch/x86/kernel/apic/probe_64.c @@ -0,0 +1,89 @@ +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch probe layer. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +extern struct apic apic_flat; +extern struct apic apic_physflat; +extern struct apic apic_x2xpic_uv_x; +extern struct apic apic_x2apic_phys; +extern struct apic apic_x2apic_cluster; + +struct apic __read_mostly *apic = &apic_flat; +EXPORT_SYMBOL_GPL(apic); + +static struct apic *apic_probe[] __initdata = { +#ifdef CONFIG_X86_UV + &apic_x2apic_uv_x, +#endif +#ifdef CONFIG_X86_X2APIC + &apic_x2apic_phys, + &apic_x2apic_cluster, +#endif + &apic_physflat, + NULL, +}; + +/* + * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. + */ +void __init default_setup_apic_routing(void) +{ +#ifdef CONFIG_X86_X2APIC + if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { + if (!intr_remapping_enabled) + apic = &apic_flat; + } +#endif + + if (apic == &apic_flat) { + if (max_physical_apicid >= 8) + apic = &apic_physflat; + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); + } + + if (x86_quirks->update_apic) + x86_quirks->update_apic(); +} + +/* Same for both flat and physical. */ + +void apic_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); +} + +int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + int i; + + for (i = 0; apic_probe[i]; ++i) { + if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { + apic = apic_probe[i]; + printk(KERN_INFO "Setting APIC routing to %s.\n", + apic->name); + return 1; + } + } + return 0; +} diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c new file mode 100644 index 00000000000..cfe7b09015d --- /dev/null +++ b/arch/x86/kernel/apic/summit_32.c @@ -0,0 +1,601 @@ +/* + * IBM Summit-Specific Code + * + * Written By: Matthew Dobson, IBM Corporation + * + * Copyright (c) 2003 IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to + * + */ + +#include +#include +#include +#include + +/* + * APIC driver for the IBM "Summit" chipset. 
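To make the 64-bit selection in default_setup_apic_routing() above easier to follow, here is a hypothetical helper that mirrors its decision tree; it is illustration only, not part of the patch, and assumes that logical flat mode can address at most 8 CPUs through its 8-bit LDR bitmap:

        /* Illustration only -- mirrors default_setup_apic_routing() above. */
        static const char *
        pick_apic_driver_64(int want_x2apic, int intr_remapping_enabled,
                            int max_physical_apicid)
        {
                if (want_x2apic && intr_remapping_enabled)
                        return "x2apic";        /* keep x2apic_phys/x2apic_cluster */
                if (max_physical_apicid < 8)
                        return "flat";          /* logical flat, one LDR bit per CPU */
                return "physflat";              /* physical destination IDs */
        }

Without interrupt remapping the x2APIC drivers fall back to flat, and from there a box with physical APIC IDs of 8 or above is pushed on to physflat.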
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline unsigned summit_get_apic_id(unsigned long x) +{ + return (x >> 24) & 0xFF; +} + +static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) +{ + default_send_IPI_mask_sequence_logical(mask, vector); +} + +static inline void summit_send_IPI_allbutself(int vector) +{ + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); + + if (!cpus_empty(mask)) + summit_send_IPI_mask(&mask, vector); +} + +static inline void summit_send_IPI_all(int vector) +{ + summit_send_IPI_mask(&cpu_online_map, vector); +} + +#include + +extern int use_cyclone; + +#ifdef CONFIG_X86_SUMMIT_NUMA +extern void setup_summit(void); +#else +#define setup_summit() {} +#endif + +static inline int +summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +{ + if (!strncmp(oem, "IBM ENSW", 8) && + (!strncmp(productid, "VIGIL SMP", 9) + || !strncmp(productid, "EXA", 3) + || !strncmp(productid, "RUTHLESS SMP", 12))){ + mark_tsc_unstable("Summit based system"); + use_cyclone = 1; /*enable cyclone-timer*/ + setup_summit(); + return 1; + } + return 0; +} + +/* Hook from generic ACPI tables.c */ +static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (!strncmp(oem_id, "IBM", 3) && + (!strncmp(oem_table_id, "SERVIGIL", 8) + || !strncmp(oem_table_id, "EXA", 3))){ + mark_tsc_unstable("Summit based system"); + use_cyclone = 1; /*enable cyclone-timer*/ + setup_summit(); + return 1; + } + return 0; +} + +struct rio_table_hdr { + unsigned char version; /* Version number of this data structure */ + /* Version 3 adds chassis_num & WP_index */ + unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ + unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ +} __attribute__((packed)); + +struct scal_detail { + unsigned char node_id; /* Scalability Node ID */ + unsigned long CBAR; /* Address of 1MB register space */ + unsigned char port0node; /* Node ID port connected to: 0xFF=None */ + unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port1node; /* Node ID port connected to: 0xFF = None */ + unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port2node; /* Node ID port connected to: 0xFF = None */ + unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ +} __attribute__((packed)); + +struct rio_detail { + unsigned char node_id; /* RIO Node ID */ + unsigned long BBAR; /* Address of 1MB register space */ + unsigned char type; /* Type of device */ + unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ + /* For CYC: Node ID of Twister that owns this CYC */ + unsigned char port0node; /* Node ID port connected to: 0xFF=None */ + unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char port1node; /* Node ID port connected to: 0xFF=None */ + unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ + unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ + /* For CYC: 0 */ + unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ + /* = 0 : the XAPIC is not used, ie:*/ + /* ints fwded to another XAPIC */ + /* Bits1:7 Reserved */ + /* For CYC: Bits0:7 Reserved */ + unsigned char 
WP_index; /* For WPEG: WPEG instance index - lower ones have */ + /* lower slot numbers/PCI bus numbers */ + /* For CYC: No meaning */ + unsigned char chassis_num; /* 1 based Chassis number */ + /* For LookOut WPEGs this field indicates the */ + /* Expansion Chassis #, enumerated from Boot */ + /* Node WPEG external port, then Boot Node CYC */ + /* external port, then Next Vigil chassis WPEG */ + /* external port, etc. */ + /* Shared Lookouts have only 1 chassis number (the */ + /* first one assigned) */ +} __attribute__((packed)); + + +typedef enum { + CompatTwister = 0, /* Compatibility Twister */ + AltTwister = 1, /* Alternate Twister of internal 8-way */ + CompatCyclone = 2, /* Compatibility Cyclone */ + AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ + CompatWPEG = 4, /* Compatibility WPEG */ + AltWPEG = 5, /* Second Planar WPEG */ + LookOutAWPEG = 6, /* LookOut WPEG */ + LookOutBWPEG = 7, /* LookOut WPEG */ +} node_type; + +static inline int is_WPEG(struct rio_detail *rio){ + return (rio->type == CompatWPEG || rio->type == AltWPEG || + rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); +} + + +/* In clustered mode, the high nibble of APIC ID is a cluster number. + * The low nibble is a 4-bit bitmap. */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) + +#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) + +static inline const cpumask_t *summit_target_cpus(void) +{ + /* CPU_MASK_ALL (0xff) has undefined behaviour with + * dest_LowestPrio mode logical clustered apic interrupt routing + * Just start on cpu 0. IRQ balancing will spread load + */ + return &cpumask_of_cpu(0); +} + +static inline unsigned long +summit_check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return 0; +} + +/* we don't use the phys_cpu_present_map to indicate apicid presence */ +static inline unsigned long summit_check_apicid_present(int bit) +{ + return 1; +} + +static inline void summit_init_apic_ldr(void) +{ + unsigned long val, id; + int count = 0; + u8 my_id = (u8)hard_smp_processor_id(); + u8 my_cluster = APIC_CLUSTER(my_id); +#ifdef CONFIG_SMP + u8 lid; + int i; + + /* Create logical APIC IDs by counting CPUs already in cluster. */ + for (count = 0, i = nr_cpu_ids; --i >= 0; ) { + lid = cpu_2_logical_apicid[i]; + if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) + ++count; + } +#endif + /* We only have a 4 wide bitmap in cluster mode. If a deranged + * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ + BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); + id = my_cluster | (1UL << count); + apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(id); + apic_write(APIC_LDR, val); +} + +static inline int summit_apic_id_registered(void) +{ + return 1; +} + +static inline void summit_setup_apic_routing(void) +{ + printk("Enabling APIC mode: Summit. 
Using %d I/O APICs\n", + nr_ioapics); +} + +static inline int summit_apicid_to_node(int logical_apicid) +{ +#ifdef CONFIG_SMP + return apicid_2_node[hard_smp_processor_id()]; +#else + return 0; +#endif +} + +/* Mapping from cpu number to logical apicid */ +static inline int summit_cpu_to_logical_apicid(int cpu) +{ +#ifdef CONFIG_SMP + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif +} + +static inline int summit_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline physid_mask_t +summit_ioapic_phys_id_map(physid_mask_t phys_id_map) +{ + /* For clustered we don't have a good way to do this yet - hack */ + return physids_promote(0x0F); +} + +static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) +{ + return physid_mask_of_physid(0); +} + +static inline void summit_setup_portio_remap(void) +{ +} + +static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return 1; +} + +static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) +{ + int cpus_found = 0; + int num_bits_set; + int apicid; + int cpu; + + num_bits_set = cpus_weight(*cpumask); + /* Return id to all */ + if (num_bits_set >= nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of target_cpus(): + */ + cpu = first_cpu(*cpumask); + apicid = summit_cpu_to_logical_apicid(cpu); + + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask)) { + int new_apicid = summit_cpu_to_logical_apicid(cpu); + + if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { + printk ("%s: Not a valid mask!\n", __func__); + + return 0xFF; + } + apicid = apicid | new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static inline unsigned int +summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) +{ + int apicid = summit_cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = summit_cpu_mask_to_apicid(cpumask); + + free_cpumask_var(cpumask); + + return apicid; +} + +/* + * cpuid returns the value latched in the HW at reset, not the APIC ID + * register's value. For any box whose BIOS changes APIC IDs, like + * clustered APIC systems, we must use hard_smp_processor_id. + * + * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. + */ +static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return hard_smp_processor_id() >> index_msb; +} + +static int probe_summit(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. 
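A worked example for the two routines above, with logical IDs invented for illustration: summit_init_apic_ldr() hands out one-hot bits within a cluster, and summit_cpu_mask_to_apicid() simply ORs them together as long as every CPU in the mask shares a cluster.

        /*
         * Three CPUs whose physical APIC IDs fall in cluster 0x10 are
         * assigned logical IDs 0x11, 0x12 and 0x14 (my_cluster | 1 << count).
         *
         * A cpumask covering the first two:
         *   0x11 | 0x12 = 0x13   -> one destination addressing both CPUs
         *
         * Mixing in a CPU from cluster 0x20 (say logical 0x21) fails the
         * APIC_CLUSTER() comparison, prints "Not a valid mask!" and falls
         * back to the 0xFF broadcast ID.
         */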
+ */ + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; +} + +#ifdef CONFIG_X86_SUMMIT_NUMA +static struct rio_table_hdr *rio_table_hdr __initdata; +static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; +static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; + +#ifndef CONFIG_X86_NUMAQ +static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; +#endif + +static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) +{ + int twister = 0, node = 0; + int i, bus, num_buses; + + for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { + if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) { + twister = rio_devs[i]->owner_id; + break; + } + } + if (i == rio_table_hdr->num_rio_dev) { + printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); + return last_bus; + } + + for (i = 0; i < rio_table_hdr->num_scal_dev; i++) { + if (scal_devs[i]->node_id == twister) { + node = scal_devs[i]->node_id; + break; + } + } + if (i == rio_table_hdr->num_scal_dev) { + printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); + return last_bus; + } + + switch (rio_devs[wpeg_num]->type) { + case CompatWPEG: + /* + * The Compatibility Winnipeg controls the 2 legacy buses, + * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case + * a PCI-PCI bridge card is used in either slot: total 5 buses. + */ + num_buses = 5; + break; + case AltWPEG: + /* + * The Alternate Winnipeg controls the 2 133MHz buses [1 slot + * each], their 2 "extra" buses, the 100MHz bus [2 slots] and + * the "extra" buses for each of those slots: total 7 buses. + */ + num_buses = 7; + break; + case LookOutAWPEG: + case LookOutBWPEG: + /* + * A Lookout Winnipeg controls 3 100MHz buses [2 slots each] + * & the "extra" buses for each of those slots: total 9 buses. + */ + num_buses = 9; + break; + default: + printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); + return last_bus; + } + + for (bus = last_bus; bus < last_bus + num_buses; bus++) + mp_bus_id_to_node[bus] = node; + return bus; +} + +static int __init build_detail_arrays(void) +{ + unsigned long ptr; + int i, scal_detail_size, rio_detail_size; + + if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { + printk(KERN_WARNING "%s: MAX_NUMNODES too low! 
Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); + return 0; + } + + switch (rio_table_hdr->version) { + default: + printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); + return 0; + case 2: + scal_detail_size = 11; + rio_detail_size = 13; + break; + case 3: + scal_detail_size = 12; + rio_detail_size = 15; + break; + } + + ptr = (unsigned long)rio_table_hdr + 3; + for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) + scal_devs[i] = (struct scal_detail *)ptr; + + for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) + rio_devs[i] = (struct rio_detail *)ptr; + + return 1; +} + +void __init setup_summit(void) +{ + unsigned long ptr; + unsigned short offset; + int i, next_wpeg, next_bus = 0; + + /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */ + ptr = get_bios_ebda(); + ptr = (unsigned long)phys_to_virt(ptr); + + rio_table_hdr = NULL; + offset = 0x180; + while (offset) { + /* The block id is stored in the 2nd word */ + if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) { + /* set the pointer past the offset & block id */ + rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); + break; + } + /* The next offset is stored in the 1st word. 0 means no more */ + offset = *((unsigned short *)(ptr + offset)); + } + if (!rio_table_hdr) { + printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); + return; + } + + if (!build_detail_arrays()) + return; + + /* The first Winnipeg we're looking for has an index of 0 */ + next_wpeg = 0; + do { + for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { + if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) { + /* It's the Winnipeg we're looking for! */ + next_bus = setup_pci_node_map_for_wpeg(i, next_bus); + next_wpeg++; + break; + } + } + /* + * If we go through all Rio devices and don't find one with + * the next index, it means we've found all the Winnipegs, + * and thus all the PCI buses. 
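One detail of the EBDA scan in setup_summit() above that is easy to miss: the block id compared against 0x4752 is read as a little-endian 16-bit word, so in memory it is the ASCII bytes 'R' (0x52) followed by 'G' (0x47), presumably tagging the Rio Grande table. An equivalent check, spelled out byte by byte purely for illustration:

        /* Illustration only -- same test as the 0x4752 word compare above. */
        static int looks_like_rio_grande_block(const unsigned char *p)
        {
                return p[2] == 'R' && p[3] == 'G';  /* block id lives at offset 2 */
        }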
+ */ + if (i == rio_table_hdr->num_rio_dev) + next_wpeg = 0; + } while (next_wpeg != 0); +} +#endif + +struct apic apic_summit = { + + .name = "summit", + .probe = probe_summit, + .acpi_madt_oem_check = summit_acpi_madt_oem_check, + .apic_id_registered = summit_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all CPUs: */ + .irq_dest_mode = 1, + + .target_cpus = summit_target_cpus, + .disable_esr = 1, + .dest_logical = APIC_DEST_LOGICAL, + .check_apicid_used = summit_check_apicid_used, + .check_apicid_present = summit_check_apicid_present, + + .vector_allocation_domain = summit_vector_allocation_domain, + .init_apic_ldr = summit_init_apic_ldr, + + .ioapic_phys_id_map = summit_ioapic_phys_id_map, + .setup_apic_routing = summit_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = summit_apicid_to_node, + .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, + .cpu_present_to_apicid = summit_cpu_present_to_apicid, + .apicid_to_cpu_present = summit_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = summit_check_phys_apicid_present, + .enable_apic_mode = NULL, + .phys_pkg_id = summit_phys_pkg_id, + .mps_oem_check = summit_mps_oem_check, + + .get_apic_id = summit_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, + + .send_IPI_mask = summit_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = summit_send_IPI_allbutself, + .send_IPI_all = summit_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = NULL, + .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, + .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, + + .wait_for_init_deassert = default_wait_for_init_deassert, + + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c deleted file mode 100644 index 0b1093394fd..00000000000 --- a/arch/x86/kernel/bigsmp_32.c +++ /dev/null @@ -1,274 +0,0 @@ -/* - * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. - * - * Drives the local APIC in "clustered mode". - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static inline unsigned bigsmp_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -static inline int bigsmp_apic_id_registered(void) -{ - return 1; -} - -static inline const cpumask_t *bigsmp_target_cpus(void) -{ -#ifdef CONFIG_SMP - return &cpu_online_map; -#else - return &cpumask_of_cpu(0); -#endif -} - -static inline unsigned long -bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -static inline unsigned long bigsmp_check_apicid_present(int bit) -{ - return 1; -} - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = per_cpu(x86_bios_cpu_apicid, cpu); - val |= SET_APIC_LOGICAL_ID(id); - - return val; -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. 
"AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void bigsmp_init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static inline void bigsmp_setup_apic_routing(void) -{ - printk(KERN_INFO - "Enabling APIC mode: Physflat. Using %d I/O APICs\n", - nr_ioapics); -} - -static inline int bigsmp_apicid_to_node(int logical_apicid) -{ - return apicid_2_node[hard_smp_processor_id()]; -} - -static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids) - return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); - - return BAD_APICID; -} - -static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -/* Mapping from cpu number to logical apicid */ -static inline int bigsmp_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_physical_id(cpu); -} - -static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xFFL); -} - -static inline void bigsmp_setup_portio_remap(void) -{ -} - -static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -/* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); -} - -static inline unsigned int -bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - int cpu; - - /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. - * May as well be the first. 
- */ - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - if (cpu < nr_cpu_ids) - return bigsmp_cpu_to_logical_apicid(cpu); - - return BAD_APICID; -} - -static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence_phys(mask, vector); -} - -static inline void bigsmp_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); -} - -static inline void bigsmp_send_IPI_all(int vector) -{ - bigsmp_send_IPI_mask(cpu_online_mask, vector); -} - -static int dmi_bigsmp; /* can be set by dmi scanners */ - -static int hp_ht_bigsmp(const struct dmi_system_id *d) -{ - printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); - dmi_bigsmp = 1; - - return 0; -} - - -static const struct dmi_system_id bigsmp_dmi_table[] = { - { hp_ht_bigsmp, "HP ProLiant DL760 G2", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P44-"), - } - }, - - { hp_ht_bigsmp, "HP ProLiant DL740", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P47-"), - } - }, - { } /* NULL entry stops DMI scanning */ -}; - -static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - cpus_clear(*retmask); - cpu_set(cpu, *retmask); -} - -static int probe_bigsmp(void) -{ - if (def_to_bigsmp) - dmi_bigsmp = 1; - else - dmi_check_system(bigsmp_dmi_table); - - return dmi_bigsmp; -} - -struct apic apic_bigsmp = { - - .name = "bigsmp", - .probe = probe_bigsmp, - .acpi_madt_oem_check = NULL, - .apic_id_registered = bigsmp_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - /* phys delivery to target CPU: */ - .irq_dest_mode = 0, - - .target_cpus = bigsmp_target_cpus, - .disable_esr = 1, - .dest_logical = 0, - .check_apicid_used = bigsmp_check_apicid_used, - .check_apicid_present = bigsmp_check_apicid_present, - - .vector_allocation_domain = bigsmp_vector_allocation_domain, - .init_apic_ldr = bigsmp_init_apic_ldr, - - .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, - .setup_apic_routing = bigsmp_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = bigsmp_apicid_to_node, - .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, - .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, - .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = bigsmp_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = bigsmp_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = bigsmp_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, - - .send_IPI_mask = bigsmp_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = bigsmp_send_IPI_allbutself, - .send_IPI_all = bigsmp_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = default_inquire_remote_apic, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - 
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c deleted file mode 100644 index 320f2d2e4e5..00000000000 --- a/arch/x86/kernel/es7000_32.c +++ /dev/null @@ -1,757 +0,0 @@ -/* - * Written by: Garry Forsgren, Unisys Corporation - * Natalie Protasevich, Unisys Corporation - * - * This file contains the code to configure and interface - * with Unisys ES7000 series hardware system manager. - * - * Copyright (c) 2003 Unisys Corporation. - * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar - * - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * Contact information: Unisys Corporation, Township Line & Union Meeting - * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or: - * - * http://www.unisys.com - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -/* - * ES7000 chipsets - */ - -#define NON_UNISYS 0 -#define ES7000_CLASSIC 1 -#define ES7000_ZORRO 2 - -#define MIP_REG 1 -#define MIP_PSAI_REG 4 - -#define MIP_BUSY 1 -#define MIP_SPIN 0xf0000 -#define MIP_VALID 0x0100000000000000ULL -#define MIP_SW_APIC 0x1020b - -#define MIP_PORT(val) ((val >> 32) & 0xffff) - -#define MIP_RD_LO(val) (val & 0xffffffff) - -struct mip_reg { - unsigned long long off_0x00; - unsigned long long off_0x08; - unsigned long long off_0x10; - unsigned long long off_0x18; - unsigned long long off_0x20; - unsigned long long off_0x28; - unsigned long long off_0x30; - unsigned long long off_0x38; -}; - -struct mip_reg_info { - unsigned long long mip_info; - unsigned long long delivery_info; - unsigned long long host_reg; - unsigned long long mip_reg; -}; - -struct psai { - unsigned long long entry_type; - unsigned long long addr; - unsigned long long bep_addr; -}; - -#ifdef CONFIG_ACPI - -struct es7000_oem_table { - struct acpi_table_header Header; - u32 OEMTableAddr; - u32 OEMTableSize; -}; - -static unsigned long oem_addrX; -static unsigned long oem_size; - -#endif - -/* - * ES7000 Globals - */ - -static volatile unsigned long *psai; -static struct mip_reg *mip_reg; -static struct mip_reg *host_reg; -static int mip_port; -static unsigned long mip_addr; -static unsigned long host_addr; - -int es7000_plat; - -/* - * GSI override for ES7000 platforms. 
- */ - -static unsigned int base; - -static int -es7000_rename_gsi(int ioapic, int gsi) -{ - if (es7000_plat == ES7000_ZORRO) - return gsi; - - if (!base) { - int i; - for (i = 0; i < nr_ioapics; i++) - base += nr_ioapic_registers[i]; - } - - if (!ioapic && (gsi < 16)) - gsi += base; - - return gsi; -} - -static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) -{ - unsigned long vect = 0, psaival = 0; - - if (psai == NULL) - return -1; - - vect = ((unsigned long)__pa(eip)/0x1000) << 16; - psaival = (0x1000000 | vect | cpu); - - while (*psai & 0x1000000) - ; - - *psai = psaival; - - return 0; -} - -static int __init es7000_update_apic(void) -{ - apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; - - /* MPENTIUMIII */ - if (boot_cpu_data.x86 == 6 && - (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { - es7000_update_apic_to_cluster(); - apic->wait_for_init_deassert = NULL; - apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; - } - - return 0; -} - -static void __init setup_unisys(void) -{ - /* - * Determine the generation of the ES7000 currently running. - * - * es7000_plat = 1 if the machine is a 5xx ES7000 box - * es7000_plat = 2 if the machine is a x86_64 ES7000 box - * - */ - if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) - es7000_plat = ES7000_ZORRO; - else - es7000_plat = ES7000_CLASSIC; - ioapic_renumber_irq = es7000_rename_gsi; - - x86_quirks->update_apic = es7000_update_apic; -} - -/* - * Parse the OEM Table: - */ -static int __init parse_unisys_oem(char *oemptr) -{ - int i; - int success = 0; - unsigned char type, size; - unsigned long val; - char *tp = NULL; - struct psai *psaip = NULL; - struct mip_reg_info *mi; - struct mip_reg *host, *mip; - - tp = oemptr; - - tp += 8; - - for (i = 0; i <= 6; i++) { - type = *tp++; - size = *tp++; - tp -= 2; - switch (type) { - case MIP_REG: - mi = (struct mip_reg_info *)tp; - val = MIP_RD_LO(mi->host_reg); - host_addr = val; - host = (struct mip_reg *)val; - host_reg = __va(host); - val = MIP_RD_LO(mi->mip_reg); - mip_port = MIP_PORT(mi->mip_info); - mip_addr = val; - mip = (struct mip_reg *)val; - mip_reg = __va(mip); - pr_debug("es7000_mipcfg: host_reg = 0x%lx \n", - (unsigned long)host_reg); - pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n", - (unsigned long)mip_reg); - success++; - break; - case MIP_PSAI_REG: - psaip = (struct psai *)tp; - if (tp != NULL) { - if (psaip->addr) - psai = __va(psaip->addr); - else - psai = NULL; - success++; - } - break; - default: - break; - } - tp += size; - } - - if (success < 2) - es7000_plat = NON_UNISYS; - else - setup_unisys(); - - return es7000_plat; -} - -#ifdef CONFIG_ACPI -static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) -{ - struct acpi_table_header *header = NULL; - struct es7000_oem_table *table; - acpi_size tbl_size; - acpi_status ret; - int i = 0; - - for (;;) { - ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size); - if (!ACPI_SUCCESS(ret)) - return -1; - - if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) - break; - - early_acpi_os_unmap_memory(header, tbl_size); - } - - table = (void *)header; - - oem_addrX = table->OEMTableAddr; - oem_size = table->OEMTableSize; - - early_acpi_os_unmap_memory(header, tbl_size); - - *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); - - return 0; -} - -static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) -{ - if (!oem_addr) - return; - - __acpi_unmap_table((char *)oem_addr, oem_size); -} - -static int es7000_check_dsdt(void) -{ - struct 
acpi_table_header header; - - if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && - !strncmp(header.oem_id, "UNISYS", 6)) - return 1; - return 0; -} - -/* Hook from generic ACPI tables.c */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - unsigned long oem_addr = 0; - int check_dsdt; - int ret = 0; - - /* check dsdt at first to avoid clear fix_map for oem_addr */ - check_dsdt = es7000_check_dsdt(); - - if (!find_unisys_acpi_oem_table(&oem_addr)) { - if (check_dsdt) { - ret = parse_unisys_oem((char *)oem_addr); - } else { - setup_unisys(); - ret = 1; - } - /* - * we need to unmap it - */ - unmap_unisys_acpi_oem_table(oem_addr); - } - return ret; -} -#else /* !CONFIG_ACPI: */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} -#endif /* !CONFIG_ACPI */ - -static void es7000_spin(int n) -{ - int i = 0; - - while (i++ < n) - rep_nop(); -} - -static int __init -es7000_mip_write(struct mip_reg *mip_reg) -{ - int status = 0; - int spin; - - spin = MIP_SPIN; - while ((host_reg->off_0x38 & MIP_VALID) != 0) { - if (--spin <= 0) { - WARN(1, "Timeout waiting for Host Valid Flag\n"); - return -1; - } - es7000_spin(MIP_SPIN); - } - - memcpy(host_reg, mip_reg, sizeof(struct mip_reg)); - outb(1, mip_port); - - spin = MIP_SPIN; - - while ((mip_reg->off_0x38 & MIP_VALID) == 0) { - if (--spin <= 0) { - WARN(1, "Timeout waiting for MIP Valid Flag\n"); - return -1; - } - es7000_spin(MIP_SPIN); - } - - status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48; - mip_reg->off_0x38 &= ~MIP_VALID; - - return status; -} - -static void __init es7000_enable_apic_mode(void) -{ - struct mip_reg es7000_mip_reg; - int mip_status; - - if (!es7000_plat) - return; - - printk(KERN_INFO "ES7000: Enabling APIC mode.\n"); - memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); - es7000_mip_reg.off_0x00 = MIP_SW_APIC; - es7000_mip_reg.off_0x38 = MIP_VALID; - - while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) - WARN(1, "Command failed, status = %x\n", mip_status); -} - -static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. 
- */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - - -static void es7000_wait_for_init_deassert(atomic_t *deassert) -{ -#ifndef CONFIG_ES7000_CLUSTERED_APIC - while (!atomic_read(deassert)) - cpu_relax(); -#endif - return; -} - -static unsigned int es7000_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence_phys(mask, vector); -} - -static void es7000_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); -} - -static void es7000_send_IPI_all(int vector) -{ - es7000_send_IPI_mask(cpu_online_mask, vector); -} - -static int es7000_apic_id_registered(void) -{ - return 1; -} - -static const cpumask_t *target_cpus_cluster(void) -{ - return &CPU_MASK_ALL; -} - -static const cpumask_t *es7000_target_cpus(void) -{ - return &cpumask_of_cpu(smp_processor_id()); -} - -static unsigned long -es7000_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} -static unsigned long es7000_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -static unsigned long calculate_ldr(int cpu) -{ - unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); - - return SET_APIC_LOGICAL_ID(id); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LdR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static void es7000_init_apic_ldr_cluster(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_CLUSTER); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static void es7000_init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); -} - -static void es7000_setup_apic_routing(void) -{ - int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); - - printk(KERN_INFO - "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", - (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", - nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); -} - -static int es7000_apicid_to_node(int logical_apicid) -{ - return 0; -} - - -static int es7000_cpu_present_to_apicid(int mps_cpu) -{ - if (!mps_cpu) - return boot_cpu_physical_apicid; - else if (mps_cpu < nr_cpu_ids) - return per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static int cpu_id; - -static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) -{ - physid_mask_t mask; - - mask = physid_mask_of_physid(cpu_id); - ++cpu_id; - - return mask; -} - -/* Mapping from cpu number to logical apicid */ -static int es7000_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xff); -} - -static int es7000_check_phys_apicid_present(int cpu_physical_apicid) -{ - boot_cpu_physical_apicid = read_apic_id(); - return 1; -} - -static unsigned int -es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = cpumask_first(cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - WARN(1, "Not a valid mask!"); - - return 0xFF; - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return es7000_cpu_to_logical_apicid(0); - /* - * The cpus in the mask must all be on the apic cluster. 
If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk("%s: Not a valid mask!\n", __func__); - - return es7000_cpu_to_logical_apicid(0); - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static unsigned int -es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) -{ - int apicid = es7000_cpu_to_logical_apicid(0); - cpumask_var_t cpumask; - - if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return apicid; - - cpumask_and(cpumask, inmask, andmask); - cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = es7000_cpu_mask_to_apicid(cpumask); - - free_cpumask_var(cpumask); - - return apicid; -} - -static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -void __init es7000_update_apic_to_cluster(void) -{ - apic->target_cpus = target_cpus_cluster; - apic->irq_delivery_mode = dest_LowestPrio; - /* logical delivery broadcast to all procs: */ - apic->irq_dest_mode = 1; - - apic->init_apic_ldr = es7000_init_apic_ldr_cluster; - - apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; -} - -static int probe_es7000(void) -{ - /* probed later in mptable/ACPI hooks */ - return 0; -} - -static __init int -es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (mpc->oemptr) { - struct mpc_oemtable *oem_table = - (struct mpc_oemtable *)mpc->oemptr; - - if (!strncmp(oem, "UNISYS", 6)) - return parse_unisys_oem((char *)oem_table); - } - return 0; -} - - -struct apic apic_es7000 = { - - .name = "es7000", - .probe = probe_es7000, - .acpi_madt_oem_check = es7000_acpi_madt_oem_check, - .apic_id_registered = es7000_apic_id_registered, - - .irq_delivery_mode = dest_Fixed, - /* phys delivery to target CPUs: */ - .irq_dest_mode = 0, - - .target_cpus = es7000_target_cpus, - .disable_esr = 1, - .dest_logical = 0, - .check_apicid_used = es7000_check_apicid_used, - .check_apicid_present = es7000_check_apicid_present, - - .vector_allocation_domain = es7000_vector_allocation_domain, - .init_apic_ldr = es7000_init_apic_ldr, - - .ioapic_phys_id_map = es7000_ioapic_phys_id_map, - .setup_apic_routing = es7000_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = es7000_apicid_to_node, - .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, - .cpu_present_to_apicid = es7000_cpu_present_to_apicid, - .apicid_to_cpu_present = es7000_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = es7000_check_phys_apicid_present, - .enable_apic_mode = es7000_enable_apic_mode, - .phys_pkg_id = es7000_phys_pkg_id, - .mps_oem_check = es7000_mps_oem_check, - - .get_apic_id = es7000_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, - - .send_IPI_mask = es7000_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = es7000_send_IPI_allbutself, - .send_IPI_all = es7000_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - - .wakeup_cpu = NULL, - - .trampoline_phys_low = 0x467, - .trampoline_phys_high = 0x469, - - .wait_for_init_deassert = es7000_wait_for_init_deassert, - - /* Nothing to do for most platforms, 
since cleared by the INIT cycle: */ - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = default_inquire_remote_apic, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c deleted file mode 100644 index d9d6d61eed8..00000000000 --- a/arch/x86/kernel/numaq_32.c +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Written by: Patricia Gaughen, IBM Corporation - * - * Copyright (C) 2002, IBM Corp. - * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) - -int found_numaq; - -/* - * Have to match translation table entries to main table entries by counter - * hence the mpc_record variable .... can't see a less disgusting way of - * doing this .... - */ -struct mpc_trans { - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - -/* x86_quirks member */ -static int mpc_record; - -static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; - -int mp_bus_id_to_node[MAX_MP_BUSSES]; -int mp_bus_id_to_local[MAX_MP_BUSSES]; -int quad_local_to_mp_bus_id[NR_CPUS/4][4]; - - -static inline void numaq_register_node(int node, struct sys_cfg_data *scd) -{ - struct eachquadmem *eq = scd->eq + node; - - node_set_online(node); - - /* Convert to pages */ - node_start_pfn[node] = - MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size); - - node_end_pfn[node] = - MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); - - e820_register_active_regions(node, node_start_pfn[node], - node_end_pfn[node]); - - memory_present(node, node_start_pfn[node], node_end_pfn[node]); - - node_remap_size[node] = node_memmap_size_bytes(node, - node_start_pfn[node], - node_end_pfn[node]); -} - -/* - * Function: smp_dump_qct() - * - * Description: gets memory layout from the quad config table. This - * function also updates node_online_map with the nodes (quads) present. 
- */ -static void __init smp_dump_qct(void) -{ - struct sys_cfg_data *scd; - int node; - - scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); - - nodes_clear(node_online_map); - for_each_node(node) { - if (scd->quads_present31_0 & (1 << node)) - numaq_register_node(node, scd); - } -} - -void __cpuinit numaq_tsc_disable(void) -{ - if (!found_numaq) - return; - - if (num_online_nodes() > 1) { - printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); - setup_clear_cpu_cap(X86_FEATURE_TSC); - } -} - -static int __init numaq_pre_time_init(void) -{ - numaq_tsc_disable(); - return 0; -} - -static inline int generate_logical_apicid(int quad, int phys_apicid) -{ - return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1); -} - -/* x86_quirks member */ -static int mpc_apic_id(struct mpc_cpu *m) -{ - int quad = translation_table[mpc_record]->trans_quad; - int logical_apicid = generate_logical_apicid(quad, m->apicid); - - printk(KERN_DEBUG - "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", - m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, - (m->cpufeature & CPU_MODEL_MASK) >> 4, - m->apicver, quad, logical_apicid); - - return logical_apicid; -} - -/* x86_quirks member */ -static void mpc_oem_bus_info(struct mpc_bus *m, char *name) -{ - int quad = translation_table[mpc_record]->trans_quad; - int local = translation_table[mpc_record]->trans_local; - - mp_bus_id_to_node[m->busid] = quad; - mp_bus_id_to_local[m->busid] = local; - - printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad); -} - -/* x86_quirks member */ -static void mpc_oem_pci_bus(struct mpc_bus *m) -{ - int quad = translation_table[mpc_record]->trans_quad; - int local = translation_table[mpc_record]->trans_local; - - quad_local_to_mp_bus_id[quad][local] = m->busid; -} - -static void __init MP_translation_info(struct mpc_trans *m) -{ - printk(KERN_INFO - "Translation: record %d, type %d, quad %d, global %d, local %d\n", - mpc_record, m->trans_type, m->trans_quad, m->trans_global, - m->trans_local); - - if (mpc_record >= MAX_MPC_ENTRY) - printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); - else - translation_table[mpc_record] = m; /* stash this for later */ - - if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) - node_set_online(m->trans_quad); -} - -static int __init mpf_checksum(unsigned char *mp, int len) -{ - int sum = 0; - - while (len--) - sum += *mp++; - - return sum & 0xFF; -} - -/* - * Read/parse the MPC oem tables - */ -static void __init - smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize) -{ - int count = sizeof(*oemtable); /* the header size */ - unsigned char *oemptr = ((unsigned char *)oemtable) + count; - - mpc_record = 0; - printk(KERN_INFO - "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); - - if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) { - printk(KERN_WARNING - "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", - oemtable->signature[0], oemtable->signature[1], - oemtable->signature[2], oemtable->signature[3]); - return; - } - - if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) { - printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); - return; - } - - while (count < oemtable->length) { - switch (*oemptr) { - case MP_TRANSLATION: - { - struct mpc_trans *m = (void *)oemptr; - - MP_translation_info(m); - oemptr += sizeof(*m); - count += sizeof(*m); - ++mpc_record; - break; - } - default: - printk(KERN_WARNING - "Unrecognised OEM table entry type! 
- %d\n", - (int)*oemptr); - return; - } - } -} - -static int __init numaq_setup_ioapic_ids(void) -{ - /* so can skip it */ - return 1; -} - -static int __init numaq_update_apic(void) -{ - apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; - - return 0; -} - -static struct x86_quirks numaq_x86_quirks __initdata = { - .arch_pre_time_init = numaq_pre_time_init, - .arch_time_init = NULL, - .arch_pre_intr_init = NULL, - .arch_memory_setup = NULL, - .arch_intr_init = NULL, - .arch_trap_init = NULL, - .mach_get_smp_config = NULL, - .mach_find_smp_config = NULL, - .mpc_record = &mpc_record, - .mpc_apic_id = mpc_apic_id, - .mpc_oem_bus_info = mpc_oem_bus_info, - .mpc_oem_pci_bus = mpc_oem_pci_bus, - .smp_read_mpc_oem = smp_read_mpc_oem, - .setup_ioapic_ids = numaq_setup_ioapic_ids, - .update_apic = numaq_update_apic, -}; - -static __init void early_check_numaq(void) -{ - /* - * Find possible boot-time SMP configuration: - */ - early_find_smp_config(); - - /* - * get boot-time SMP configuration: - */ - if (smp_found_config) - early_get_smp_config(); - - if (found_numaq) - x86_quirks = &numaq_x86_quirks; -} - -int __init get_memcfg_numaq(void) -{ - early_check_numaq(); - if (!found_numaq) - return 0; - smp_dump_qct(); - - return 1; -} - -#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline unsigned int numaq_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0x0F; -} - -static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector) -{ - default_send_IPI_mask_sequence_logical(mask, vector); -} - -static inline void numaq_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector); -} - -static inline void numaq_send_IPI_all(int vector) -{ - numaq_send_IPI_mask(cpu_online_mask, vector); -} - -#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8) -#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa) - -/* - * Because we use NMIs rather than the INIT-STARTUP sequence to - * bootstrap the CPUs, the APIC may be in a weird state. Kick it: - */ -static inline void numaq_smp_callin_clear_local_apic(void) -{ - clear_local_APIC(); -} - -static inline const cpumask_t *numaq_target_cpus(void) -{ - return &CPU_MASK_ALL; -} - -static inline unsigned long -numaq_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long numaq_check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -static inline int numaq_apic_id_registered(void) -{ - return 1; -} - -static inline void numaq_init_apic_ldr(void) -{ - /* Already done in NUMA-Q firmware */ -} - -static inline void numaq_setup_apic_routing(void) -{ - printk(KERN_INFO - "Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n", - nr_ioapics); -} - -/* - * Skip adding the timer int on secondary nodes, which causes - * a small but painful rift in the time-space continuum. - */ -static inline int numaq_multi_timer_check(int apic, int irq) -{ - return apic != 0 && irq == 0; -} - -static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* We don't have a good way to do this yet - hack */ - return physids_promote(0xFUL); -} - -static inline int numaq_cpu_to_logical_apicid(int cpu) -{ - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -} - -/* - * Supporting over 60 cpus on NUMA-Q requires a locality-dependent - * cpu to APIC ID relation to properly interact with the intelligent - * mode of the cluster controller. 
- */ -static inline int numaq_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < 60) - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); - else - return BAD_APICID; -} - -static inline int numaq_apicid_to_node(int logical_apicid) -{ - return logical_apicid >> 4; -} - -static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) -{ - int node = numaq_apicid_to_node(logical_apicid); - int cpu = __ffs(logical_apicid & 0xf); - - return physid_mask_of_physid(cpu + 4*node); -} - -/* Where the IO area was mapped on multiquad, always 0 otherwise */ -void *xquad_portio; - -static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -/* - * We use physical apicids here, not logical, so just return the default - * physical broadcast to stop people from breaking us - */ -static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - return 0x0F; -} - -static inline unsigned int -numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask) -{ - return 0x0F; -} - -/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -static int -numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (strncmp(oem, "IBM NUMA", 8)) - printk(KERN_ERR "Warning! Not a NUMA-Q system!\n"); - else - found_numaq = 1; - - return found_numaq; -} - -static int probe_numaq(void) -{ - /* already know from get_memcfg_numaq() */ - return found_numaq; -} - -static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. 
- */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - -static void numaq_setup_portio_remap(void) -{ - int num_quads = num_online_nodes(); - - if (num_quads <= 1) - return; - - printk(KERN_INFO - "Remapping cross-quad port I/O for %d quads\n", num_quads); - - xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); - - printk(KERN_INFO - "xquad_portio vaddr 0x%08lx, len %08lx\n", - (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); -} - -struct apic apic_numaq = { - - .name = "NUMAQ", - .probe = probe_numaq, - .acpi_madt_oem_check = NULL, - .apic_id_registered = numaq_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* physical delivery on LOCAL quad: */ - .irq_dest_mode = 0, - - .target_cpus = numaq_target_cpus, - .disable_esr = 1, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = numaq_check_apicid_used, - .check_apicid_present = numaq_check_apicid_present, - - .vector_allocation_domain = numaq_vector_allocation_domain, - .init_apic_ldr = numaq_init_apic_ldr, - - .ioapic_phys_id_map = numaq_ioapic_phys_id_map, - .setup_apic_routing = numaq_setup_apic_routing, - .multi_timer_check = numaq_multi_timer_check, - .apicid_to_node = numaq_apicid_to_node, - .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, - .cpu_present_to_apicid = numaq_cpu_present_to_apicid, - .apicid_to_cpu_present = numaq_apicid_to_cpu_present, - .setup_portio_remap = numaq_setup_portio_remap, - .check_phys_apicid_present = numaq_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = numaq_phys_pkg_id, - .mps_oem_check = numaq_mps_oem_check, - - .get_apic_id = numaq_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0x0F << 24, - - .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, - - .send_IPI_mask = numaq_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = numaq_send_IPI_allbutself, - .send_IPI_all = numaq_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, - - /* We don't do anything here because we use NMI's to boot instead */ - .wait_for_init_deassert = NULL, - - .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, - .inquire_remote_apic = NULL, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c deleted file mode 100644 index 5fa48332c5c..00000000000 --- a/arch/x86/kernel/probe_32.c +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Default generic APIC driver. This handles up to 8 CPUs. - * - * Copyright 2003 Andi Kleen, SuSE Labs. - * Subject to the GNU Public License, v.2 - * - * Generic x86 APIC driver probe layer. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_HOTPLUG_CPU -#define DEFAULT_SEND_IPI (1) -#else -#define DEFAULT_SEND_IPI (0) -#endif - -int no_broadcast = DEFAULT_SEND_IPI; - -#ifdef CONFIG_X86_LOCAL_APIC - -void default_setup_apic_routing(void) -{ -#ifdef CONFIG_X86_IO_APIC - printk(KERN_INFO - "Enabling APIC mode: Flat. Using %d I/O APICs\n", - nr_ioapics); -#endif -} - -static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) -{ - /* - * Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; -} - -/* should be called last. */ -static int probe_default(void) -{ - return 1; -} - -struct apic apic_default = { - - .name = "default", - .probe = probe_default, - .acpi_madt_oem_check = NULL, - .apic_id_registered = default_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* logical delivery broadcast to all CPUs: */ - .irq_dest_mode = 1, - - .target_cpus = default_target_cpus, - .disable_esr = 0, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = default_check_apicid_used, - .check_apicid_present = default_check_apicid_present, - - .vector_allocation_domain = default_vector_allocation_domain, - .init_apic_ldr = default_init_apic_ldr, - - .ioapic_phys_id_map = default_ioapic_phys_id_map, - .setup_apic_routing = default_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = default_apicid_to_node, - .cpu_to_logical_apicid = default_cpu_to_logical_apicid, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - .apicid_to_cpu_present = default_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = default_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = default_phys_pkg_id, - .mps_oem_check = NULL, - - .get_apic_id = default_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0x0F << 24, - - .cpu_mask_to_apicid = default_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, - - .send_IPI_mask = default_send_IPI_mask_logical, - .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, - .send_IPI_allbutself = default_send_IPI_allbutself, - .send_IPI_all = default_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = default_inquire_remote_apic, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; - -extern struct apic apic_numaq; -extern struct apic apic_summit; -extern struct apic apic_bigsmp; -extern struct apic apic_es7000; -extern struct apic apic_default; - -struct apic 
*apic = &apic_default; -EXPORT_SYMBOL_GPL(apic); - -static struct apic *apic_probe[] __initdata = { -#ifdef CONFIG_X86_NUMAQ - &apic_numaq, -#endif -#ifdef CONFIG_X86_SUMMIT - &apic_summit, -#endif -#ifdef CONFIG_X86_BIGSMP - &apic_bigsmp, -#endif -#ifdef CONFIG_X86_ES7000 - &apic_es7000, -#endif - &apic_default, /* must be last */ - NULL, -}; - -static int cmdline_apic __initdata; -static int __init parse_apic(char *arg) -{ - int i; - - if (!arg) - return -EINVAL; - - for (i = 0; apic_probe[i]; i++) { - if (!strcmp(apic_probe[i]->name, arg)) { - apic = apic_probe[i]; - cmdline_apic = 1; - return 0; - } - } - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - - /* Parsed again by __setup for debug/verbose */ - return 0; -} -early_param("apic", parse_apic); - -void __init generic_bigsmp_probe(void) -{ -#ifdef CONFIG_X86_BIGSMP - /* - * This routine is used to switch to bigsmp mode when - * - There is no apic= option specified by the user - * - generic_apic_probe() has chosen apic_default as the sub_arch - * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support - */ - - if (!cmdline_apic && apic == &apic_default) { - if (apic_bigsmp.probe()) { - apic = &apic_bigsmp; - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - printk(KERN_INFO "Overriding APIC driver with %s\n", - apic->name); - } - } -#endif -} - -void __init generic_apic_probe(void) -{ - if (!cmdline_apic) { - int i; - for (i = 0; apic_probe[i]; i++) { - if (apic_probe[i]->probe()) { - apic = apic_probe[i]; - break; - } - } - /* Not visible without early console */ - if (!apic_probe[i]) - panic("Didn't find an APIC driver"); - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - } - printk(KERN_INFO "Using APIC driver %s\n", apic->name); -} - -/* These functions can switch the APIC even after the initial ->probe() */ - -int __init -generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (!apic_probe[i]->mps_oem_check) - continue; - if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) - continue; - - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; - } - return 0; -} - -int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - int i; - - for (i = 0; apic_probe[i]; ++i) { - if (!apic_probe[i]->acpi_madt_oem_check) - continue; - if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) - continue; - - if (!cmdline_apic) { - apic = apic_probe[i]; - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); - } - return 1; - } - return 0; -} - -#endif /* CONFIG_X86_LOCAL_APIC */ - -/** - * pre_intr_init_hook - initialisation prior to setting up interrupt vectors - * - * Description: - * Perform any necessary interrupt initialisation prior to setting up - * the "ordinary" interrupt call gates. For legacy reasons, the ISA - * interrupts should be initialised here if the machine emulates a PC - * in any way. - **/ -void __init pre_intr_init_hook(void) -{ - if (x86_quirks->arch_pre_intr_init) { - if (x86_quirks->arch_pre_intr_init()) - return; - } - init_ISA_irqs(); -} - -/** - * intr_init_hook - post gate setup interrupt initialisation - * - * Description: - * Fill in any interrupts that may have been left out by the general - * init_IRQ() routine. 
interrupts having to do with the machine rather - * than the devices on the I/O bus (like APIC interrupts in intel MP - * systems) are started here. - **/ -void __init intr_init_hook(void) -{ - if (x86_quirks->arch_intr_init) { - if (x86_quirks->arch_intr_init()) - return; - } -} - -/** - * pre_setup_arch_hook - hook called prior to any setup_arch() execution - * - * Description: - * generally used to activate any machine specific identification - * routines that may be needed before setup_arch() runs. On Voyager - * this is used to get the board revision and type. - **/ -void __init pre_setup_arch_hook(void) -{ -} - -/** - * trap_init_hook - initialise system specific traps - * - * Description: - * Called as the final act of trap_init(). Used in VISWS to initialise - * the various board specific APIC traps. - **/ -void __init trap_init_hook(void) -{ - if (x86_quirks->arch_trap_init) { - if (x86_quirks->arch_trap_init()) - return; - } -} - -static struct irqaction irq0 = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, - .name = "timer" -}; - -/** - * pre_time_init_hook - do any specific initialisations before. - * - **/ -void __init pre_time_init_hook(void) -{ - if (x86_quirks->arch_pre_time_init) - x86_quirks->arch_pre_time_init(); -} - -/** - * time_init_hook - do any specific initialisations for the system timer. - * - * Description: - * Must plug the system timer interrupt source at HZ into the IRQ listed - * in irq_vectors.h:TIMER_IRQ - **/ -void __init time_init_hook(void) -{ - if (x86_quirks->arch_time_init) { - /* - * A nonzero return code does not mean failure, it means - * that the architecture quirk does not want any - * generic (timer) setup to be performed after this: - */ - if (x86_quirks->arch_time_init()) - return; - } - - irq0.mask = cpumask_of_cpu(0); - setup_irq(0, &irq0); -} - -#ifdef CONFIG_MCA -/** - * mca_nmi_hook - hook into MCA specific NMI chain - * - * Description: - * The MCA (Microchannel Architecture) has an NMI chain for NMI sources - * along the MCA bus. Use this to hook into that chain if you will need - * it. - **/ -void mca_nmi_hook(void) -{ - /* - * If I recall correctly, there's a whole bunch of other things that - * we can do to check for NMI problems, but that's all I know about - * at the moment. - */ - pr_warning("NMI generated from unknown source!\n"); -} -#endif - -static __init int no_ipi_broadcast(char *str) -{ - get_option(&str, &no_broadcast); - pr_info("Using %s mode\n", - no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); - return 1; -} -__setup("no_ipi_broadcast=", no_ipi_broadcast); - -static int __init print_ipi_mode(void) -{ - pr_info("Using IPI %s mode\n", - no_broadcast ? "No-Shortcut" : "Shortcut"); - return 0; -} - -late_initcall(print_ipi_mode); - diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c deleted file mode 100644 index cfe7b09015d..00000000000 --- a/arch/x86/kernel/summit_32.c +++ /dev/null @@ -1,601 +0,0 @@ -/* - * IBM Summit-Specific Code - * - * Written By: Matthew Dobson, IBM Corporation - * - * Copyright (c) 2003 IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to - * - */ - -#include -#include -#include -#include - -/* - * APIC driver for the IBM "Summit" chipset. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static inline unsigned summit_get_apic_id(unsigned long x) -{ - return (x >> 24) & 0xFF; -} - -static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) -{ - default_send_IPI_mask_sequence_logical(mask, vector); -} - -static inline void summit_send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - summit_send_IPI_mask(&mask, vector); -} - -static inline void summit_send_IPI_all(int vector) -{ - summit_send_IPI_mask(&cpu_online_map, vector); -} - -#include - -extern int use_cyclone; - -#ifdef CONFIG_X86_SUMMIT_NUMA -extern void setup_summit(void); -#else -#define setup_summit() {} -#endif - -static inline int -summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) -{ - if (!strncmp(oem, "IBM ENSW", 8) && - (!strncmp(productid, "VIGIL SMP", 9) - || !strncmp(productid, "EXA", 3) - || !strncmp(productid, "RUTHLESS SMP", 12))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -/* Hook from generic ACPI tables.c */ -static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (!strncmp(oem_id, "IBM", 3) && - (!strncmp(oem_table_id, "SERVIGIL", 8) - || !strncmp(oem_table_id, "EXA", 3))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -struct rio_table_hdr { - unsigned char version; /* Version number of this data structure */ - /* Version 3 adds chassis_num & WP_index */ - unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ - unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ -} __attribute__((packed)); - -struct scal_detail { - unsigned char node_id; /* Scalability Node ID */ - unsigned long CBAR; /* Address of 1MB register space */ - unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF = None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port2node; /* Node ID port connected to: 0xFF = None */ - unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ -} __attribute__((packed)); - -struct rio_detail { - unsigned char node_id; /* RIO Node ID */ - unsigned long BBAR; /* Address of 1MB register space */ - unsigned char type; /* Type of device */ - unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ - /* For CYC: Node ID of Twister that owns this CYC */ - 
unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF=None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ - /* For CYC: 0 */ - unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ - /* = 0 : the XAPIC is not used, ie:*/ - /* ints fwded to another XAPIC */ - /* Bits1:7 Reserved */ - /* For CYC: Bits0:7 Reserved */ - unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ - /* lower slot numbers/PCI bus numbers */ - /* For CYC: No meaning */ - unsigned char chassis_num; /* 1 based Chassis number */ - /* For LookOut WPEGs this field indicates the */ - /* Expansion Chassis #, enumerated from Boot */ - /* Node WPEG external port, then Boot Node CYC */ - /* external port, then Next Vigil chassis WPEG */ - /* external port, etc. */ - /* Shared Lookouts have only 1 chassis number (the */ - /* first one assigned) */ -} __attribute__((packed)); - - -typedef enum { - CompatTwister = 0, /* Compatibility Twister */ - AltTwister = 1, /* Alternate Twister of internal 8-way */ - CompatCyclone = 2, /* Compatibility Cyclone */ - AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ - CompatWPEG = 4, /* Compatibility WPEG */ - AltWPEG = 5, /* Second Planar WPEG */ - LookOutAWPEG = 6, /* LookOut WPEG */ - LookOutBWPEG = 7, /* LookOut WPEG */ -} node_type; - -static inline int is_WPEG(struct rio_detail *rio){ - return (rio->type == CompatWPEG || rio->type == AltWPEG || - rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); -} - - -/* In clustered mode, the high nibble of APIC ID is a cluster number. - * The low nibble is a 4-bit bitmap. */ -#define XAPIC_DEST_CPUS_SHIFT 4 -#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) -#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) - -#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline const cpumask_t *summit_target_cpus(void) -{ - /* CPU_MASK_ALL (0xff) has undefined behaviour with - * dest_LowestPrio mode logical clustered apic interrupt routing - * Just start on cpu 0. IRQ balancing will spread load - */ - return &cpumask_of_cpu(0); -} - -static inline unsigned long -summit_check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -/* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long summit_check_apicid_present(int bit) -{ - return 1; -} - -static inline void summit_init_apic_ldr(void) -{ - unsigned long val, id; - int count = 0; - u8 my_id = (u8)hard_smp_processor_id(); - u8 my_cluster = APIC_CLUSTER(my_id); -#ifdef CONFIG_SMP - u8 lid; - int i; - - /* Create logical APIC IDs by counting CPUs already in cluster. */ - for (count = 0, i = nr_cpu_ids; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; - if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) - ++count; - } -#endif - /* We only have a 4 wide bitmap in cluster mode. If a deranged - * BIOS puts 5 CPUs in one APIC cluster, we're hosed. 
*/ - BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); - apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(id); - apic_write(APIC_LDR, val); -} - -static inline int summit_apic_id_registered(void) -{ - return 1; -} - -static inline void summit_setup_apic_routing(void) -{ - printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", - nr_ioapics); -} - -static inline int summit_apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - -/* Mapping from cpu number to logical apicid */ -static inline int summit_cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= nr_cpu_ids) - return BAD_APICID; - return cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static inline int summit_cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < nr_cpu_ids) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - -static inline physid_mask_t -summit_ioapic_phys_id_map(physid_mask_t phys_id_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0x0F); -} - -static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) -{ - return physid_mask_of_physid(0); -} - -static inline void summit_setup_portio_remap(void) -{ -} - -static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set >= nr_cpu_ids) - return 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = summit_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = summit_cpu_to_logical_apicid(cpu); - - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); - - return 0xFF; - } - apicid = apicid | new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static inline unsigned int -summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask) -{ - int apicid = summit_cpu_to_logical_apicid(0); - cpumask_var_t cpumask; - - if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return apicid; - - cpumask_and(cpumask, inmask, andmask); - cpumask_and(cpumask, cpumask, cpu_online_mask); - apicid = summit_cpu_mask_to_apicid(cpumask); - - free_cpumask_var(cpumask); - - return apicid; -} - -/* - * cpuid returns the value latched in the HW at reset, not the APIC ID - * register's value. For any box whose BIOS changes APIC IDs, like - * clustered APIC systems, we must use hard_smp_processor_id. - * - * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. - */ -static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -static int probe_summit(void) -{ - /* probed later in mptable/ACPI hooks */ - return 0; -} - -static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) -{ - /* Careful. Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. 
- * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; -} - -#ifdef CONFIG_X86_SUMMIT_NUMA -static struct rio_table_hdr *rio_table_hdr __initdata; -static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; -static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; - -#ifndef CONFIG_X86_NUMAQ -static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; -#endif - -static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) -{ - int twister = 0, node = 0; - int i, bus, num_buses; - - for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { - if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) { - twister = rio_devs[i]->owner_id; - break; - } - } - if (i == rio_table_hdr->num_rio_dev) { - printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); - return last_bus; - } - - for (i = 0; i < rio_table_hdr->num_scal_dev; i++) { - if (scal_devs[i]->node_id == twister) { - node = scal_devs[i]->node_id; - break; - } - } - if (i == rio_table_hdr->num_scal_dev) { - printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); - return last_bus; - } - - switch (rio_devs[wpeg_num]->type) { - case CompatWPEG: - /* - * The Compatibility Winnipeg controls the 2 legacy buses, - * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case - * a PCI-PCI bridge card is used in either slot: total 5 buses. - */ - num_buses = 5; - break; - case AltWPEG: - /* - * The Alternate Winnipeg controls the 2 133MHz buses [1 slot - * each], their 2 "extra" buses, the 100MHz bus [2 slots] and - * the "extra" buses for each of those slots: total 7 buses. - */ - num_buses = 7; - break; - case LookOutAWPEG: - case LookOutBWPEG: - /* - * A Lookout Winnipeg controls 3 100MHz buses [2 slots each] - * & the "extra" buses for each of those slots: total 9 buses. - */ - num_buses = 9; - break; - default: - printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); - return last_bus; - } - - for (bus = last_bus; bus < last_bus + num_buses; bus++) - mp_bus_id_to_node[bus] = node; - return bus; -} - -static int __init build_detail_arrays(void) -{ - unsigned long ptr; - int i, scal_detail_size, rio_detail_size; - - if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { - printk(KERN_WARNING "%s: MAX_NUMNODES too low! 
Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); - return 0; - } - - switch (rio_table_hdr->version) { - default: - printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); - return 0; - case 2: - scal_detail_size = 11; - rio_detail_size = 13; - break; - case 3: - scal_detail_size = 12; - rio_detail_size = 15; - break; - } - - ptr = (unsigned long)rio_table_hdr + 3; - for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) - scal_devs[i] = (struct scal_detail *)ptr; - - for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) - rio_devs[i] = (struct rio_detail *)ptr; - - return 1; -} - -void __init setup_summit(void) -{ - unsigned long ptr; - unsigned short offset; - int i, next_wpeg, next_bus = 0; - - /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */ - ptr = get_bios_ebda(); - ptr = (unsigned long)phys_to_virt(ptr); - - rio_table_hdr = NULL; - offset = 0x180; - while (offset) { - /* The block id is stored in the 2nd word */ - if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) { - /* set the pointer past the offset & block id */ - rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); - break; - } - /* The next offset is stored in the 1st word. 0 means no more */ - offset = *((unsigned short *)(ptr + offset)); - } - if (!rio_table_hdr) { - printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); - return; - } - - if (!build_detail_arrays()) - return; - - /* The first Winnipeg we're looking for has an index of 0 */ - next_wpeg = 0; - do { - for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { - if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) { - /* It's the Winnipeg we're looking for! */ - next_bus = setup_pci_node_map_for_wpeg(i, next_bus); - next_wpeg++; - break; - } - } - /* - * If we go through all Rio devices and don't find one with - * the next index, it means we've found all the Winnipegs, - * and thus all the PCI buses. 
- */ - if (i == rio_table_hdr->num_rio_dev) - next_wpeg = 0; - } while (next_wpeg != 0); -} -#endif - -struct apic apic_summit = { - - .name = "summit", - .probe = probe_summit, - .acpi_madt_oem_check = summit_acpi_madt_oem_check, - .apic_id_registered = summit_apic_id_registered, - - .irq_delivery_mode = dest_LowestPrio, - /* logical delivery broadcast to all CPUs: */ - .irq_dest_mode = 1, - - .target_cpus = summit_target_cpus, - .disable_esr = 1, - .dest_logical = APIC_DEST_LOGICAL, - .check_apicid_used = summit_check_apicid_used, - .check_apicid_present = summit_check_apicid_present, - - .vector_allocation_domain = summit_vector_allocation_domain, - .init_apic_ldr = summit_init_apic_ldr, - - .ioapic_phys_id_map = summit_ioapic_phys_id_map, - .setup_apic_routing = summit_setup_apic_routing, - .multi_timer_check = NULL, - .apicid_to_node = summit_apicid_to_node, - .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, - .cpu_present_to_apicid = summit_cpu_present_to_apicid, - .apicid_to_cpu_present = summit_apicid_to_cpu_present, - .setup_portio_remap = NULL, - .check_phys_apicid_present = summit_check_phys_apicid_present, - .enable_apic_mode = NULL, - .phys_pkg_id = summit_phys_pkg_id, - .mps_oem_check = summit_mps_oem_check, - - .get_apic_id = summit_get_apic_id, - .set_apic_id = NULL, - .apic_id_mask = 0xFF << 24, - - .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, - .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, - - .send_IPI_mask = summit_send_IPI_mask, - .send_IPI_mask_allbutself = NULL, - .send_IPI_allbutself = summit_send_IPI_allbutself, - .send_IPI_all = summit_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - - .wakeup_cpu = NULL, - .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, - .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, - - .wait_for_init_deassert = default_wait_for_init_deassert, - - .smp_callin_clear_local_apic = NULL, - .inquire_remote_apic = default_inquire_remote_apic, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = native_apic_wait_icr_idle, - .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, -}; -- cgit v1.2.3 From a7eb518998529c08cc53fef17756d9fe433b0c23 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 17 Feb 2009 13:01:51 -0800 Subject: x86: truncate ISA addresses to unsigned int Impact: Cleanup; fix inappropriate macro use ISA addresses on x86 are mapped 1:1 with the physical address space. Since the ISA address space is only 24 bits (32 for VLB or LPC) it will always fit in an unsigned int, and at least in the aha1542 driver using a wider type would cause an undesirable promotion. Hence explicitly cast the ISA bus addresses to unsigned int. Signed-off-by: H. Peter Anvin Cc: James Bottomley --- arch/x86/include/asm/io.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 4f8e820cf38..683d0b4c00f 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -124,10 +124,15 @@ static inline void *phys_to_virt(phys_addr_t address) /* * ISA I/O bus memory addresses are 1:1 with the physical address. + * However, we truncate the address to unsigned int to avoid undesirable + * promitions in legacy drivers. 
*/ -#define isa_virt_to_bus (unsigned long)virt_to_phys -#define isa_page_to_bus page_to_phys -#define isa_bus_to_virt phys_to_virt +static inline unsigned int isa_virt_to_bus(volatile void *address) +{ + return (unsigned int)virt_to_phys(address); +} +#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) +#define isa_bus_to_virt phys_to_virt /* * However PCI ones are not necessarily 1:1 and therefore these interfaces -- cgit v1.2.3 From 9be1b56a3e718aa998772019c57c398dbb19e258 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 17 Feb 2009 23:12:48 +0100 Subject: x86, apic: separate 32-bit setup functionality out of apic_32.c Impact: build fix, cleanup A couple of arch setup callbacks were mistakenly in apic_32.c, breaking the build. Also simplify the code a bit. Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/apic/Makefile | 3 +- arch/x86/kernel/apic/probe_32.c | 160 ++++------------------------------------ arch/x86/kernel/setup.c | 124 +++++++++++++++++++++++++++++++ 4 files changed, 142 insertions(+), 147 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c70537d8c15..de5657c039e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -63,7 +63,7 @@ obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse.o -obj-$(CONFIG_X86_LOCAL_APIC) += apic/ +obj-y += apic/ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile index 97f558db5c3..da7b7b9f8bd 100644 --- a/arch/x86/kernel/apic/Makefile +++ b/arch/x86/kernel/apic/Makefile @@ -2,10 +2,9 @@ # Makefile for local APIC drivers and for the IO-APIC code # -obj-y := apic.o probe_$(BITS).o ipi.o nmi.o +obj-$(CONFIG_X86_LOCAL_APIC) += apic.o probe_$(BITS).o ipi.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_SMP) += ipi.o -obj-$ ifeq ($(CONFIG_X86_64),y) obj-y += apic_flat_64.o diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 5fa48332c5c..c9ec90742e9 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -47,7 +47,22 @@ int no_broadcast = DEFAULT_SEND_IPI; -#ifdef CONFIG_X86_LOCAL_APIC +static __init int no_ipi_broadcast(char *str) +{ + get_option(&str, &no_broadcast); + pr_info("Using %s mode\n", + no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); + return 1; +} +__setup("no_ipi_broadcast=", no_ipi_broadcast); + +static int __init print_ipi_mode(void) +{ + pr_info("Using IPI %s mode\n", + no_broadcast ? "No-Shortcut" : "Shortcut"); + return 0; +} +late_initcall(print_ipi_mode); void default_setup_apic_routing(void) { @@ -279,146 +294,3 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) } return 0; } - -#endif /* CONFIG_X86_LOCAL_APIC */ - -/** - * pre_intr_init_hook - initialisation prior to setting up interrupt vectors - * - * Description: - * Perform any necessary interrupt initialisation prior to setting up - * the "ordinary" interrupt call gates. For legacy reasons, the ISA - * interrupts should be initialised here if the machine emulates a PC - * in any way. 
- **/ -void __init pre_intr_init_hook(void) -{ - if (x86_quirks->arch_pre_intr_init) { - if (x86_quirks->arch_pre_intr_init()) - return; - } - init_ISA_irqs(); -} - -/** - * intr_init_hook - post gate setup interrupt initialisation - * - * Description: - * Fill in any interrupts that may have been left out by the general - * init_IRQ() routine. interrupts having to do with the machine rather - * than the devices on the I/O bus (like APIC interrupts in intel MP - * systems) are started here. - **/ -void __init intr_init_hook(void) -{ - if (x86_quirks->arch_intr_init) { - if (x86_quirks->arch_intr_init()) - return; - } -} - -/** - * pre_setup_arch_hook - hook called prior to any setup_arch() execution - * - * Description: - * generally used to activate any machine specific identification - * routines that may be needed before setup_arch() runs. On Voyager - * this is used to get the board revision and type. - **/ -void __init pre_setup_arch_hook(void) -{ -} - -/** - * trap_init_hook - initialise system specific traps - * - * Description: - * Called as the final act of trap_init(). Used in VISWS to initialise - * the various board specific APIC traps. - **/ -void __init trap_init_hook(void) -{ - if (x86_quirks->arch_trap_init) { - if (x86_quirks->arch_trap_init()) - return; - } -} - -static struct irqaction irq0 = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, - .mask = CPU_MASK_NONE, - .name = "timer" -}; - -/** - * pre_time_init_hook - do any specific initialisations before. - * - **/ -void __init pre_time_init_hook(void) -{ - if (x86_quirks->arch_pre_time_init) - x86_quirks->arch_pre_time_init(); -} - -/** - * time_init_hook - do any specific initialisations for the system timer. - * - * Description: - * Must plug the system timer interrupt source at HZ into the IRQ listed - * in irq_vectors.h:TIMER_IRQ - **/ -void __init time_init_hook(void) -{ - if (x86_quirks->arch_time_init) { - /* - * A nonzero return code does not mean failure, it means - * that the architecture quirk does not want any - * generic (timer) setup to be performed after this: - */ - if (x86_quirks->arch_time_init()) - return; - } - - irq0.mask = cpumask_of_cpu(0); - setup_irq(0, &irq0); -} - -#ifdef CONFIG_MCA -/** - * mca_nmi_hook - hook into MCA specific NMI chain - * - * Description: - * The MCA (Microchannel Architecture) has an NMI chain for NMI sources - * along the MCA bus. Use this to hook into that chain if you will need - * it. - **/ -void mca_nmi_hook(void) -{ - /* - * If I recall correctly, there's a whole bunch of other things that - * we can do to check for NMI problems, but that's all I know about - * at the moment. - */ - pr_warning("NMI generated from unknown source!\n"); -} -#endif - -static __init int no_ipi_broadcast(char *str) -{ - get_option(&str, &no_broadcast); - pr_info("Using %s mode\n", - no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); - return 1; -} -__setup("no_ipi_broadcast=", no_ipi_broadcast); - -static int __init print_ipi_mode(void) -{ - pr_info("Using IPI %s mode\n", - no_broadcast ? 
"No-Shortcut" : "Shortcut"); - return 0; -} - -late_initcall(print_ipi_mode); - diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6b588d6b388..ebef8005579 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -985,4 +985,128 @@ void __init setup_arch(char **cmdline_p) #endif } +#ifdef CONFIG_X86_32 + +/** + * pre_intr_init_hook - initialisation prior to setting up interrupt vectors + * + * Description: + * Perform any necessary interrupt initialisation prior to setting up + * the "ordinary" interrupt call gates. For legacy reasons, the ISA + * interrupts should be initialised here if the machine emulates a PC + * in any way. + **/ +void __init pre_intr_init_hook(void) +{ + if (x86_quirks->arch_pre_intr_init) { + if (x86_quirks->arch_pre_intr_init()) + return; + } + init_ISA_irqs(); +} + +/** + * intr_init_hook - post gate setup interrupt initialisation + * + * Description: + * Fill in any interrupts that may have been left out by the general + * init_IRQ() routine. interrupts having to do with the machine rather + * than the devices on the I/O bus (like APIC interrupts in intel MP + * systems) are started here. + **/ +void __init intr_init_hook(void) +{ + if (x86_quirks->arch_intr_init) { + if (x86_quirks->arch_intr_init()) + return; + } +} + +/** + * pre_setup_arch_hook - hook called prior to any setup_arch() execution + * + * Description: + * generally used to activate any machine specific identification + * routines that may be needed before setup_arch() runs. On Voyager + * this is used to get the board revision and type. + **/ +void __init pre_setup_arch_hook(void) +{ +} + +/** + * trap_init_hook - initialise system specific traps + * + * Description: + * Called as the final act of trap_init(). Used in VISWS to initialise + * the various board specific APIC traps. + **/ +void __init trap_init_hook(void) +{ + if (x86_quirks->arch_trap_init) { + if (x86_quirks->arch_trap_init()) + return; + } +} + +static struct irqaction irq0 = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, + .mask = CPU_MASK_NONE, + .name = "timer" +}; + +/** + * pre_time_init_hook - do any specific initialisations before. + * + **/ +void __init pre_time_init_hook(void) +{ + if (x86_quirks->arch_pre_time_init) + x86_quirks->arch_pre_time_init(); +} + +/** + * time_init_hook - do any specific initialisations for the system timer. + * + * Description: + * Must plug the system timer interrupt source at HZ into the IRQ listed + * in irq_vectors.h:TIMER_IRQ + **/ +void __init time_init_hook(void) +{ + if (x86_quirks->arch_time_init) { + /* + * A nonzero return code does not mean failure, it means + * that the architecture quirk does not want any + * generic (timer) setup to be performed after this: + */ + if (x86_quirks->arch_time_init()) + return; + } + + irq0.mask = cpumask_of_cpu(0); + setup_irq(0, &irq0); +} + +#ifdef CONFIG_MCA +/** + * mca_nmi_hook - hook into MCA specific NMI chain + * + * Description: + * The MCA (Microchannel Architecture) has an NMI chain for NMI sources + * along the MCA bus. Use this to hook into that chain if you will need + * it. + **/ +void mca_nmi_hook(void) +{ + /* + * If I recall correctly, there's a whole bunch of other things that + * we can do to check for NMI problems, but that's all I know about + * at the moment. 
+ */ + pr_warning("NMI generated from unknown source!\n"); +} +#endif /* CONFIG_MCA */ +#endif /* CONFIG_X86_32 */ -- cgit v1.2.3 From 109568e110ed67d4be1b28609b9fa00fca97f8eb Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 9 Jan 2009 16:49:30 +1100 Subject: crypto: aes - Move key_length in struct crypto_aes_ctx to be the last field The Intel AES-NI AES acceleration instructions need key_enc, key_dec in struct crypto_aes_ctx to be 16 byte aligned, it make this easier to move key_length to be the last one. Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- arch/x86/crypto/aes-i586-asm_32.S | 6 +++--- arch/x86/crypto/aes-x86_64-asm_64.S | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S index e41b147f450..5252c388017 100644 --- a/arch/x86/crypto/aes-i586-asm_32.S +++ b/arch/x86/crypto/aes-i586-asm_32.S @@ -46,9 +46,9 @@ #define in_blk 16 /* offsets in crypto_tfm structure */ -#define klen (crypto_tfm_ctx_offset + 0) -#define ekey (crypto_tfm_ctx_offset + 4) -#define dkey (crypto_tfm_ctx_offset + 244) +#define klen (crypto_tfm_ctx_offset + 480) +#define ekey (crypto_tfm_ctx_offset + 0) +#define dkey (crypto_tfm_ctx_offset + 240) // register mapping for encrypt and decrypt subroutines diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S index a120f526c3d..7f28f62737d 100644 --- a/arch/x86/crypto/aes-x86_64-asm_64.S +++ b/arch/x86/crypto/aes-x86_64-asm_64.S @@ -56,13 +56,13 @@ .align 8; \ FUNC: movq r1,r2; \ movq r3,r4; \ - leaq BASE+KEY+48+4(r8),r9; \ + leaq BASE+KEY+48(r8),r9; \ movq r10,r11; \ movl (r7),r5 ## E; \ movl 4(r7),r1 ## E; \ movl 8(r7),r6 ## E; \ movl 12(r7),r7 ## E; \ - movl BASE+0(r8),r10 ## E; \ + movl BASE+480(r8),r10 ## E; \ xorl -48(r9),r5 ## E; \ xorl -44(r9),r1 ## E; \ xorl -40(r9),r6 ## E; \ -- cgit v1.2.3 From 07bf44f86989f5ed866510374fe761d1903681fb Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 9 Jan 2009 17:25:50 +1100 Subject: crypto: aes - Export x86 AES encrypt/decrypt functions Intel AES-NI AES acceleration instructions touch XMM state, to use that in soft_irq context, general x86 AES implementation is used as fallback. The first parameter is changed from struct crypto_tfm * to struct crypto_aes_ctx * to make it easier to deal with 16 bytes alignment requirement of AES-NI implementation. 
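For illustration (this sketch is not from the patch itself), a hypothetical in-kernel caller could use the newly exported entry points roughly as follows; the helper name demo_one_block is invented, and the key schedule is built with the generic crypto_aes_expand_key(), the same fallback the AES-NI glue code later in this series relies on:

#include <crypto/aes.h>
#include <asm/aes.h>

/* Hypothetical caller, shown only to illustrate the exported interface. */
static int demo_one_block(const u8 *key, const u8 *in, u8 *out)
{
	struct crypto_aes_ctx ctx;
	int err;

	/* Build the key schedule with the generic C implementation. */
	err = crypto_aes_expand_key(&ctx, key, AES_KEYSIZE_128);
	if (err)
		return err;

	/* No XMM/SSE state is touched, so this is usable in softirq context. */
	crypto_aes_encrypt_x86(&ctx, out, in);
	/* crypto_aes_decrypt_x86(&ctx, dst, src) is the inverse operation. */
	return 0;
}

Note that struct crypto_aes_ctx is several hundred bytes, so real users would normally embed it in a transform context rather than place it on the stack, as the AES-NI glue code added later in this series does.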
Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- arch/x86/crypto/aes-i586-asm_32.S | 18 +++++++++--------- arch/x86/crypto/aes-x86_64-asm_64.S | 6 ++---- arch/x86/crypto/aes_glue.c | 20 ++++++++++++++++---- arch/x86/include/asm/aes.h | 11 +++++++++++ 4 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 arch/x86/include/asm/aes.h (limited to 'arch') diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S index 5252c388017..b949ec2f9af 100644 --- a/arch/x86/crypto/aes-i586-asm_32.S +++ b/arch/x86/crypto/aes-i586-asm_32.S @@ -41,14 +41,14 @@ #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) /* offsets to parameters with one register pushed onto stack */ -#define tfm 8 +#define ctx 8 #define out_blk 12 #define in_blk 16 -/* offsets in crypto_tfm structure */ -#define klen (crypto_tfm_ctx_offset + 480) -#define ekey (crypto_tfm_ctx_offset + 0) -#define dkey (crypto_tfm_ctx_offset + 240) +/* offsets in crypto_aes_ctx structure */ +#define klen (480) +#define ekey (0) +#define dkey (240) // register mapping for encrypt and decrypt subroutines @@ -217,7 +217,7 @@ do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ // AES (Rijndael) Encryption Subroutine -/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */ +/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */ .global aes_enc_blk @@ -228,7 +228,7 @@ aes_enc_blk: push %ebp - mov tfm(%esp),%ebp + mov ctx(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings @@ -292,7 +292,7 @@ aes_enc_blk: ret // AES (Rijndael) Decryption Subroutine -/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */ +/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */ .global aes_dec_blk @@ -303,7 +303,7 @@ aes_enc_blk: aes_dec_blk: push %ebp - mov tfm(%esp),%ebp + mov ctx(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S index 7f28f62737d..5b577d5a059 100644 --- a/arch/x86/crypto/aes-x86_64-asm_64.S +++ b/arch/x86/crypto/aes-x86_64-asm_64.S @@ -17,8 +17,6 @@ #include -#define BASE crypto_tfm_ctx_offset - #define R1 %rax #define R1E %eax #define R1X %ax @@ -56,13 +54,13 @@ .align 8; \ FUNC: movq r1,r2; \ movq r3,r4; \ - leaq BASE+KEY+48(r8),r9; \ + leaq KEY+48(r8),r9; \ movq r10,r11; \ movl (r7),r5 ## E; \ movl 4(r7),r1 ## E; \ movl 8(r7),r6 ## E; \ movl 12(r7),r7 ## E; \ - movl BASE+480(r8),r10 ## E; \ + movl 480(r8),r10 ## E; \ xorl -48(r9),r5 ## E; \ xorl -44(r9),r1 ## E; \ xorl -40(r9),r6 ## E; \ diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c index 71f45782711..49ae9fe32b2 100644 --- a/arch/x86/crypto/aes_glue.c +++ b/arch/x86/crypto/aes_glue.c @@ -5,17 +5,29 @@ #include -asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); -asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); +asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); +asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) +{ + aes_enc_blk(ctx, dst, src); +} +EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86); + +void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) +{ + aes_dec_blk(ctx, dst, src); +} 
+EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86); static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - aes_enc_blk(tfm, dst, src); + aes_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - aes_dec_blk(tfm, dst, src); + aes_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static struct crypto_alg aes_alg = { diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h new file mode 100644 index 00000000000..80545a1cbe3 --- /dev/null +++ b/arch/x86/include/asm/aes.h @@ -0,0 +1,11 @@ +#ifndef ASM_X86_AES_H +#define ASM_X86_AES_H + +#include +#include + +void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +#endif -- cgit v1.2.3 From 54b6a1bd5364aca95cd6ffae00f2b64c6511122c Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Sun, 18 Jan 2009 16:28:34 +1100 Subject: crypto: aes-ni - Add support to Intel AES-NI instructions for x86_64 platform Intel AES-NI is a new set of Single Instruction Multiple Data (SIMD) instructions that are going to be introduced in the next generation of Intel processor, as of 2009. These instructions enable fast and secure data encryption and decryption, using the Advanced Encryption Standard (AES), defined by FIPS Publication number 197. The architecture introduces six instructions that offer full hardware support for AES. Four of them support high performance data encryption and decryption, and the other two instructions support the AES key expansion procedure. The white paper can be downloaded from: http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf AES may be used in soft_irq context, but MMX/SSE context can not be touched safely in soft_irq context. So in_interrupt() is checked, if in IRQ or soft_irq context, the general x86_64 implementation are used instead. Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 3 + arch/x86/crypto/aesni-intel_asm.S | 896 +++++++++++++++++++++++++++++++++++++ arch/x86/crypto/aesni-intel_glue.c | 461 +++++++++++++++++++ arch/x86/include/asm/cpufeature.h | 1 + 4 files changed, 1361 insertions(+) create mode 100644 arch/x86/crypto/aesni-intel_asm.S create mode 100644 arch/x86/crypto/aesni-intel_glue.c (limited to 'arch') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 903de4aa509..ebe7deedd5b 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o +obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o @@ -19,3 +20,5 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o + +aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S new file mode 100644 index 00000000000..caba9960170 --- /dev/null +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -0,0 +1,896 @@ +/* + * Implement AES algorithm in Intel AES-NI instructions. 
+ * + * The white paper of AES-NI instructions can be downloaded from: + * http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf + * + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying + * Vinodh Gopal + * Kahraman Akdemir + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +.text + +#define STATE1 %xmm0 +#define STATE2 %xmm4 +#define STATE3 %xmm5 +#define STATE4 %xmm6 +#define STATE STATE1 +#define IN1 %xmm1 +#define IN2 %xmm7 +#define IN3 %xmm8 +#define IN4 %xmm9 +#define IN IN1 +#define KEY %xmm2 +#define IV %xmm3 + +#define KEYP %rdi +#define OUTP %rsi +#define INP %rdx +#define LEN %rcx +#define IVP %r8 +#define KLEN %r9d +#define T1 %r10 +#define TKEYP T1 +#define T2 %r11 + +_key_expansion_128: +_key_expansion_256a: + pshufd $0b11111111, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + movaps %xmm0, (%rcx) + add $0x10, %rcx + ret + +_key_expansion_192a: + pshufd $0b01010101, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + + movaps %xmm2, %xmm5 + movaps %xmm2, %xmm6 + pslldq $4, %xmm5 + pshufd $0b11111111, %xmm0, %xmm3 + pxor %xmm3, %xmm2 + pxor %xmm5, %xmm2 + + movaps %xmm0, %xmm1 + shufps $0b01000100, %xmm0, %xmm6 + movaps %xmm6, (%rcx) + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 16(%rcx) + add $0x20, %rcx + ret + +_key_expansion_192b: + pshufd $0b01010101, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + + movaps %xmm2, %xmm5 + pslldq $4, %xmm5 + pshufd $0b11111111, %xmm0, %xmm3 + pxor %xmm3, %xmm2 + pxor %xmm5, %xmm2 + + movaps %xmm0, (%rcx) + add $0x10, %rcx + ret + +_key_expansion_256b: + pshufd $0b10101010, %xmm1, %xmm1 + shufps $0b00010000, %xmm2, %xmm4 + pxor %xmm4, %xmm2 + shufps $0b10001100, %xmm2, %xmm4 + pxor %xmm4, %xmm2 + pxor %xmm1, %xmm2 + movaps %xmm2, (%rcx) + add $0x10, %rcx + ret + +/* + * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + * unsigned int key_len) + */ +ENTRY(aesni_set_key) + movups (%rsi), %xmm0 # user key (first 16 bytes) + movaps %xmm0, (%rdi) + lea 0x10(%rdi), %rcx # key addr + movl %edx, 480(%rdi) + pxor %xmm4, %xmm4 # xmm4 is assumed 0 in _key_expansion_x + cmp $24, %dl + jb .Lenc_key128 + je .Lenc_key192 + movups 0x10(%rsi), %xmm2 # other user key + movaps %xmm2, (%rcx) + add $0x10, %rcx + # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01 + call _key_expansion_256a + # aeskeygenassist $0x1, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01 + call _key_expansion_256b + # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02 + call _key_expansion_256a + # aeskeygenassist $0x2, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02 + call _key_expansion_256b + # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04 + call _key_expansion_256a + # aeskeygenassist $0x4, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04 + call _key_expansion_256b + # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08 + call _key_expansion_256a 
+ # aeskeygenassist $0x8, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08 + call _key_expansion_256b + # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10 + call _key_expansion_256a + # aeskeygenassist $0x10, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10 + call _key_expansion_256b + # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20 + call _key_expansion_256a + # aeskeygenassist $0x20, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20 + call _key_expansion_256b + # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40 + call _key_expansion_256a + jmp .Ldec_key +.Lenc_key192: + movq 0x10(%rsi), %xmm2 # other user key + # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01 + call _key_expansion_192a + # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02 + call _key_expansion_192b + # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04 + call _key_expansion_192a + # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08 + call _key_expansion_192b + # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10 + call _key_expansion_192a + # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20 + call _key_expansion_192b + # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40 + call _key_expansion_192a + # aeskeygenassist $0x80, %xmm2, %xmm1 # round 8 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x80 + call _key_expansion_192b + jmp .Ldec_key +.Lenc_key128: + # aeskeygenassist $0x1, %xmm0, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01 + call _key_expansion_128 + # aeskeygenassist $0x2, %xmm0, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02 + call _key_expansion_128 + # aeskeygenassist $0x4, %xmm0, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04 + call _key_expansion_128 + # aeskeygenassist $0x8, %xmm0, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08 + call _key_expansion_128 + # aeskeygenassist $0x10, %xmm0, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10 + call _key_expansion_128 + # aeskeygenassist $0x20, %xmm0, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20 + call _key_expansion_128 + # aeskeygenassist $0x40, %xmm0, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x40 + call _key_expansion_128 + # aeskeygenassist $0x80, %xmm0, %xmm1 # round 8 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x80 + call _key_expansion_128 + # aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x1b + call _key_expansion_128 + # aeskeygenassist $0x36, %xmm0, %xmm1 # round 10 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x36 + call _key_expansion_128 +.Ldec_key: + sub $0x10, %rcx + movaps (%rdi), %xmm0 + movaps (%rcx), %xmm1 + movaps %xmm0, 240(%rcx) + movaps %xmm1, 240(%rdi) + add $0x10, %rdi + lea 240-16(%rcx), %rsi +.align 4 +.Ldec_key_loop: + movaps (%rdi), %xmm0 + # aesimc %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x38, 0xdb, 0xc8 + movaps %xmm1, (%rsi) + add $0x10, %rdi + sub $0x10, %rsi + cmp %rcx, %rdi + jb .Ldec_key_loop + xor %rax, %rax + ret + +/* + * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + */ +ENTRY(aesni_enc) + movl 480(KEYP), KLEN # key length + movups (INP), STATE # input + call _aesni_enc1 + movups STATE, (OUTP) # output 
+ ret + +/* + * _aesni_enc1: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: round count + * STATE: initial state (input) + * output: + * STATE: finial state (output) + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_enc1: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE # round 0 + add $0x30, TKEYP + cmp $24, KLEN + jb .Lenc128 + lea 0x20(TKEYP), TKEYP + je .Lenc192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x50(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 +.align 4 +.Lenc192: + movaps -0x40(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x30(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 +.align 4 +.Lenc128: + movaps -0x20(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x10(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps (TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x10(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x20(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x30(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x40(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x50(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x60(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x70(TKEYP), KEY + # aesenclast KEY, STATE # last round + .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2 + ret + +/* + * _aesni_enc4: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: round count + * STATE1: initial state (input) + * STATE2 + * STATE3 + * STATE4 + * output: + * STATE1: finial state (output) + * STATE2 + * STATE3 + * STATE4 + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_enc4: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE1 # round 0 + pxor KEY, STATE2 + pxor KEY, STATE3 + pxor KEY, STATE4 + add $0x30, TKEYP + cmp $24, KLEN + jb .L4enc128 + lea 0x20(TKEYP), TKEYP + je .L4enc192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x50(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 +#.align 4 +.L4enc192: + movaps -0x40(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x30(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 +#.align 4 +.L4enc128: + movaps -0x20(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, 
STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x10(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps (TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x10(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x20(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x30(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x40(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x50(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x60(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x70(TKEYP), KEY + # aesenclast KEY, STATE1 # last round + .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2 + # aesenclast KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xe2 + # aesenclast KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xea + # aesenclast KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2 + ret + +/* + * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + */ +ENTRY(aesni_dec) + mov 480(KEYP), KLEN # key length + add $240, KEYP + movups (INP), STATE # input + call _aesni_dec1 + movups STATE, (OUTP) #output + ret + +/* + * _aesni_dec1: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: key length + * STATE: initial state (input) + * output: + * STATE: finial state (output) + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_dec1: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE # round 0 + add $0x30, TKEYP + cmp $24, KLEN + jb .Ldec128 + lea 0x20(TKEYP), TKEYP + je .Ldec192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x50(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 +.align 4 +.Ldec192: + movaps -0x40(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x30(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 +.align 4 +.Ldec128: + movaps -0x20(TKEYP), KEY + # aesdec KEY, STATE 
+ .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x10(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps (TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x10(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x20(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x30(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x40(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x50(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x60(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x70(TKEYP), KEY + # aesdeclast KEY, STATE # last round + .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2 + ret + +/* + * _aesni_dec4: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: key length + * STATE1: initial state (input) + * STATE2 + * STATE3 + * STATE4 + * output: + * STATE1: finial state (output) + * STATE2 + * STATE3 + * STATE4 + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_dec4: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE1 # round 0 + pxor KEY, STATE2 + pxor KEY, STATE3 + pxor KEY, STATE4 + add $0x30, TKEYP + cmp $24, KLEN + jb .L4dec128 + lea 0x20(TKEYP), TKEYP + je .L4dec192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x50(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 +.align 4 +.L4dec192: + movaps -0x40(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x30(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 +.align 4 +.L4dec128: + movaps -0x20(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x10(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps (TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x10(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 
0x20(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x30(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x40(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x50(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x60(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x70(TKEYP), KEY + # aesdeclast KEY, STATE1 # last round + .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2 + # aesdeclast KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xe2 + # aesdeclast KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xea + # aesdeclast KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2 + ret + +/* + * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len) + */ +ENTRY(aesni_ecb_enc) + test LEN, LEN # check length + jz .Lecb_enc_ret + mov 480(KEYP), KLEN + cmp $16, LEN + jb .Lecb_enc_ret + cmp $64, LEN + jb .Lecb_enc_loop1 +.align 4 +.Lecb_enc_loop4: + movups (INP), STATE1 + movups 0x10(INP), STATE2 + movups 0x20(INP), STATE3 + movups 0x30(INP), STATE4 + call _aesni_enc4 + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lecb_enc_loop4 + cmp $16, LEN + jb .Lecb_enc_ret +.align 4 +.Lecb_enc_loop1: + movups (INP), STATE1 + call _aesni_enc1 + movups STATE1, (OUTP) + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lecb_enc_loop1 +.Lecb_enc_ret: + ret + +/* + * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len); + */ +ENTRY(aesni_ecb_dec) + test LEN, LEN + jz .Lecb_dec_ret + mov 480(KEYP), KLEN + add $240, KEYP + cmp $16, LEN + jb .Lecb_dec_ret + cmp $64, LEN + jb .Lecb_dec_loop1 +.align 4 +.Lecb_dec_loop4: + movups (INP), STATE1 + movups 0x10(INP), STATE2 + movups 0x20(INP), STATE3 + movups 0x30(INP), STATE4 + call _aesni_dec4 + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lecb_dec_loop4 + cmp $16, LEN + jb .Lecb_dec_ret +.align 4 +.Lecb_dec_loop1: + movups (INP), STATE1 + call _aesni_dec1 + movups STATE1, (OUTP) + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lecb_dec_loop1 +.Lecb_dec_ret: + ret + +/* + * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +ENTRY(aesni_cbc_enc) + cmp $16, LEN + jb .Lcbc_enc_ret + mov 480(KEYP), KLEN + movups (IVP), STATE # load iv as initial state +.align 4 
+.Lcbc_enc_loop: + movups (INP), IN # load input + pxor IN, STATE + call _aesni_enc1 + movups STATE, (OUTP) # store output + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lcbc_enc_loop + movups STATE, (IVP) +.Lcbc_enc_ret: + ret + +/* + * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +ENTRY(aesni_cbc_dec) + cmp $16, LEN + jb .Lcbc_dec_ret + mov 480(KEYP), KLEN + add $240, KEYP + movups (IVP), IV + cmp $64, LEN + jb .Lcbc_dec_loop1 +.align 4 +.Lcbc_dec_loop4: + movups (INP), IN1 + movaps IN1, STATE1 + movups 0x10(INP), IN2 + movaps IN2, STATE2 + movups 0x20(INP), IN3 + movaps IN3, STATE3 + movups 0x30(INP), IN4 + movaps IN4, STATE4 + call _aesni_dec4 + pxor IV, STATE1 + pxor IN1, STATE2 + pxor IN2, STATE3 + pxor IN3, STATE4 + movaps IN4, IV + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lcbc_dec_loop4 + cmp $16, LEN + jb .Lcbc_dec_ret +.align 4 +.Lcbc_dec_loop1: + movups (INP), IN + movaps IN, STATE + call _aesni_dec1 + pxor IV, STATE + movups STATE, (OUTP) + movaps IN, IV + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lcbc_dec_loop1 + movups IV, (IVP) +.Lcbc_dec_ret: + ret diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c new file mode 100644 index 00000000000..02af0af6549 --- /dev/null +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -0,0 +1,461 @@ +/* + * Support for Intel AES-NI instructions. This file contains glue + * code, the real AES implementation is in intel-aes_asm.S. + * + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct async_aes_ctx { + struct cryptd_ablkcipher *cryptd_tfm; +}; + +#define AESNI_ALIGN 16 +#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) + +asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); +asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in); +asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in); +asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len); +asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len); +asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); +asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + +static inline int kernel_fpu_using(void) +{ + if (in_interrupt() && !(read_cr0() & X86_CR0_TS)) + return 1; + return 0; +} + +static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) +{ + unsigned long addr = (unsigned long)raw_ctx; + unsigned long align = AESNI_ALIGN; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct crypto_aes_ctx *)ALIGN(addr, align); +} + +static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, + const u8 *in_key, unsigned int key_len) +{ + struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); + u32 *flags = &tfm->crt_flags; + int err; + + if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && + key_len != AES_KEYSIZE_256) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + if (kernel_fpu_using()) + err = crypto_aes_expand_key(ctx, in_key, key_len); + else { + kernel_fpu_begin(); + err = aesni_set_key(ctx, in_key, key_len); + kernel_fpu_end(); + } + + return err; +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); + + if (kernel_fpu_using()) + crypto_aes_encrypt_x86(ctx, dst, src); + else { + kernel_fpu_begin(); + aesni_enc(ctx, dst, src); + kernel_fpu_end(); + } +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); + + if (kernel_fpu_using()) + crypto_aes_decrypt_x86(ctx, dst, src); + else { + kernel_fpu_begin(); + aesni_dec(ctx, dst, src); + kernel_fpu_end(); + } +} + +static struct crypto_alg aesni_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-aesni", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +static int ecb_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + 
+ kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static int ecb_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg blk_ecb_alg = { + .cra_name = "__ecb-aes-aesni", + .cra_driver_name = "__driver-ecb-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + }, +}; + +static int cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static int cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg blk_cbc_alg = { + .cra_name = "__cbc-aes-aesni", + .cra_driver_name = "__driver-cbc-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + }, +}; + +static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + return 
crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len); +} + +static int ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (kernel_fpu_using()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_ablkcipher_encrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + return crypto_blkcipher_crt(desc.tfm)->encrypt( + &desc, req->dst, req->src, req->nbytes); + } +} + +static int ablk_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (kernel_fpu_using()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_ablkcipher_decrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + return crypto_blkcipher_crt(desc.tfm)->decrypt( + &desc, req->dst, req->src, req->nbytes); + } +} + +static void ablk_exit(struct crypto_tfm *tfm) +{ + struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + cryptd_free_ablkcipher(ctx->cryptd_tfm); +} + +static void ablk_init_common(struct crypto_tfm *tfm, + struct cryptd_ablkcipher *cryptd_tfm) +{ + struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(&cryptd_tfm->base); +} + +static int ablk_ecb_init(struct crypto_tfm *tfm) +{ + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + ablk_init_common(tfm, cryptd_tfm); + return 0; +} + +static struct crypto_alg ablk_ecb_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list), + .cra_init = ablk_ecb_init, + .cra_exit = ablk_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + }, + }, +}; + +static int ablk_cbc_init(struct crypto_tfm *tfm) +{ + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + ablk_init_common(tfm, cryptd_tfm); + return 0; +} + +static struct crypto_alg ablk_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list), + .cra_init = ablk_cbc_init, + 
.cra_exit = ablk_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + }, + }, +}; + +static int __init aesni_init(void) +{ + int err; + + if (!cpu_has_aes) { + printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); + return -ENODEV; + } + if ((err = crypto_register_alg(&aesni_alg))) + goto aes_err; + if ((err = crypto_register_alg(&blk_ecb_alg))) + goto blk_ecb_err; + if ((err = crypto_register_alg(&blk_cbc_alg))) + goto blk_cbc_err; + if ((err = crypto_register_alg(&ablk_ecb_alg))) + goto ablk_ecb_err; + if ((err = crypto_register_alg(&ablk_cbc_alg))) + goto ablk_cbc_err; + + return err; + +ablk_cbc_err: + crypto_unregister_alg(&ablk_ecb_alg); +ablk_ecb_err: + crypto_unregister_alg(&blk_cbc_alg); +blk_cbc_err: + crypto_unregister_alg(&blk_ecb_alg); +blk_ecb_err: + crypto_unregister_alg(&aesni_alg); +aes_err: + return err; +} + +static void __exit aesni_exit(void) +{ + crypto_unregister_alg(&ablk_cbc_alg); + crypto_unregister_alg(&ablk_ecb_alg); + crypto_unregister_alg(&blk_cbc_alg); + crypto_unregister_alg(&blk_ecb_alg); + crypto_unregister_alg(&aesni_alg); +} + +module_init(aesni_init); +module_exit(aesni_exit); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("aes"); diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 7301e60dc4a..0beba0d1468 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) +#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -- cgit v1.2.3 From 563f346d04e8373739240604a51ce8529dd9f07e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 18 Jan 2009 20:33:33 +1100 Subject: crypto: sha-s390 - Switch to shash This patch converts the S390 sha algorithms to the new shash interface. With fixes by Jan Glauber. Signed-off-by: Herbert Xu --- arch/s390/crypto/sha.h | 6 ++-- arch/s390/crypto/sha1_s390.c | 40 +++++++++++---------- arch/s390/crypto/sha256_s390.c | 40 +++++++++++---------- arch/s390/crypto/sha512_s390.c | 80 ++++++++++++++++++++++-------------------- arch/s390/crypto/sha_common.c | 20 ++++++----- 5 files changed, 100 insertions(+), 86 deletions(-) (limited to 'arch') diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index 1ceafa571ea..f4e9dc71675 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -29,7 +29,9 @@ struct s390_sha_ctx { int func; /* KIMD function to use */ }; -void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len); -void s390_sha_final(struct crypto_tfm *tfm, u8 *out); +struct shash_desc; + +int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len); +int s390_sha_final(struct shash_desc *desc, u8 *out); #endif diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index b3cb5a89b00..e85ba348722 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -23,17 +23,17 @@ * any later version. 
* */ +#include #include #include -#include #include #include "crypt_s390.h" #include "sha.h" -static void sha1_init(struct crypto_tfm *tfm) +static int sha1_init(struct shash_desc *desc) { - struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA1_H0; sctx->state[1] = SHA1_H1; @@ -42,34 +42,36 @@ static void sha1_init(struct crypto_tfm *tfm) sctx->state[4] = SHA1_H4; sctx->count = 0; sctx->func = KIMD_SHA_1; + + return 0; } -static struct crypto_alg alg = { - .cra_name = "sha1", - .cra_driver_name= "sha1-s390", - .cra_priority = CRYPT_S390_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_sha_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA1_DIGEST_SIZE, - .dia_init = sha1_init, - .dia_update = s390_sha_update, - .dia_final = s390_sha_final } } +static struct shash_alg alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = sha1_init, + .update = s390_sha_update, + .final = s390_sha_final, + .descsize = sizeof(struct s390_sha_ctx), + .base = { + .cra_name = "sha1", + .cra_driver_name= "sha1-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init sha1_s390_init(void) { if (!crypt_s390_func_available(KIMD_SHA_1)) return -EOPNOTSUPP; - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit sha1_s390_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(sha1_s390_init); diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 19c03fb6ba7..f9fefc56963 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -16,17 +16,17 @@ * any later version. 
* */ +#include #include #include -#include #include #include "crypt_s390.h" #include "sha.h" -static void sha256_init(struct crypto_tfm *tfm) +static int sha256_init(struct shash_desc *desc) { - struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; @@ -38,22 +38,24 @@ static void sha256_init(struct crypto_tfm *tfm) sctx->state[7] = SHA256_H7; sctx->count = 0; sctx->func = KIMD_SHA_256; + + return 0; } -static struct crypto_alg alg = { - .cra_name = "sha256", - .cra_driver_name = "sha256-s390", - .cra_priority = CRYPT_S390_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_sha_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA256_DIGEST_SIZE, - .dia_init = sha256_init, - .dia_update = s390_sha_update, - .dia_final = s390_sha_final } } +static struct shash_alg alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = sha256_init, + .update = s390_sha_update, + .final = s390_sha_final, + .descsize = sizeof(struct s390_sha_ctx), + .base = { + .cra_name = "sha256", + .cra_driver_name= "sha256-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int sha256_s390_init(void) @@ -61,12 +63,12 @@ static int sha256_s390_init(void) if (!crypt_s390_func_available(KIMD_SHA_256)) return -EOPNOTSUPP; - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit sha256_s390_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(sha256_s390_init); diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 23c7861f6ae..420acf41b72 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -12,16 +12,16 @@ * any later version. 
* */ +#include #include #include -#include #include "sha.h" #include "crypt_s390.h" -static void sha512_init(struct crypto_tfm *tfm) +static int sha512_init(struct shash_desc *desc) { - struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL; *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL; @@ -33,29 +33,31 @@ static void sha512_init(struct crypto_tfm *tfm) *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; + + return 0; } -static struct crypto_alg sha512_alg = { - .cra_name = "sha512", - .cra_driver_name = "sha512-s390", - .cra_priority = CRYPT_S390_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_sha_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha512_alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA512_DIGEST_SIZE, - .dia_init = sha512_init, - .dia_update = s390_sha_update, - .dia_final = s390_sha_final } } +static struct shash_alg sha512_alg = { + .digestsize = SHA512_DIGEST_SIZE, + .init = sha512_init, + .update = s390_sha_update, + .final = s390_sha_final, + .descsize = sizeof(struct s390_sha_ctx), + .base = { + .cra_name = "sha512", + .cra_driver_name= "sha512-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; MODULE_ALIAS("sha512"); -static void sha384_init(struct crypto_tfm *tfm) +static int sha384_init(struct shash_desc *desc) { - struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL; *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL; @@ -67,22 +69,24 @@ static void sha384_init(struct crypto_tfm *tfm) *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; + + return 0; } -static struct crypto_alg sha384_alg = { - .cra_name = "sha384", - .cra_driver_name = "sha384-s390", - .cra_priority = CRYPT_S390_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_sha_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha384_alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA384_DIGEST_SIZE, - .dia_init = sha384_init, - .dia_update = s390_sha_update, - .dia_final = s390_sha_final } } +static struct shash_alg sha384_alg = { + .digestsize = SHA384_DIGEST_SIZE, + .init = sha384_init, + .update = s390_sha_update, + .final = s390_sha_final, + .descsize = sizeof(struct s390_sha_ctx), + .base = { + .cra_name = "sha384", + .cra_driver_name= "sha384-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_ctxsize = sizeof(struct s390_sha_ctx), + .cra_module = THIS_MODULE, + } }; MODULE_ALIAS("sha384"); @@ -93,18 +97,18 @@ static int __init init(void) if (!crypt_s390_func_available(KIMD_SHA_512)) return -EOPNOTSUPP; - if ((ret = crypto_register_alg(&sha512_alg)) < 0) + if ((ret = crypto_register_shash(&sha512_alg)) < 0) goto out; - if ((ret = crypto_register_alg(&sha384_alg)) < 0) - crypto_unregister_alg(&sha512_alg); + if ((ret = crypto_register_shash(&sha384_alg)) < 0) + crypto_unregister_shash(&sha512_alg); out: return ret; } static void __exit fini(void) { - crypto_unregister_alg(&sha512_alg); - crypto_unregister_alg(&sha384_alg); + crypto_unregister_shash(&sha512_alg); + 
crypto_unregister_shash(&sha384_alg); } module_init(init); diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index 9d6eb8c3d37..7903ec47e6b 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -13,14 +13,14 @@ * */ -#include +#include #include "sha.h" #include "crypt_s390.h" -void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) +int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); - unsigned int bsize = crypto_tfm_alg_blocksize(tfm); + struct s390_sha_ctx *ctx = shash_desc_ctx(desc); + unsigned int bsize = crypto_shash_blocksize(desc->tfm); unsigned int index; int ret; @@ -51,13 +51,15 @@ void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) store: if (len) memcpy(ctx->buf + index , data, len); + + return 0; } EXPORT_SYMBOL_GPL(s390_sha_update); -void s390_sha_final(struct crypto_tfm *tfm, u8 *out) +int s390_sha_final(struct shash_desc *desc, u8 *out) { - struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); - unsigned int bsize = crypto_tfm_alg_blocksize(tfm); + struct s390_sha_ctx *ctx = shash_desc_ctx(desc); + unsigned int bsize = crypto_shash_blocksize(desc->tfm); u64 bits; unsigned int index, end, plen; int ret; @@ -87,9 +89,11 @@ void s390_sha_final(struct crypto_tfm *tfm, u8 *out) BUG_ON(ret != end); /* copy digest to out */ - memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm))); + memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); /* wipe context */ memset(ctx, 0, sizeof *ctx); + + return 0; } EXPORT_SYMBOL_GPL(s390_sha_final); -- cgit v1.2.3 From 049359d655277c382683a6030ae0bac485568ffc Mon Sep 17 00:00:00 2001 From: James Hsiao Date: Thu, 5 Feb 2009 16:18:13 +1100 Subject: crypto: amcc - Add crypt4xx driver This patch adds support for AMCC ppc4xx security device driver. This is the initial release that includes the driver framework with AES and SHA1 algorithms support. The remaining algorithms will be released in the near future. 
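As an aside, a platform driver would typically bind to device-tree nodes like the ones added below through an OF match table; the following is only an illustrative sketch against the 2.6.29-era of_platform API, not code taken from the actual crypto4xx sources:

#include <linux/of_platform.h>

/* Sketch only: match the generic compatible string carried by the new nodes. */
static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};

Because both boards also carry a SoC-specific string ("amcc,ppc460ex-crypto" and "amcc,ppc405ex-crypto"), a driver can match at whichever level of specificity it needs.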
Signed-off-by: James Hsiao Signed-off-by: Herbert Xu --- arch/powerpc/boot/dts/canyonlands.dts | 7 +++++++ arch/powerpc/boot/dts/kilauea.dts | 7 +++++++ 2 files changed, 14 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts index 8b5ba8261a3..4447def69dc 100644 --- a/arch/powerpc/boot/dts/canyonlands.dts +++ b/arch/powerpc/boot/dts/canyonlands.dts @@ -127,6 +127,13 @@ dcr-reg = <0x010 0x002>; }; + CRYPTO: crypto@180000 { + compatible = "amcc,ppc460ex-crypto", "amcc,ppc4xx-crypto"; + reg = <4 0x00180000 0x80400>; + interrupt-parent = <&UIC0>; + interrupts = <0x1d 0x4>; + }; + MAL0: mcmal { compatible = "ibm,mcmal-460ex", "ibm,mcmal2"; dcr-reg = <0x180 0x062>; diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts index 2804444812e..5e6b08ff6f6 100644 --- a/arch/powerpc/boot/dts/kilauea.dts +++ b/arch/powerpc/boot/dts/kilauea.dts @@ -97,6 +97,13 @@ 0x6 0x4>; /* ECC SEC Error */ }; + CRYPTO: crypto@ef700000 { + compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto"; + reg = <0xef700000 0x80400>; + interrupt-parent = <&UIC0>; + interrupts = <0x17 0x2>; + }; + MAL0: mcmal { compatible = "ibm,mcmal-405ex", "ibm,mcmal2"; dcr-reg = <0x180 0x062>; -- cgit v1.2.3 From 74019224ac34b044b44a31dd89a54e3477db4896 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 18 Feb 2009 12:23:29 +0100 Subject: timers: add mod_timer_pending() Impact: new timer API Based on an idea from Martin Josefsson with the help of Patrick McHardy and Stephen Hemminger: introduce the mod_timer_pending() API which is a mod_timer() offspring that is an invariant on already removed timers. (regular mod_timer() re-activates non-pending timers.) This is useful for the networking code in that it can allow unserialized mod_timer_pending() timer-forwarding calls, but a single del_timer*() will stop the timer from being reactivated again. Also while at it: - optimize the regular mod_timer() path some more, the timer-stat and a debug check was needlessly duplicated in __mod_timer(). - make the exports come straight after the function, as most other exports in timer.c already did. - eliminate __mod_timer() as an external API, change the users to mod_timer(). The regular mod_timer() code path is not impacted significantly, due to inlining optimizations and due to the simplifications. Based-on-patch-from: Stephen Hemminger Acked-by: Stephen Hemminger Cc: "David S. 
Miller" Cc: Patrick McHardy Cc: netdev@vger.kernel.org Cc: Oleg Nesterov Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/powerpc/platforms/cell/spufs/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 6a0ad196aeb..f085369301b 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx) list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); set_bit(ctx->prio, spu_prio->bitmap); if (!spu_prio->nr_waiting++) - __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); + mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); } } -- cgit v1.2.3 From de5483029b8f18e23395d8fd4f4ef6ae15beb809 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Thu, 19 Feb 2009 12:20:17 +0530 Subject: x86: include/asm/processor.h remove double declaration of print_cpu_info Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/processor.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index dabab1a19dd..72914d0315e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -403,7 +403,6 @@ DECLARE_PER_CPU(unsigned long, stack_canary); #endif #endif /* X86_64 */ -extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; -- cgit v1.2.3 From 71d8f9784a99991a7571dd20226f5f450dda7f34 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 18 Feb 2009 11:07:52 -0800 Subject: x86: syscalls.h: remove asmlinkage from declaration of sys_rt_sigreturn() Impact: cleanup asmlinkage for sys_rt_sigreturn() no longer exists in arch/x86/kernel/signal.c. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/include/asm/syscalls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 258ef730aaa..7043408f690 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -82,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *); /* kernel/signal_64.c */ asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, struct pt_regs *); -asmlinkage long sys_rt_sigreturn(struct pt_regs *); +long sys_rt_sigreturn(struct pt_regs *); /* kernel/sys_x86_64.c */ asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, -- cgit v1.2.3 From 95695547a7db44b88a7ee36cf5df188de267e99e Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:18 +0300 Subject: x86: asm linkage - introduce GLOBAL macro If the code is time critical and this entry is called from other places, we use ENTRY to have it globally defined and especially aligned. By contrast, we have some snippets which are size critical. So we use the plain ".globl name; name:" directive. Introduce the GLOBAL macro for this.
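Side by side, the distinction looks roughly like this (the generic ENTRY body is paraphrased from include/linux/linkage.h; GLOBAL is the macro added in the hunk below):

/* time-critical entry points: globally visible and aligned */
#define ENTRY(name) \
	.globl name; \
	ALIGN; \
	name:

/* size-critical snippets: globally visible, no alignment padding */
#define GLOBAL(name) \
	.globl name; \
	name: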
Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/include/asm/linkage.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 5d98d0b68ff..2ecf0f6fc9e 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -52,6 +52,10 @@ #endif +#define GLOBAL(name) \ + .globl name; \ + name: + #ifdef CONFIG_X86_ALIGNMENT_16 #define __ALIGN .align 16,0x90 #define __ALIGN_STR ".align 16,0x90" -- cgit v1.2.3 From 1b25f3b4e18d1acffeb41258a18f13db71da9a7a Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:19 +0300 Subject: x86: linkage - get rid of _X86 macros Impact: cleanup There was an attempt to bring build-time checking for missed ENTRY_X86/END_X86 and KPROBE... pairs. Using them will add messy in code. Get just rid of them. This commit could be easily restored if the need appear in future. Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/include/asm/linkage.h | 60 ------------------------------------------ 1 file changed, 60 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 2ecf0f6fc9e..9320e2a8a26 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -61,65 +61,5 @@ #define __ALIGN_STR ".align 16,0x90" #endif -/* - * to check ENTRY_X86/END_X86 and - * KPROBE_ENTRY_X86/KPROBE_END_X86 - * unbalanced-missed-mixed appearance - */ -#define __set_entry_x86 .set ENTRY_X86_IN, 0 -#define __unset_entry_x86 .set ENTRY_X86_IN, 1 -#define __set_kprobe_x86 .set KPROBE_X86_IN, 0 -#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1 - -#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed" - -#define __check_entry_x86 \ - .ifdef ENTRY_X86_IN; \ - .ifeq ENTRY_X86_IN; \ - __macro_err_x86; \ - .abort; \ - .endif; \ - .endif - -#define __check_kprobe_x86 \ - .ifdef KPROBE_X86_IN; \ - .ifeq KPROBE_X86_IN; \ - __macro_err_x86; \ - .abort; \ - .endif; \ - .endif - -#define __check_entry_kprobe_x86 \ - __check_entry_x86; \ - __check_kprobe_x86 - -#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86 - -#define ENTRY_X86(name) \ - __check_entry_kprobe_x86; \ - __set_entry_x86; \ - .globl name; \ - __ALIGN; \ - name: - -#define END_X86(name) \ - __unset_entry_x86; \ - __check_entry_kprobe_x86; \ - .size name, .-name - -#define KPROBE_ENTRY_X86(name) \ - __check_entry_kprobe_x86; \ - __set_kprobe_x86; \ - .pushsection .kprobes.text, "ax"; \ - .globl name; \ - __ALIGN; \ - name: - -#define KPROBE_END_X86(name) \ - __unset_kprobe_x86; \ - __check_entry_kprobe_x86; \ - .size name, .-name; \ - .popsection - #endif /* _ASM_X86_LINKAGE_H */ -- cgit v1.2.3 From 2f7955509710fd378a1ac96e19d29d5a0e3301fd Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:20 +0300 Subject: x86: copy.S - use GLOBAL,ENDPROC macros Impact: cleanup Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/boot/copy.S | 40 ++++++++++++++-------------------------- 1 file changed, 14 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index ef50c84e8b4..11f272c6f5e 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S @@ -8,6 +8,8 @@ * * ----------------------------------------------------------------------- */ +#include + /* * Memory copy routines */ @@ -15,9 +17,7 @@ .code16gcc .text - .globl memcpy - .type memcpy, @function -memcpy: +GLOBAL(memcpy) pushw %si 
pushw %di movw %ax, %di @@ -31,11 +31,9 @@ memcpy: popw %di popw %si ret - .size memcpy, .-memcpy +ENDPROC(memcpy) - .globl memset - .type memset, @function -memset: +GLOBAL(memset) pushw %di movw %ax, %di movzbl %dl, %eax @@ -48,52 +46,42 @@ memset: rep; stosb popw %di ret - .size memset, .-memset +ENDPROC(memset) - .globl copy_from_fs - .type copy_from_fs, @function -copy_from_fs: +GLOBAL(copy_from_fs) pushw %ds pushw %fs popw %ds call memcpy popw %ds ret - .size copy_from_fs, .-copy_from_fs +ENDPROC(copy_from_fs) - .globl copy_to_fs - .type copy_to_fs, @function -copy_to_fs: +GLOBAL(copy_to_fs) pushw %es pushw %fs popw %es call memcpy popw %es ret - .size copy_to_fs, .-copy_to_fs +ENDPROC(copy_to_fs) #if 0 /* Not currently used, but can be enabled as needed */ - - .globl copy_from_gs - .type copy_from_gs, @function -copy_from_gs: +GLOBAL(copy_from_gs) pushw %ds pushw %gs popw %ds call memcpy popw %ds ret - .size copy_from_gs, .-copy_from_gs - .globl copy_to_gs +ENDPROC(copy_from_gs) - .type copy_to_gs, @function -copy_to_gs: +GLOBAL(copy_to_gs) pushw %es pushw %gs popw %es call memcpy popw %es ret - .size copy_to_gs, .-copy_to_gs - +ENDPROC(copy_to_gs) #endif -- cgit v1.2.3 From 324bda9e47f53aebec1376ee89bba8128c8455e2 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:21 +0300 Subject: x86: pmjump - use GLOBAL,ENDPROC macros Impact: cleanup We are in setup stage so we use GLOBAL instead of ENTRY and do not increase code size. Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/boot/pmjump.S | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S index 141b6e20ed3..019c17a7585 100644 --- a/arch/x86/boot/pmjump.S +++ b/arch/x86/boot/pmjump.S @@ -15,18 +15,15 @@ #include #include #include +#include .text - - .globl protected_mode_jump - .type protected_mode_jump, @function - .code16 /* * void protected_mode_jump(u32 entrypoint, u32 bootparams); */ -protected_mode_jump: +GLOBAL(protected_mode_jump) movl %edx, %esi # Pointer to boot_params table xorl %ebx, %ebx @@ -47,12 +44,10 @@ protected_mode_jump: .byte 0x66, 0xea # ljmpl opcode 2: .long in_pm32 # offset .word __BOOT_CS # segment - - .size protected_mode_jump, .-protected_mode_jump +ENDPROC(protected_mode_jump) .code32 - .type in_pm32, @function -in_pm32: +GLOBAL(in_pm32) # Set up data segments for flat 32-bit mode movl %ecx, %ds movl %ecx, %es @@ -78,5 +73,4 @@ in_pm32: lldt %cx jmpl *%eax # Jump to the 32-bit entrypoint - - .size in_pm32, .-in_pm32 +ENDPROC(in_pm32) -- cgit v1.2.3 From 2d4eeecb98ade6a736940d43311275b7d32dab21 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:22 +0300 Subject: x86: compressed head_64 - use ENTRY,ENDPROC macros Impact: clenaup Linker script will put startup_32 at predefined address so using ENTRY will not bloat the code size. 
Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 1d5dff4123e..ba9229ba7f0 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -35,9 +35,7 @@ .section ".text.head" .code32 - .globl startup_32 - -startup_32: +ENTRY(startup_32) cld /* test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ @@ -176,6 +174,7 @@ startup_32: /* Jump from 32bit compatibility mode into 64bit mode. */ lret +ENDPROC(startup_32) no_longmode: /* This isn't an x86-64 CPU so hang */ @@ -295,7 +294,6 @@ relocated: call decompress_kernel popq %rsi - /* * Jump to the decompressed kernel. */ -- cgit v1.2.3 From cb425afd2183e90a481bb211ff49361a117a3ecc Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 14 Feb 2009 00:50:23 +0300 Subject: x86: compressed head_32 - use ENTRY,ENDPROC macros Impact: clenaup Linker script will put startup_32 at predefined address so using startup_32 will not bloat the code size. Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_32.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 29c5fbf0839..9f20d379406 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -30,9 +30,7 @@ #include .section ".text.head","ax",@progbits - .globl startup_32 - -startup_32: +ENTRY(startup_32) cld /* test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ @@ -113,6 +111,8 @@ startup_32: */ leal relocated(%ebx), %eax jmp *%eax +ENDPROC(startup_32) + .section ".text" relocated: -- cgit v1.2.3 From 42f8faecf7a88371de0f30aebb052d1ae51762c0 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 17 Feb 2009 11:46:42 +0800 Subject: x86: use percpu data for 4k hardirq and softirq stacks Impact: economize memory for large NR_CPUS percpu data is setup earlier than irq, we can use percpu data to economize memory. 
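The shape of the conversion is the usual switch from an NR_CPUS-sized array to a per-cpu variable; a minimal sketch with a placeholder type (the real change below converts the hardirq/softirq stack arrays and contexts):

#include <linux/percpu.h>

struct foo { int val; };		/* placeholder type */

/* before: statically sized for the configured worst case */
static struct foo foo_stack[NR_CPUS];

/* after: one copy per possible CPU, placed in the per-cpu area */
static DEFINE_PER_CPU(struct foo, foo_pcpu);

static void foo_touch(int cpu)
{
	per_cpu(foo_pcpu, cpu).val++;	/* a given CPU's copy */
	__get_cpu_var(foo_pcpu).val++;	/* the local CPU's copy */
}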
Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- arch/x86/kernel/irq_32.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index e0f29be8ab0..90cd94aba69 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { } union irq_ctx { struct thread_info tinfo; u32 stack[THREAD_SIZE/sizeof(u32)]; -}; +} __attribute__((aligned(PAGE_SIZE))); -static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; -static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; +static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); +static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); -static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; -static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; +static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack); +static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack); static void call_on_stack(void *func, void *stack) { @@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) u32 *isp, arg1, arg2; curctx = (union irq_ctx *) current_thread_info(); - irqctx = hardirq_ctx[smp_processor_id()]; + irqctx = __get_cpu_var(hardirq_ctx); /* * this is where we switch to the IRQ stack. However, if we are @@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu) { union irq_ctx *irqctx; - if (hardirq_ctx[cpu]) + if (per_cpu(hardirq_ctx, cpu)) return; - irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; + irqctx = &per_cpu(hardirq_stack, cpu); irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); - hardirq_ctx[cpu] = irqctx; + per_cpu(hardirq_ctx, cpu) = irqctx; - irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; + irqctx = &per_cpu(softirq_stack, cpu); irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; irqctx->tinfo.preempt_count = 0; irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); - softirq_ctx[cpu] = irqctx; + per_cpu(softirq_ctx, cpu) = irqctx; printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", - cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); } void irq_ctx_exit(int cpu) { - hardirq_ctx[cpu] = NULL; + per_cpu(hardirq_ctx, cpu) = NULL; } asmlinkage void do_softirq(void) @@ -169,7 +170,7 @@ asmlinkage void do_softirq(void) if (local_softirq_pending()) { curctx = current_thread_info(); - irqctx = softirq_ctx[smp_processor_id()]; + irqctx = __get_cpu_var(softirq_ctx); irqctx->tinfo.task = curctx->task; irqctx->tinfo.previous_esp = current_stack_pointer; -- cgit v1.2.3 From b36128c830a8f5bd7d4981f5b0b69950f5928ee6 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 20 Feb 2009 16:29:08 +0900 Subject: alloc_percpu: change percpu_ptr to per_cpu_ptr Impact: cleanup There are two allocated per-cpu accessor macros with almost identical spelling. The original and far more popular is per_cpu_ptr (44 files), so change over the other 4 files. 
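For reference, the surviving accessor pairs with alloc_percpu() like this; the names here are placeholders, not taken from the converted files:

#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct stats { unsigned long hits; };	/* placeholder type */

static struct stats *stats;		/* one copy per CPU */

static int __init stats_init(void)
{
	int cpu;

	stats = alloc_percpu(struct stats);
	if (!stats)
		return -ENOMEM;

	/* per_cpu_ptr() - not the removed percpu_ptr() - names CPU cpu's copy */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->hits = 0;

	return 0;
}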
tj: kill percpu_ptr() and update UP too Signed-off-by: Rusty Russell Cc: mingo@redhat.com Cc: lenb@kernel.org Cc: cpufreq@vger.kernel.org Signed-off-by: Tejun Heo --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4b1c319d30c..22590cf688a 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (!data) return -ENOMEM; - data->acpi_data = percpu_ptr(acpi_perf_data, cpu); + data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); per_cpu(drv_data, cpu) = data; if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) -- cgit v1.2.3 From f0aa6617903648077dffe5cfcf7c4458f4610fa7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 20 Feb 2009 16:29:08 +0900 Subject: vmalloc: implement vm_area_register_early() Impact: allow multiple early vm areas There are places where kernel VM area needs to be allocated before vmalloc is initialized. This is done by allocating a static vm_struct, initializing several fields and linking it to vmlist; vmalloc initialization later picks these up from vmlist. This is currently done manually, and if there is more than one such area, there is no defined way to arbitrate who gets which address. This patch implements vm_area_register_early(), which takes a vm_struct with its flags and size initialized, assigns an address to it and puts it on the vmlist. This way, multiple early vm areas can determine which addresses they should use. The only current user - alpha mm init - is converted to use it. Signed-off-by: Tejun Heo --- arch/alpha/mm/init.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 5d7a16eab31..df6df025ded 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -189,9 +189,21 @@ callback_init(void * kernel_end) if (alpha_using_srm) { static struct vm_struct console_remap_vm; - unsigned long vaddr = VMALLOC_START; + unsigned long nr_pages = 0; + unsigned long vaddr; unsigned long i, j; + /* calculate needed size */ + for (i = 0; i < crb->map_entries; ++i) + nr_pages += crb->map[i].count; + + /* register the vm area */ + console_remap_vm.flags = VM_ALLOC; + console_remap_vm.size = nr_pages << PAGE_SHIFT; + vm_area_register_early(&console_remap_vm); + + vaddr = (unsigned long)console_remap_vm.addr; + /* Set up the third level PTEs and update the virtual addresses of the CRB entries. */ for (i = 0; i < crb->map_entries; ++i) { @@ -213,12 +225,6 @@ callback_init(void * kernel_end) vaddr += PAGE_SIZE; } } - - /* Let vmalloc know that we've allocated some space. */ - console_remap_vm.flags = VM_ALLOC; - console_remap_vm.addr = (void *) VMALLOC_START; - console_remap_vm.size = vaddr - VMALLOC_START; - vmlist = &console_remap_vm; } callback_init_done = 1; -- cgit v1.2.3 From 11124411aa95827404d6bfdfc14c908e1b54513c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 20 Feb 2009 16:29:09 +0900 Subject: x86: convert to the new dynamic percpu allocator Impact: use new dynamic allocator, unified access to static/dynamic percpu memory Convert to the new dynamic percpu allocator.
* implement populate_extra_pte() for both 32 and 64 * update setup_per_cpu_areas() to use pcpu_setup_static() * define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() * define config HAVE_DYNAMIC_PER_CPU_AREA Signed-off-by: Tejun Heo --- arch/x86/Kconfig | 3 ++ arch/x86/include/asm/percpu.h | 8 ++++++ arch/x86/include/asm/pgtable.h | 1 + arch/x86/kernel/setup_percpu.c | 62 +++++++++++++++++++++++++++--------------- arch/x86/mm/init_32.c | 10 +++++++ arch/x86/mm/init_64.c | 19 +++++++++++++ 6 files changed, 81 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f760a22f95d..d3f6eadfd4b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -135,6 +135,9 @@ config ARCH_HAS_CACHE_LINE_SIZE config HAVE_SETUP_PER_CPU_AREA def_bool y +config HAVE_DYNAMIC_PER_CPU_AREA + def_bool y + config HAVE_CPUMASK_OF_CPU_MAP def_bool X86_64_SMP diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index aee103b26d0..8f1d2fbec1d 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -43,6 +43,14 @@ #else /* ...!ASSEMBLY */ #include +#include + +#define __addr_to_pcpu_ptr(addr) \ + (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ + + (unsigned long)__per_cpu_start) +#define __pcpu_ptr_to_addr(ptr) \ + (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ + - (unsigned long)__per_cpu_start) #ifdef CONFIG_SMP #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 6f7c102018b..dd91c2515c6 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -402,6 +402,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, /* Install a pte for a particular vaddr in kernel space. 
*/ void set_pte_vaddr(unsigned long vaddr, pte_t pte); +void populate_extra_pte(unsigned long vaddr); #ifdef CONFIG_X86_32 extern void native_pagetable_setup_start(pgd_t *base); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d992e6cff73..2dce4355821 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -61,38 +61,56 @@ static inline void setup_percpu_segment(int cpu) */ void __init setup_per_cpu_areas(void) { - ssize_t size; - char *ptr; - int cpu; - - /* Copy section for each CPU (we discard the original) */ - size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE); + ssize_t size = __per_cpu_end - __per_cpu_start; + unsigned int nr_cpu_pages = DIV_ROUND_UP(size, PAGE_SIZE); + static struct page **pages; + size_t pages_size; + unsigned int cpu, i, j; + unsigned long delta; + size_t pcpu_unit_size; pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); + pr_info("PERCPU: Allocating %zd bytes for static per cpu data\n", size); - pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); + pages_size = nr_cpu_pages * num_possible_cpus() * sizeof(pages[0]); + pages = alloc_bootmem(pages_size); + j = 0; for_each_possible_cpu(cpu) { + void *ptr; + + for (i = 0; i < nr_cpu_pages; i++) { #ifndef CONFIG_NEED_MULTIPLE_NODES - ptr = alloc_bootmem_pages(size); + ptr = alloc_bootmem_pages(PAGE_SIZE); #else - int node = early_cpu_to_node(cpu); - if (!node_online(node) || !NODE_DATA(node)) { - ptr = alloc_bootmem_pages(size); - pr_info("cpu %d has no node %d or node-local memory\n", - cpu, node); - pr_debug("per cpu data for cpu%d at %016lx\n", - cpu, __pa(ptr)); - } else { - ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); - pr_debug("per cpu data for cpu%d on node%d at %016lx\n", - cpu, node, __pa(ptr)); - } + int node = early_cpu_to_node(cpu); + + if (!node_online(node) || !NODE_DATA(node)) { + ptr = alloc_bootmem_pages(PAGE_SIZE); + pr_info("cpu %d has no node %d or node-local " + "memory\n", cpu, node); + pr_debug("per cpu data for cpu%d at %016lx\n", + cpu, __pa(ptr)); + } else { + ptr = alloc_bootmem_pages_node(NODE_DATA(node), + PAGE_SIZE); + pr_debug("per cpu data for cpu%d on node%d " + "at %016lx\n", cpu, node, __pa(ptr)); + } #endif + memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); + pages[j++] = virt_to_page(ptr); + } + } + + pcpu_unit_size = pcpu_setup_static(populate_extra_pte, pages, size); - memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); - per_cpu_offset(cpu) = ptr - __per_cpu_start; + free_bootmem(__pa(pages), pages_size); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) { + per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; setup_percpu_segment(cpu); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 00263bf07a8..8b1a0ef7f87 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -137,6 +137,16 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) return pte_offset_kernel(pmd, 0); } +void __init populate_extra_pte(unsigned long vaddr) +{ + int pgd_idx = pgd_index(vaddr); + int pmd_idx = pmd_index(vaddr); + pmd_t *pmd; + + pmd = one_md_table_init(swapper_pg_dir + pgd_idx); + one_page_table_init(pmd + pmd_idx); +} + static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) { diff --git a/arch/x86/mm/init_64.c 
b/arch/x86/mm/init_64.c index e6d36b49025..7f91e2cdc4c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -223,6 +223,25 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval) set_pte_vaddr_pud(pud_page, vaddr, pteval); } +void __init populate_extra_pte(unsigned long vaddr) +{ + pgd_t *pgd; + pud_t *pud; + + pgd = pgd_offset_k(vaddr); + if (pgd_none(*pgd)) { + pud = (pud_t *)spp_getpage(); + pgd_populate(&init_mm, pgd, pud); + if (pud != pud_offset(pgd, 0)) { + printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", + pud, pud_offset(pgd, 0)); + return; + } + } + + set_pte_vaddr_pud((pud_t *)pgd_page_vaddr(*pgd), vaddr, __pte(0)); +} + /* * Create large page table mappings for a range of physical addresses. */ -- cgit v1.2.3 From ecab22aa6dc9d42ca52de2cad0854b4c6bd85ac9 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 20 Feb 2009 11:56:38 +0100 Subject: x86: use symbolic constants for MSR_IA32_MISC_ENABLE bits Impact: Cleanup. No functional changes. Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6 +++--- arch/x86/kernel/cpu/intel.c | 4 ++-- arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 6 +++--- arch/x86/kernel/cpu/mcheck/p4.c | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index c2f930d8664..41ab3f064cb 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c @@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) } /* Enable Enhanced PowerSaver */ rdmsrl(MSR_IA32_MISC_ENABLE, val); - if (!(val & 1 << 16)) { - val |= 1 << 16; + if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { + val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; wrmsrl(MSR_IA32_MISC_ENABLE, val); /* Can be locked at 0 */ rdmsrl(MSR_IA32_MISC_ENABLE, val); - if (!(val & 1 << 16)) { + if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); return -ENODEV; } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index f08998278a3..c9f1fdc0283 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) enable it if not. 
*/ rdmsr(MSR_IA32_MISC_ENABLE, l, h); - if (!(l & (1<<16))) { - l |= (1<<16); + if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { + l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); wrmsr(MSR_IA32_MISC_ENABLE, l, h); /* check to see if it stuck */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); - if (!(l & (1<<16))) { + if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); return -ENODEV; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1f137a87d4b..c8ff69a4668 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -147,10 +147,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) */ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); - if ((lo & (1<<9)) == 0) { + if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); - lo |= (1<<9); /* Disable hw prefetching */ + lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); } } diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 5e8c79e748a..ae00938ea50 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -49,13 +49,13 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); h = apic_read(APIC_LVTTHMR); - if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { + if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu); return; } - if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) + if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) tm2 = 1; if (h & APIC_VECTOR_MASK) { @@ -73,7 +73,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h); - wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); + wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index 9b60fce09f7..f53bdcbaf38 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c @@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); h = apic_read(APIC_LVTTHMR); - if ((l & (1<<3)) && (h & APIC_DM_SMI)) { + if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu); return; /* -EBUSY */ @@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) vendor_thermal_interrupt = intel_thermal_interrupt; rdmsr(MSR_IA32_MISC_ENABLE, l, h); - wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); + wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); -- cgit v1.2.3 From 3c3e5694add02e665bbbd0fecfbbdcc0b903097a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Feb 2009 11:46:36 -0500 Subject: x86: check PMD in spurious_fault handler Impact: fix to prevent hard lockup on bad PMD permissions If the PMD does not have the correct permissions for a page access, but the PTE does, the spurious fault handler will 
mistake the fault as a lazy TLB transaction. This will result in an infinite loop of: fault -> spurious_fault check (pass) -> return to code -> fault. This patch adds a check and a WARN_ON() if the PTE passes the permission check but the PMD does not. [ Updated: Ingo Molnar suggested using WARN_ONCE with some text ] Signed-off-by: Steven Rostedt --- arch/x86/mm/fault.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c76ef1d701c..278d645d108 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -455,6 +455,7 @@ static int spurious_fault(unsigned long address, pud_t *pud; pmd_t *pmd; pte_t *pte; + int ret; /* Reserved-bit violation or user access to kernel space? */ if (error_code & (PF_USER | PF_RSVD)) @@ -482,7 +483,17 @@ static int spurious_fault(unsigned long address, if (!pte_present(*pte)) return 0; - return spurious_fault_check(error_code, pte); + ret = spurious_fault_check(error_code, pte); + if (!ret) + return 0; + + /* + * Make sure we have permissions in PMD + * If not, then there's a bug in the page tables. + */ + ret = spurious_fault_check(error_code, (pte_t *) pmd); + WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); + return ret; } /* -- cgit v1.2.3 From 7a5714e0186030676d79a7b4b9830c8e45c3b0a1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 17:44:21 +0100 Subject: x86, pat: add large-PAT check to split_large_page() Impact: future-proof the split_large_page() function Linus noticed that split_large_page() is not safe wrt. the PAT bit: it is bit 12 on the 1GB and 2MB page table level (_PAGE_BIT_PAT_LARGE), and it is bit 7 on the 4K page table level (_PAGE_BIT_PAT). Currently it is not a problem because we never set _PAGE_BIT_PAT_LARGE on any of the large-page mappings - but should this happen in the future, split_large_page() would silently lift bit 12 into the low-level 4K pte and would start corrupting the physical page frame offset. Not fun. So add a debug warning to make sure that if something ever sets the PAT bit, this function gets updated too.
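For reference, the two bit positions the text refers to are (values restated from the commit message above; the kernel's own definitions live in the x86 pgtable headers):

#define _PAGE_BIT_PAT		7	/* PAT bit in a 4K pte */
#define _PAGE_BIT_PAT_LARGE	12	/* PAT bit in a 2MB/1GB entry */

A blind copy of bit 12 from a large-page entry into a 4K pte would therefore land in the page-frame-number bits, which is exactly what the new WARN_ON_ONCE() guards against.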
Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7be47d1a97e..8253bc97587 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address) pbase = (pte_t *)page_address(base); paravirt_alloc_pte(&init_mm, page_to_pfn(base)); ref_prot = pte_pgprot(pte_clrhuge(*kpte)); + /* + * If we ever want to utilize the PAT bit, we need to + * update this function to make sure it's converted from + * bit 12 to bit 7 when we cross from the 2MB level to + * the 4K level: + */ + WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE); #ifdef CONFIG_X86_64 if (level == PG_LEVEL_1G) { -- cgit v1.2.3 From fdb17aeb28aaad3eae5ef57e70cb287d34d1049d Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Fri, 20 Feb 2009 10:23:18 -0800 Subject: x86, vmi: TSC going backwards check in vmi clocksource, cleanup clean up vmi_read_cycles to use max() Reported-b: Andrew Morton Signed-off-by: Alok N Kataria Cc: Zach Amsden Signed-off-by: Ingo Molnar --- arch/x86/kernel/vmiclock_32.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 9cd28c04952..b77ad5789af 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -288,8 +288,7 @@ static struct clocksource clocksource_vmi; static cycle_t read_real_cycles(void) { cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); - return ret >= clocksource_vmi.cycle_last ? - ret : clocksource_vmi.cycle_last; + return max(ret, clocksource_vmi.cycle_last); } static struct clocksource clocksource_vmi = { -- cgit v1.2.3 From 2d4a71676f4d89418a0d53e60b89e8b804b390b2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 19:56:40 +0100 Subject: x86, mm: fault.c cleanup Impact: cleanup, no code changed Clean up various small details, which can be correctness checked automatically: - tidy up the include file section - eliminate unnecessary includes - introduce show_signal_msg() to clean up code flow - standardize the code flow - standardize comments and other style details - more cleanups, pointed out by checkpatch No code changed on either 32-bit nor 64-bit: arch/x86/mm/fault.o: text data bss dec hex filename 4632 32 24 4688 1250 fault.o.before 4632 32 24 4688 1250 fault.o.after the md5 changed due to a change in a single instruction: 2e8a8241e7f0d69706776a5a26c90bc0 fault.o.before.asm c5c3d36e725586eb74f0e10692f0193e fault.o.after.asm Because a __LINE__ reference in a WARN_ONCE() has changed. On 32-bit a few stack offsets changed - no code size difference nor any functionality difference. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 546 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 331 insertions(+), 215 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index e4b9fc5001c..351d679bf97 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1,56 +1,59 @@ /* * Copyright (C) 1995 Linus Torvalds - * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. + * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. 
*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include /* For unblank_screen() */ +#include +#include #include #include -#include /* for max_low_pfn */ -#include -#include #include #include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include -#include -#include -#include -#include -#include #include +#include +#include +#include #include -#include #include +#include /* - * Page fault error code bits - * bit 0 == 0 means no page found, 1 means protection fault - * bit 1 == 0 means read, 1 means write - * bit 2 == 0 means kernel, 1 means user-mode - * bit 3 == 1 means use of reserved bit detected - * bit 4 == 1 means fault was an instruction fetch + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch */ -#define PF_PROT (1<<0) -#define PF_WRITE (1<<1) -#define PF_USER (1<<2) -#define PF_RSVD (1<<3) -#define PF_INSTR (1<<4) +enum x86_pf_error_code { + + PF_PROT = 1 << 0, + PF_WRITE = 1 << 1, + PF_USER = 1 << 2, + PF_RSVD = 1 << 3, + PF_INSTR = 1 << 4, +}; static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { @@ -82,23 +85,27 @@ static inline int notify_page_fault(struct pt_regs *regs) } /* - * X86_32 - * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. - * Check that here and ignore it. + * Prefetch quirks: + * + * 32-bit mode: + * + * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. + * Check that here and ignore it. * - * X86_64 - * Sometimes the CPU reports invalid exceptions on prefetch. - * Check that here and ignore it. + * 64-bit mode: * - * Opcode checker based on code by Richard Brunner + * Sometimes the CPU reports invalid exceptions on prefetch. + * Check that here and ignore it. + * + * Opcode checker based on code by Richard Brunner. 
*/ -static int is_prefetch(struct pt_regs *regs, unsigned long error_code, - unsigned long addr) +static int +is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) { + unsigned char *max_instr; unsigned char *instr; int scan_more = 1; int prefetch = 0; - unsigned char *max_instr; /* * If it was a exec (instruction fetch) fault on NX page, then @@ -114,9 +121,9 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code, return 0; while (scan_more && instr < max_instr) { - unsigned char opcode; unsigned char instr_hi; unsigned char instr_lo; + unsigned char opcode; if (probe_kernel_address(instr, opcode)) break; @@ -173,15 +180,17 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code, return prefetch; } -static void force_sig_info_fault(int si_signo, int si_code, - unsigned long address, struct task_struct *tsk) +static void +force_sig_info_fault(int si_signo, int si_code, unsigned long address, + struct task_struct *tsk) { siginfo_t info; - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)address; + info.si_signo = si_signo; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = (void __user *)address; + force_sig_info(si_signo, &info, tsk); } @@ -189,6 +198,7 @@ static void force_sig_info_fault(int si_signo, int si_code, static int bad_address(void *p) { unsigned long dummy; + return probe_kernel_address((unsigned long *)p, dummy); } #endif @@ -200,13 +210,14 @@ static void dump_pagetable(unsigned long address) page = read_cr3(); page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; + #ifdef CONFIG_X86_PAE printk("*pdpt = %016Lx ", page); if ((page >> PAGE_SHIFT) < max_low_pfn && page & _PAGE_PRESENT) { page &= PAGE_MASK; page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) - & (PTRS_PER_PMD - 1)]; + & (PTRS_PER_PMD - 1)]; printk(KERN_CONT "*pde = %016Lx ", page); page &= ~_PAGE_NX; } @@ -218,14 +229,15 @@ static void dump_pagetable(unsigned long address) * We must not directly access the pte in the highpte * case if the page table is located in highmem. * And let's rather not kmap-atomic the pte, just in case - * it's allocated already. 
+ * it's allocated already: */ if ((page >> PAGE_SHIFT) < max_low_pfn && (page & _PAGE_PRESENT) && !(page & _PAGE_PSE)) { + page &= PAGE_MASK; page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) - & (PTRS_PER_PTE - 1)]; + & (PTRS_PER_PTE - 1)]; printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); } @@ -239,26 +251,38 @@ static void dump_pagetable(unsigned long address) pgd = (pgd_t *)read_cr3(); pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); + pgd += pgd_index(address); - if (bad_address(pgd)) goto bad; + if (bad_address(pgd)) + goto bad; + printk("PGD %lx ", pgd_val(*pgd)); - if (!pgd_present(*pgd)) goto ret; + + if (!pgd_present(*pgd)) + goto out; pud = pud_offset(pgd, address); - if (bad_address(pud)) goto bad; + if (bad_address(pud)) + goto bad; + printk("PUD %lx ", pud_val(*pud)); if (!pud_present(*pud) || pud_large(*pud)) - goto ret; + goto out; pmd = pmd_offset(pud, address); - if (bad_address(pmd)) goto bad; + if (bad_address(pmd)) + goto bad; + printk("PMD %lx ", pmd_val(*pmd)); - if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; + if (!pmd_present(*pmd) || pmd_large(*pmd)) + goto out; pte = pte_offset_kernel(pmd, address); - if (bad_address(pte)) goto bad; + if (bad_address(pte)) + goto bad; + printk("PTE %lx", pte_val(*pte)); -ret: +out: printk("\n"); return; bad: @@ -285,7 +309,6 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) * and redundant with the set_pmd() on non-PAE. As would * set_pud. */ - pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) @@ -295,11 +318,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) return NULL; + if (!pmd_present(*pmd)) { set_pmd(pmd, *pmd_k); arch_flush_lazy_mmu_mode(); - } else + } else { BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + } + return pmd_k; } #endif @@ -312,29 +338,37 @@ KERN_ERR "******* Please consider a BIOS update.\n" KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; #endif -/* Workaround for K8 erratum #93 & buggy BIOS. - BIOS SMM functions are required to use a specific workaround - to avoid corruption of the 64bit RIP register on C stepping K8. - A lot of BIOS that didn't get tested properly miss this. - The OS sees this as a page fault with the upper 32bits of RIP cleared. - Try to work around it here. - Note we only handle faults in kernel here. - Does nothing for X86_32 +/* + * Workaround for K8 erratum #93 & buggy BIOS. + * + * BIOS SMM functions are required to use a specific workaround + * to avoid corruption of the 64bit RIP register on C stepping K8. + * + * A lot of BIOS that didn't get tested properly miss this. + * + * The OS sees this as a page fault with the upper 32bits of RIP cleared. + * Try to work around it here. + * + * Note we only handle faults in kernel here. + * Does nothing on 32-bit. 
*/ static int is_errata93(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 - static int warned; + static int once; + if (address != regs->ip) return 0; + if ((address >> 32) != 0) return 0; + address |= 0xffffffffUL << 32; if ((address >= (u64)_stext && address <= (u64)_etext) || (address >= MODULES_VADDR && address <= MODULES_END)) { - if (!warned) { + if (!once) { printk(errata93_warning); - warned = 1; + once = 1; } regs->ip = address; return 1; @@ -344,16 +378,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) } /* - * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal - * addresses >4GB. We catch this in the page fault handler because these - * addresses are not reachable. Just detect this case and return. Any code + * Work around K8 erratum #100 K8 in compat mode occasionally jumps + * to illegal addresses >4GB. + * + * We catch this in the page fault handler because these addresses + * are not reachable. Just detect this case and return. Any code * segment in LDT is compatibility mode. */ static int is_errata100(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && - (address >> 32)) + if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) return 1; #endif return 0; @@ -363,8 +398,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_F00F_BUG unsigned long nr; + /* - * Pentium F0 0F C7 C8 bug workaround. + * Pentium F0 0F C7 C8 bug workaround: */ if (boot_cpu_data.f00f_bug) { nr = (address - idt_descr.address) >> 3; @@ -378,8 +414,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) return 0; } -static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, - unsigned long address) +static void +show_fault_oops(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { #ifdef CONFIG_X86_32 if (!oops_may_print()) @@ -389,12 +426,14 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, #ifdef CONFIG_X86_PAE if (error_code & PF_INSTR) { unsigned int level; + pte_t *pte = lookup_address(address, &level); - if (pte && pte_present(*pte) && !pte_exec(*pte)) + if (pte && pte_present(*pte) && !pte_exec(*pte)) { printk(KERN_CRIT "kernel tried to execute " "NX-protected page - exploit attempt? 
" "(uid: %d)\n", current_uid()); + } } #endif @@ -403,34 +442,45 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, printk(KERN_CONT "NULL pointer dereference"); else printk(KERN_CONT "paging request"); + printk(KERN_CONT " at %p\n", (void *) address); printk(KERN_ALERT "IP:"); printk_address(regs->ip, 1); + dump_pagetable(address); } #ifdef CONFIG_X86_64 -static noinline void pgtable_bad(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static noinline void +pgtable_bad(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { - unsigned long flags = oops_begin(); - int sig = SIGKILL; - struct task_struct *tsk = current; + struct task_struct *tsk; + unsigned long flags; + int sig; + + flags = oops_begin(); + tsk = current; + sig = SIGKILL; printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", tsk->comm, address); dump_pagetable(address); - tsk->thread.cr2 = address; - tsk->thread.trap_no = 14; - tsk->thread.error_code = error_code; + + tsk->thread.cr2 = address; + tsk->thread.trap_no = 14; + tsk->thread.error_code = error_code; + if (__die("Bad pagetable", regs, error_code)) sig = 0; + oops_end(flags, regs, sig); } #endif -static noinline void no_context(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static noinline void +no_context(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { struct task_struct *tsk = current; unsigned long *stackend; @@ -440,18 +490,20 @@ static noinline void no_context(struct pt_regs *regs, int sig; #endif - /* Are we prepared to handle this kernel fault? */ + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; /* - * X86_32 - * Valid to do another page fault here, because if this fault - * had been triggered by is_prefetch fixup_exception would have - * handled it. + * 32-bit: + * + * Valid to do another page fault here, because if this fault + * had been triggered by is_prefetch fixup_exception would have + * handled it. + * + * 64-bit: * - * X86_64 - * Hall of shame of CPU/BIOS bugs. + * Hall of shame of CPU/BIOS bugs. */ if (is_prefetch(regs, error_code, address)) return; @@ -461,7 +513,7 @@ static noinline void no_context(struct pt_regs *regs, /* * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. 
+ * terminate things with extreme prejudice: */ #ifdef CONFIG_X86_32 bust_spinlocks(1); @@ -471,7 +523,7 @@ static noinline void no_context(struct pt_regs *regs, show_fault_oops(regs, error_code, address); - stackend = end_of_stack(tsk); + stackend = end_of_stack(tsk); if (*stackend != STACK_END_MAGIC) printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); @@ -487,28 +539,54 @@ static noinline void no_context(struct pt_regs *regs, sig = SIGKILL; if (__die("Oops", regs, error_code)) sig = 0; + /* Executive summary in case the body of the oops scrolled away */ printk(KERN_EMERG "CR2: %016lx\n", address); + oops_end(flags, regs, sig); #endif } -static void __bad_area_nosemaphore(struct pt_regs *regs, - unsigned long error_code, unsigned long address, - int si_code) +/* + * Print out info about fatal segfaults, if the show_unhandled_signals + * sysctl is set: + */ +static inline void +show_signal_msg(struct pt_regs *regs, unsigned long error_code, + unsigned long address, struct task_struct *tsk) +{ + if (!unhandled_signal(tsk, SIGSEGV)) + return; + + if (!printk_ratelimit()) + return; + + printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", + task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, + tsk->comm, task_pid_nr(tsk), address, + (void *)regs->ip, (void *)regs->sp, error_code); + + print_vma_addr(KERN_CONT " in ", regs->ip); + + printk(KERN_CONT "\n"); +} + +static void +__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, + unsigned long address, int si_code) { struct task_struct *tsk = current; /* User mode accesses just cause a SIGSEGV */ if (error_code & PF_USER) { /* - * It's possible to have interrupts off here. + * It's possible to have interrupts off here: */ local_irq_enable(); /* * Valid to do another page fault here because this one came - * from user space. + * from user space: */ if (is_prefetch(regs, error_code, address)) return; @@ -516,22 +594,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs, if (is_errata100(regs, address)) return; - if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && - printk_ratelimit()) { - printk( - "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", - task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, - tsk->comm, task_pid_nr(tsk), address, - (void *) regs->ip, (void *) regs->sp, error_code); - print_vma_addr(" in ", regs->ip); - printk("\n"); - } + if (unlikely(show_unhandled_signals)) + show_signal_msg(regs, error_code, address, tsk); + + /* Kernel addresses are always protection faults: */ + tsk->thread.cr2 = address; + tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.trap_no = 14; - tsk->thread.cr2 = address; - /* Kernel addresses are always protection faults */ - tsk->thread.error_code = error_code | (address >= TASK_SIZE); - tsk->thread.trap_no = 14; force_sig_info_fault(SIGSEGV, si_code, address, tsk); + return; } @@ -541,15 +613,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs, no_context(regs, error_code, address); } -static noinline void bad_area_nosemaphore(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static noinline void +bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); } -static void __bad_area(struct pt_regs *regs, - unsigned long error_code, unsigned long address, - int si_code) +static void +__bad_area(struct pt_regs *regs, unsigned long error_code, + unsigned long address, int si_code) { struct mm_struct *mm = current->mm; @@ -562,67 +635,77 @@ static void __bad_area(struct pt_regs *regs, __bad_area_nosemaphore(regs, error_code, address, si_code); } -static noinline void bad_area(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static noinline void +bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) { __bad_area(regs, error_code, address, SEGV_MAPERR); } -static noinline void bad_area_access_error(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static noinline void +bad_area_access_error(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { __bad_area(regs, error_code, address, SEGV_ACCERR); } /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ -static void out_of_memory(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static void +out_of_memory(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { /* * We ran out of memory, call the OOM killer, and return the userspace - * (which will retry the fault, or kill us if we got oom-killed). + * (which will retry the fault, or kill us if we got oom-killed): */ up_read(¤t->mm->mmap_sem); + pagefault_out_of_memory(); } -static void do_sigbus(struct pt_regs *regs, - unsigned long error_code, unsigned long address) +static void +do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; up_read(&mm->mmap_sem); - /* Kernel mode? Handle exceptions or die */ + /* Kernel mode? 
Handle exceptions or die: */ if (!(error_code & PF_USER)) no_context(regs, error_code, address); + #ifdef CONFIG_X86_32 - /* User space => ok to do another page fault */ + /* User space => ok to do another page fault: */ if (is_prefetch(regs, error_code, address)) return; #endif - tsk->thread.cr2 = address; - tsk->thread.error_code = error_code; - tsk->thread.trap_no = 14; + + tsk->thread.cr2 = address; + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 14; + force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); } -static noinline void mm_fault_error(struct pt_regs *regs, - unsigned long error_code, unsigned long address, unsigned int fault) +static noinline void +mm_fault_error(struct pt_regs *regs, unsigned long error_code, + unsigned long address, unsigned int fault) { - if (fault & VM_FAULT_OOM) + if (fault & VM_FAULT_OOM) { out_of_memory(regs, error_code, address); - else if (fault & VM_FAULT_SIGBUS) - do_sigbus(regs, error_code, address); - else - BUG(); + } else { + if (fault & VM_FAULT_SIGBUS) + do_sigbus(regs, error_code, address); + else + BUG(); + } } static int spurious_fault_check(unsigned long error_code, pte_t *pte) { if ((error_code & PF_WRITE) && !pte_write(*pte)) return 0; + if ((error_code & PF_INSTR) && !pte_exec(*pte)) return 0; @@ -630,16 +713,19 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) } /* - * Handle a spurious fault caused by a stale TLB entry. This allows - * us to lazily refresh the TLB when increasing the permissions of a - * kernel page (RO -> RW or NX -> X). Doing it eagerly is very - * expensive since that implies doing a full cross-processor TLB - * flush, even if no stale TLB entries exist on other processors. + * Handle a spurious fault caused by a stale TLB entry. + * + * This allows us to lazily refresh the TLB when increasing the + * permissions of a kernel page (RO -> RW or NX -> X). Doing it + * eagerly is very expensive since that implies doing a full + * cross-processor TLB flush, even if no stale TLB entries exist + * on other processors. + * * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. */ -static noinline int spurious_fault(unsigned long error_code, - unsigned long address) +static noinline int +spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; pud_t *pud; @@ -678,20 +764,23 @@ static noinline int spurious_fault(unsigned long error_code, return 0; /* - * Make sure we have permissions in PMD - * If not, then there's a bug in the page tables. + * Make sure we have permissions in PMD. + * If not, then there's a bug in the page tables: */ ret = spurious_fault_check(error_code, (pte_t *) pmd); WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); + return ret; } /* - * X86_32 - * Handle a fault on the vmalloc or module mapping area + * 32-bit: + * + * Handle a fault on the vmalloc or module mapping area * - * X86_64 - * Handle a fault on the vmalloc area + * 64-bit: + * + * Handle a fault on the vmalloc area * * This assumes no large pages in there. 
*/ @@ -702,7 +791,7 @@ static noinline int vmalloc_fault(unsigned long address) pmd_t *pmd_k; pte_t *pte_k; - /* Make sure we are in vmalloc area */ + /* Make sure we are in vmalloc area: */ if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; @@ -717,9 +806,11 @@ static noinline int vmalloc_fault(unsigned long address) pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); if (!pmd_k) return -1; + pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) return -1; + return 0; #else pgd_t *pgd, *pgd_ref; @@ -727,69 +818,84 @@ static noinline int vmalloc_fault(unsigned long address) pmd_t *pmd, *pmd_ref; pte_t *pte, *pte_ref; - /* Make sure we are in vmalloc area */ + /* Make sure we are in vmalloc area: */ if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; - /* Copy kernel mappings over when needed. This can also - happen within a race in page table update. In the later - case just flush. */ - + /* + * Copy kernel mappings over when needed. This can also + * happen within a race in page table update. In the later + * case just flush: + */ pgd = pgd_offset(current->active_mm, address); pgd_ref = pgd_offset_k(address); if (pgd_none(*pgd_ref)) return -1; + if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); - /* Below here mismatches are bugs because these lower tables - are shared */ + /* + * Below here mismatches are bugs because these lower tables + * are shared: + */ pud = pud_offset(pgd, address); pud_ref = pud_offset(pgd_ref, address); if (pud_none(*pud_ref)) return -1; + if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) BUG(); + pmd = pmd_offset(pud, address); pmd_ref = pmd_offset(pud_ref, address); if (pmd_none(*pmd_ref)) return -1; + if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) BUG(); + pte_ref = pte_offset_kernel(pmd_ref, address); if (!pte_present(*pte_ref)) return -1; + pte = pte_offset_kernel(pmd, address); - /* Don't use pte_page here, because the mappings can point - outside mem_map, and the NUMA hash lookup cannot handle - that. 
*/ + + /* + * Don't use pte_page here, because the mappings can point + * outside mem_map, and the NUMA hash lookup cannot handle + * that: + */ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) BUG(); + return 0; #endif } int show_unhandled_signals = 1; -static inline int access_error(unsigned long error_code, int write, - struct vm_area_struct *vma) +static inline int +access_error(unsigned long error_code, int write, struct vm_area_struct *vma) { if (write) { - /* write, present and write, not present */ + /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) return 1; - } else if (unlikely(error_code & PF_PROT)) { - /* read, present */ - return 1; - } else { - /* read, not present */ - if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) - return 1; + return 0; } + /* read, present: */ + if (unlikely(error_code & PF_PROT)) + return 1; + + /* read, not present: */ + if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + return 1; + return 0; } @@ -797,9 +903,9 @@ static int fault_in_kernel_space(unsigned long address) { #ifdef CONFIG_X86_32 return address >= TASK_SIZE; -#else /* !CONFIG_X86_32 */ +#else return address >= TASK_SIZE64; -#endif /* CONFIG_X86_32 */ +#endif } /* @@ -812,18 +918,19 @@ asmlinkage #endif void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) { - unsigned long address; + struct vm_area_struct *vma; struct task_struct *tsk; + unsigned long address; struct mm_struct *mm; - struct vm_area_struct *vma; int write; int fault; tsk = current; mm = tsk->mm; + prefetchw(&mm->mmap_sem); - /* get the address */ + /* Get the faulting address: */ address = read_cr2(); if (unlikely(kmmio_fault(regs, address))) @@ -847,22 +954,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) vmalloc_fault(address) >= 0) return; - /* Can handle a stale RO->RW TLB */ + /* Can handle a stale RO->RW TLB: */ if (spurious_fault(error_code, address)) return; - /* kprobes don't want to hook the spurious faults. */ + /* kprobes don't want to hook the spurious faults: */ if (notify_page_fault(regs)) return; /* * Don't take the mm semaphore here. If we fixup a prefetch - * fault we could otherwise deadlock. + * fault we could otherwise deadlock: */ bad_area_nosemaphore(regs, error_code, address); + return; } - /* kprobes don't want to hook the spurious faults. */ + /* kprobes don't want to hook the spurious faults: */ if (unlikely(notify_page_fault(regs))) return; /* @@ -870,13 +978,15 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) * vmalloc fault has been handled. * * User-mode registers count as a user access even for any - * potential system fault or CPU buglet. + * potential system fault or CPU buglet: */ if (user_mode_vm(regs)) { local_irq_enable(); error_code |= PF_USER; - } else if (regs->flags & X86_EFLAGS_IF) - local_irq_enable(); + } else { + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); + } #ifdef CONFIG_X86_64 if (unlikely(error_code & PF_RSVD)) @@ -884,8 +994,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) #endif /* - * If we're in an interrupt, have no user context or are running in an - * atomic region then we must not take the fault. 
+ * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: */ if (unlikely(in_atomic() || !mm)) { bad_area_nosemaphore(regs, error_code, address); @@ -894,19 +1004,19 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) /* * When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunately, in the case of an - * erroneous fault occurring in a code path which already holds mmap_sem - * we will deadlock attempting to validate the fault against the - * address space. Luckily the kernel only validly references user - * space from well defined areas of code, which are listed in the - * exceptions table. + * addresses in user space. All other faults represent errors in + * the kernel and should generate an OOPS. Unfortunately, in the + * case of an erroneous fault occurring in a code path which already + * holds mmap_sem we will deadlock attempting to validate the fault + * against the address space. Luckily the kernel only validly + * references user space from well defined areas of code, which are + * listed in the exceptions table. * * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibility of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. If this is invalid we can skip the address space check, - * thus avoiding the deadlock. + * the source reference check when there is a possibility of a + * deadlock. Attempt to lock the address space, if we cannot we then + * validate the source. If this is invalid we can skip the address + * space check, thus avoiding the deadlock: */ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { if ((error_code & PF_USER) == 0 && @@ -917,8 +1027,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) down_read(&mm->mmap_sem); } else { /* - * The above down_read_trylock() might have succeeded in which - * case we'll have missed the might_sleep() from down_read(). + * The above down_read_trylock() might have succeeded in + * which case we'll have missed the might_sleep() from + * down_read(): */ might_sleep(); } @@ -938,7 +1049,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) /* * Accessing the stack below %sp is always a bug. * The large cushion allows instructions like enter - * and pusha to work. ("enter $65535,$31" pushes + * and pusha to work. ("enter $65535, $31" pushes * 32 pointers and then decrements %sp by 65535.) */ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { @@ -957,6 +1068,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) */ good_area: write = error_code & PF_WRITE; + if (unlikely(access_error(error_code, write, vma))) { bad_area_access_error(regs, error_code, address); return; @@ -965,13 +1077,15 @@ good_area: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo - * the fault. 
+ * the fault: */ fault = handle_mm_fault(mm, vma, address, write); + if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, error_code, address, fault); return; } + if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else @@ -1004,13 +1118,13 @@ void vmalloc_sync_all(void) for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE && address < FIXADDR_TOP; address += PMD_SIZE) { + unsigned long flags; struct page *page; spin_lock_irqsave(&pgd_lock, flags); list_for_each_entry(page, &pgd_list, lru) { - if (!vmalloc_sync_one(page_address(page), - address)) + if (!vmalloc_sync_one(page_address(page), address)) break; } spin_unlock_irqrestore(&pgd_lock, flags); @@ -1018,12 +1132,14 @@ void vmalloc_sync_all(void) #else /* CONFIG_X86_64 */ for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; address += PGDIR_SIZE) { + const pgd_t *pgd_ref = pgd_offset_k(address); unsigned long flags; struct page *page; if (pgd_none(*pgd_ref)) continue; + spin_lock_irqsave(&pgd_lock, flags); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; -- cgit v1.2.3 From 107a03678cac0dd6cf7095f81442a4fa477e4700 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 20:37:05 +0100 Subject: x86, mm: fault.c, refactor/simplify the is_prefetch() code Impact: no functionality changed Factor out the opcode checker into a helper inline. The code got a tiny bit smaller: text data bss dec hex filename 4632 32 24 4688 1250 fault.o.before 4618 32 24 4674 1242 fault.o.after And it got cleaner / easier to review as well. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 100 ++++++++++++++++++++++++++-------------------------- 1 file changed, 50 insertions(+), 50 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 351d679bf97..72973c7682b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -99,12 +99,58 @@ static inline int notify_page_fault(struct pt_regs *regs) * * Opcode checker based on code by Richard Brunner. */ +static inline int +check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, + unsigned char opcode, int *prefetch) +{ + unsigned char instr_hi = opcode & 0xf0; + unsigned char instr_lo = opcode & 0x0f; + + switch (instr_hi) { + case 0x20: + case 0x30: + /* + * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. + * In X86_64 long mode, the CPU will signal invalid + * opcode if some of these prefixes are present so + * X86_64 will never get here anyway + */ + return ((instr_lo & 7) == 0x6); +#ifdef CONFIG_X86_64 + case 0x40: + /* + * In AMD64 long mode 0x40..0x4F are valid REX prefixes + * Need to figure out under what instruction mode the + * instruction was issued. Could check the LDT for lm, + * but for now it's good enough to assume that long + * mode only uses well known segments or kernel. + */ + return (!user_mode(regs)) || (regs->cs == __USER_CS); +#endif + case 0x60: + /* 0x64 thru 0x67 are valid prefixes in all modes. */ + return (instr_lo & 0xC) == 0x4; + case 0xF0: + /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. 
*/ + return !instr_lo || (instr_lo>>1) == 1; + case 0x00: + /* Prefetch instruction is 0x0F0D or 0x0F18 */ + if (probe_kernel_address(instr, opcode)) + return 0; + + *prefetch = (instr_lo == 0xF) && + (opcode == 0x0D || opcode == 0x18); + return 0; + default: + return 0; + } +} + static int is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) { unsigned char *max_instr; unsigned char *instr; - int scan_more = 1; int prefetch = 0; /* @@ -114,68 +160,22 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) if (error_code & PF_INSTR) return 0; - instr = (unsigned char *)convert_ip_to_linear(current, regs); + instr = (void *)convert_ip_to_linear(current, regs); max_instr = instr + 15; if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) return 0; - while (scan_more && instr < max_instr) { - unsigned char instr_hi; - unsigned char instr_lo; + while (instr < max_instr) { unsigned char opcode; if (probe_kernel_address(instr, opcode)) break; - instr_hi = opcode & 0xf0; - instr_lo = opcode & 0x0f; instr++; - switch (instr_hi) { - case 0x20: - case 0x30: - /* - * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. - * In X86_64 long mode, the CPU will signal invalid - * opcode if some of these prefixes are present so - * X86_64 will never get here anyway - */ - scan_more = ((instr_lo & 7) == 0x6); - break; -#ifdef CONFIG_X86_64 - case 0x40: - /* - * In AMD64 long mode 0x40..0x4F are valid REX prefixes - * Need to figure out under what instruction mode the - * instruction was issued. Could check the LDT for lm, - * but for now it's good enough to assume that long - * mode only uses well known segments or kernel. - */ - scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS); - break; -#endif - case 0x60: - /* 0x64 thru 0x67 are valid prefixes in all modes. */ - scan_more = (instr_lo & 0xC) == 0x4; + if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) break; - case 0xF0: - /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ - scan_more = !instr_lo || (instr_lo>>1) == 1; - break; - case 0x00: - /* Prefetch instruction is 0x0F0D or 0x0F18 */ - scan_more = 0; - - if (probe_kernel_address(instr, opcode)) - break; - prefetch = (instr_lo == 0xF) && - (opcode == 0x0D || opcode == 0x18); - break; - default: - scan_more = 0; - break; - } } return prefetch; } -- cgit v1.2.3 From 8c938f9fae887f6e180bf802aa1c33cf74712aff Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 22:12:18 +0100 Subject: x86, mm: fault.c, factor out the vm86 fault check Impact: cleanup Instead of an ugly, open-coded, #ifdef-ed vm86 related legacy check in do_page_fault(), put it into the check_v8086_mode() helper function and merge it with an existing #ifdef. Also, simplify the code flow a tiny bit in the helper. No code changed: arch/x86/mm/fault.o: text data bss dec hex filename 2711 12 12 2735 aaf fault.o.before 2711 12 12 2735 aaf fault.o.after Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 43 +++++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 72973c7682b..7dc0615c3cf 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -328,14 +328,41 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) return pmd_k; } -#endif -#ifdef CONFIG_X86_64 +/* + * Did it hit the DOS screen memory VA from vm86 mode? 
+ */ +static inline void +check_v8086_mode(struct pt_regs *regs, unsigned long address, + struct task_struct *tsk) +{ + unsigned long bit; + + if (!v8086_mode(regs)) + return; + + bit = (address - 0xA0000) >> PAGE_SHIFT; + if (bit < 32) + tsk->thread.screen_bitmap |= 1 << bit; +} + +#else /* CONFIG_X86_64: */ + static const char errata93_warning[] = KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" KERN_ERR "******* Please consider a BIOS update.\n" KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; + +/* + * No vm86 mode in 64-bit mode: + */ +static inline void +check_v8086_mode(struct pt_regs *regs, unsigned long address, + struct task_struct *tsk) +{ +} + #endif /* @@ -1091,16 +1118,8 @@ good_area: else tsk->min_flt++; -#ifdef CONFIG_X86_32 - /* - * Did it hit the DOS screen memory VA from vm86 mode? - */ - if (v8086_mode(regs)) { - unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT; - if (bit < 32) - tsk->thread.screen_bitmap |= 1 << bit; - } -#endif + check_v8086_mode(regs, address, tsk); + up_read(&mm->mmap_sem); } -- cgit v1.2.3 From 121d5d0a7e5808fbcfda484efd7ba840ac93450f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 22:18:08 +0100 Subject: x86, mm: fault.c, enable PF_RSVD checks on 32-bit too Impact: improve page fault handling robustness The 'PF_RSVD' flag (bit 3) of the page-fault error_code is a relatively recent addition to x86 CPUs, so the 32-bit do_fault() implementation never had it. This flag gets set when the CPU detects nonzero values in any reserved bits of the page directory entries. Extend the existing 64-bit check for PF_RSVD in do_page_fault() to 32-bit too. If we detect such a fault then we print a more informative oops and the pagetables. This unifies the code some more, removes an ugly #ifdef and improves the 32-bit page fault code robustness a bit. It slightly increases the 32-bit kernel text size. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7dc0615c3cf..3e366146273 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -477,7 +477,6 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, dump_pagetable(address); } -#ifdef CONFIG_X86_64 static noinline void pgtable_bad(struct pt_regs *regs, unsigned long error_code, unsigned long address) @@ -503,7 +502,6 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code, oops_end(flags, regs, sig); } -#endif static noinline void no_context(struct pt_regs *regs, unsigned long error_code, @@ -1015,10 +1013,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) local_irq_enable(); } -#ifdef CONFIG_X86_64 if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); -#endif /* * If we're in an interrupt, have no user context or are running -- cgit v1.2.3 From b814d41f0987c7648d7ed07471258101c95c026b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 22:32:10 +0100 Subject: x86, mm: fault.c, simplify kmmio_fault() Impact: cleanup Remove an #ifdef from kmmio_fault() - we can do this by providing default implementations for is_kmmio_active() and kmmio_handler(). The compiler optimizes it all away in the !CONFIG_MMIOTRACE case. 
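The mmiotrace.h side of this change is not shown in this series, so the following is only a minimal sketch of the stub pattern the commit message describes (the function names are taken from the message; the actual header may declare them differently):

	#ifdef CONFIG_MMIOTRACE
	/* real implementations provided by the mmiotrace code */
	extern int is_kmmio_active(void);
	extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
	#else
	/* !CONFIG_MMIOTRACE: no-op fallbacks, so callers need no #ifdef */
	static inline int is_kmmio_active(void)
	{
		return 0;
	}
	static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
	{
		return 0;
	}
	#endif

With fallbacks of this shape in place, kmmio_fault() can call is_kmmio_active() unconditionally and the compiler discards the dead branch when mmiotrace is disabled.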
Also, while at it, clean up mmiotrace.h a bit: - standard header guards - standard vertical spaces for structure definitions No code changed (both with mmiotrace on and off in the config): text data bss dec hex filename 2947 12 12 2971 b9b fault.o.before 2947 12 12 2971 b9b fault.o.after Cc: Pekka Paalanen Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3e366146273..fe99af4b86d 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -55,13 +55,14 @@ enum x86_pf_error_code { PF_INSTR = 1 << 4, }; +/* + * (returns 0 if mmiotrace is disabled) + */ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { -#ifdef CONFIG_MMIOTRACE if (unlikely(is_kmmio_active())) if (kmmio_handler(regs, addr) == 1) return -1; -#endif return 0; } -- cgit v1.2.3 From b18018126f422f5b706fd750373425e10e84b486 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 22:42:57 +0100 Subject: x86, mm, kprobes: fault.c, simplify notify_page_fault() Impact: cleanup Remove an #ifdef from notify_page_fault(). The function still compiles to nothing in the !CONFIG_KPROBES case. Introduce kprobes_built_in() and kprobe_fault_handler() helpers to allow this - they returns 0 if !CONFIG_KPROBES. No code changed: text data bss dec hex filename 4618 32 24 4674 1242 fault.o.before 4618 32 24 4674 1242 fault.o.after Cc: Masami Hiramatsu Cc: Andrew Morton Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index fe99af4b86d..379beaec6ca 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -68,11 +68,10 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) static inline int notify_page_fault(struct pt_regs *regs) { -#ifdef CONFIG_KPROBES int ret = 0; /* kprobe_running() needs smp_processor_id() */ - if (!user_mode_vm(regs)) { + if (kprobes_built_in() && !user_mode_vm(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; @@ -80,9 +79,6 @@ static inline int notify_page_fault(struct pt_regs *regs) } return ret; -#else - return 0; -#endif } /* -- cgit v1.2.3 From f2f13a8535174dbb813a0607a9d4737cfba98f6c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 22:50:24 +0100 Subject: x86, mm: fault.c, reorder functions Impact: cleanup Avoid a couple more #ifdefs by moving fundamentally non-unifiable functions into a single #ifdef 32-bit / #else / #endif block in fault.c: vmalloc*(), dump_pagetable(), check_vm8086_mode(). 
No code changed: text data bss dec hex filename 4618 32 24 4674 1242 fault.o.before 4618 32 24 4674 1242 fault.o.after Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 474 ++++++++++++++++++++++++++-------------------------- 1 file changed, 239 insertions(+), 235 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 379beaec6ca..4ce62fb80da 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -191,18 +191,124 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, force_sig_info(si_signo, &info, tsk); } -#ifdef CONFIG_X86_64 -static int bad_address(void *p) +DEFINE_SPINLOCK(pgd_lock); +LIST_HEAD(pgd_list); + +#ifdef CONFIG_X86_32 +static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) { - unsigned long dummy; + unsigned index = pgd_index(address); + pgd_t *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; - return probe_kernel_address((unsigned long *)p, dummy); + pgd += index; + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) + return NULL; + + /* + * set_pgd(pgd, *pgd_k); here would be useless on PAE + * and redundant with the set_pmd() on non-PAE. As would + * set_pud. + */ + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + + if (!pmd_present(*pmd)) { + set_pmd(pmd, *pmd_k); + arch_flush_lazy_mmu_mode(); + } else { + BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + } + + return pmd_k; +} + +void vmalloc_sync_all(void) +{ + unsigned long address; + + if (SHARED_KERNEL_PMD) + return; + + for (address = VMALLOC_START & PMD_MASK; + address >= TASK_SIZE && address < FIXADDR_TOP; + address += PMD_SIZE) { + + unsigned long flags; + struct page *page; + + spin_lock_irqsave(&pgd_lock, flags); + list_for_each_entry(page, &pgd_list, lru) { + if (!vmalloc_sync_one(page_address(page), address)) + break; + } + spin_unlock_irqrestore(&pgd_lock, flags); + } +} + +/* + * 32-bit: + * + * Handle a fault on the vmalloc or module mapping area + */ +static noinline int vmalloc_fault(unsigned long address) +{ + unsigned long pgd_paddr; + pmd_t *pmd_k; + pte_t *pte_k; + + /* Make sure we are in vmalloc area: */ + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "current" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + pgd_paddr = read_cr3(); + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + return -1; + + return 0; +} + +/* + * Did it hit the DOS screen memory VA from vm86 mode? 
+ */ +static inline void +check_v8086_mode(struct pt_regs *regs, unsigned long address, + struct task_struct *tsk) +{ + unsigned long bit; + + if (!v8086_mode(regs)) + return; + + bit = (address - 0xA0000) >> PAGE_SHIFT; + if (bit < 32) + tsk->thread.screen_bitmap |= 1 << bit; } -#endif static void dump_pagetable(unsigned long address) { -#ifdef CONFIG_X86_32 __typeof__(pte_val(__pte(0))) page; page = read_cr3(); @@ -239,7 +345,132 @@ static void dump_pagetable(unsigned long address) } printk("\n"); -#else /* CONFIG_X86_64 */ +} + +#else /* CONFIG_X86_64: */ + +void vmalloc_sync_all(void) +{ + unsigned long address; + + for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; + address += PGDIR_SIZE) { + + const pgd_t *pgd_ref = pgd_offset_k(address); + unsigned long flags; + struct page *page; + + if (pgd_none(*pgd_ref)) + continue; + + spin_lock_irqsave(&pgd_lock, flags); + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } + spin_unlock_irqrestore(&pgd_lock, flags); + } +} + +/* + * 64-bit: + * + * Handle a fault on the vmalloc area + * + * This assumes no large pages in there. + */ +static noinline int vmalloc_fault(unsigned long address) +{ + pgd_t *pgd, *pgd_ref; + pud_t *pud, *pud_ref; + pmd_t *pmd, *pmd_ref; + pte_t *pte, *pte_ref; + + /* Make sure we are in vmalloc area: */ + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + + /* + * Copy kernel mappings over when needed. This can also + * happen within a race in page table update. In the later + * case just flush: + */ + pgd = pgd_offset(current->active_mm, address); + pgd_ref = pgd_offset_k(address); + if (pgd_none(*pgd_ref)) + return -1; + + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + + /* + * Below here mismatches are bugs because these lower tables + * are shared: + */ + + pud = pud_offset(pgd, address); + pud_ref = pud_offset(pgd_ref, address); + if (pud_none(*pud_ref)) + return -1; + + if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) + BUG(); + + pmd = pmd_offset(pud, address); + pmd_ref = pmd_offset(pud_ref, address); + if (pmd_none(*pmd_ref)) + return -1; + + if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) + BUG(); + + pte_ref = pte_offset_kernel(pmd_ref, address); + if (!pte_present(*pte_ref)) + return -1; + + pte = pte_offset_kernel(pmd, address); + + /* + * Don't use pte_page here, because the mappings can point + * outside mem_map, and the NUMA hash lookup cannot handle + * that: + */ + if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) + BUG(); + + return 0; +} + +static const char errata93_warning[] = +KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" +KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" +KERN_ERR "******* Please consider a BIOS update.\n" +KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; + +/* + * No vm86 mode in 64-bit mode: + */ +static inline void +check_v8086_mode(struct pt_regs *regs, unsigned long address, + struct task_struct *tsk) +{ +} + +static int bad_address(void *p) +{ + unsigned long dummy; + + return probe_kernel_address((unsigned long *)p, dummy); +} + +static void dump_pagetable(unsigned long address) +{ pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -284,83 +515,9 @@ out: return; bad: 
printk("BAD\n"); -#endif -} - -#ifdef CONFIG_X86_32 -static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) -{ - unsigned index = pgd_index(address); - pgd_t *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - - pgd += index; - pgd_k = init_mm.pgd + index; - - if (!pgd_present(*pgd_k)) - return NULL; - - /* - * set_pgd(pgd, *pgd_k); here would be useless on PAE - * and redundant with the set_pmd() on non-PAE. As would - * set_pud. - */ - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); - if (!pud_present(*pud_k)) - return NULL; - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - return NULL; - - if (!pmd_present(*pmd)) { - set_pmd(pmd, *pmd_k); - arch_flush_lazy_mmu_mode(); - } else { - BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); - } - - return pmd_k; -} - -/* - * Did it hit the DOS screen memory VA from vm86 mode? - */ -static inline void -check_v8086_mode(struct pt_regs *regs, unsigned long address, - struct task_struct *tsk) -{ - unsigned long bit; - - if (!v8086_mode(regs)) - return; - - bit = (address - 0xA0000) >> PAGE_SHIFT; - if (bit < 32) - tsk->thread.screen_bitmap |= 1 << bit; -} - -#else /* CONFIG_X86_64: */ - -static const char errata93_warning[] = -KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" -KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" -KERN_ERR "******* Please consider a BIOS update.\n" -KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; - -/* - * No vm86 mode in 64-bit mode: - */ -static inline void -check_v8086_mode(struct pt_regs *regs, unsigned long address, - struct task_struct *tsk) -{ } -#endif +#endif /* CONFIG_X86_64 */ /* * Workaround for K8 erratum #93 & buggy BIOS. @@ -795,109 +952,6 @@ spurious_fault(unsigned long error_code, unsigned long address) return ret; } -/* - * 32-bit: - * - * Handle a fault on the vmalloc or module mapping area - * - * 64-bit: - * - * Handle a fault on the vmalloc area - * - * This assumes no large pages in there. - */ -static noinline int vmalloc_fault(unsigned long address) -{ -#ifdef CONFIG_X86_32 - unsigned long pgd_paddr; - pmd_t *pmd_k; - pte_t *pte_k; - - /* Make sure we are in vmalloc area: */ - if (!(address >= VMALLOC_START && address < VMALLOC_END)) - return -1; - - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "current" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - pgd_paddr = read_cr3(); - pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); - if (!pmd_k) - return -1; - - pte_k = pte_offset_kernel(pmd_k, address); - if (!pte_present(*pte_k)) - return -1; - - return 0; -#else - pgd_t *pgd, *pgd_ref; - pud_t *pud, *pud_ref; - pmd_t *pmd, *pmd_ref; - pte_t *pte, *pte_ref; - - /* Make sure we are in vmalloc area: */ - if (!(address >= VMALLOC_START && address < VMALLOC_END)) - return -1; - - /* - * Copy kernel mappings over when needed. This can also - * happen within a race in page table update. 
In the later - * case just flush: - */ - pgd = pgd_offset(current->active_mm, address); - pgd_ref = pgd_offset_k(address); - if (pgd_none(*pgd_ref)) - return -1; - - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); - - /* - * Below here mismatches are bugs because these lower tables - * are shared: - */ - - pud = pud_offset(pgd, address); - pud_ref = pud_offset(pgd_ref, address); - if (pud_none(*pud_ref)) - return -1; - - if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) - BUG(); - - pmd = pmd_offset(pud, address); - pmd_ref = pmd_offset(pud_ref, address); - if (pmd_none(*pmd_ref)) - return -1; - - if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) - BUG(); - - pte_ref = pte_offset_kernel(pmd_ref, address); - if (!pte_present(*pte_ref)) - return -1; - - pte = pte_offset_kernel(pmd, address); - - /* - * Don't use pte_page here, because the mappings can point - * outside mem_map, and the NUMA hash lookup cannot handle - * that: - */ - if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) - BUG(); - - return 0; -#endif -} - int show_unhandled_signals = 1; static inline int @@ -1115,53 +1169,3 @@ good_area: up_read(&mm->mmap_sem); } - -DEFINE_SPINLOCK(pgd_lock); -LIST_HEAD(pgd_list); - -void vmalloc_sync_all(void) -{ - unsigned long address; - -#ifdef CONFIG_X86_32 - if (SHARED_KERNEL_PMD) - return; - - for (address = VMALLOC_START & PMD_MASK; - address >= TASK_SIZE && address < FIXADDR_TOP; - address += PMD_SIZE) { - - unsigned long flags; - struct page *page; - - spin_lock_irqsave(&pgd_lock, flags); - list_for_each_entry(page, &pgd_list, lru) { - if (!vmalloc_sync_one(page_address(page), address)) - break; - } - spin_unlock_irqrestore(&pgd_lock, flags); - } -#else /* CONFIG_X86_64 */ - for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; - address += PGDIR_SIZE) { - - const pgd_t *pgd_ref = pgd_offset_k(address); - unsigned long flags; - struct page *page; - - if (pgd_none(*pgd_ref)) - continue; - - spin_lock_irqsave(&pgd_lock, flags); - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); - } - spin_unlock_irqrestore(&pgd_lock, flags); - } -#endif -} -- cgit v1.2.3 From 8f7661496cece8320137d5e26808825498fd2b26 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:00:29 +0100 Subject: x86, mm: fault.c, unify oops printing Impact: refine/extend page fault related oops printing on 64-bit - honor the pause_on_oops logic on 64-bit too - print out NX fault warnings on 64-bit as well - factor out the NX fault message to make it git-greppable and readable Note that this means that we do the PF_INSTR check on 32-bit non-PAE as well where it should not occur ... normally. Cannot hurt. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4ce62fb80da..ebfaca3bbb1 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -595,28 +595,24 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) return 0; } +static const char nx_warning[] = KERN_CRIT +"kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d)\n"; + static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) { -#ifdef CONFIG_X86_32 if (!oops_may_print()) return; -#endif -#ifdef CONFIG_X86_PAE if (error_code & PF_INSTR) { unsigned int level; pte_t *pte = lookup_address(address, &level); - if (pte && pte_present(*pte) && !pte_exec(*pte)) { - printk(KERN_CRIT "kernel tried to execute " - "NX-protected page - exploit attempt? " - "(uid: %d)\n", current_uid()); - } + if (pte && pte_present(*pte) && !pte_exec(*pte)) + printk(nx_warning, current_uid()); } -#endif printk(KERN_ALERT "BUG: unable to handle kernel "); if (address < PAGE_SIZE) -- cgit v1.2.3 From 1cc99544dde9e48602979f16b9309fade6e93051 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:07:48 +0100 Subject: x86, mm: fault.c, unify oops handling Impact: add oops-recursion check to 32-bit Unify the oops state-machine, to the 64-bit version. It is slightly more careful in that it does a recursion check in oops_begin(), and is thus more likely to show the relevant oops. It also means that 32-bit will print one more line at the end of pagefault triggered oopses: printk(KERN_EMERG "CR2: %016lx\n", address); Which is generally good information to be seen in partial-dump digital-camera jpegs ;-) The downside is the somewhat more complex critical path. Both variants have been tested well meanwhile by kernel developers crashing their boxes so i dont think this is a practical worry. This removes 3 ugly #ifdefs from no_context() and makes the function a lot nicer read. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index ebfaca3bbb1..8fe2dd254df 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -659,11 +659,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, { struct task_struct *tsk = current; unsigned long *stackend; - -#ifdef CONFIG_X86_64 unsigned long flags; int sig; -#endif /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) @@ -690,11 +687,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, * Oops. The kernel tried to access some bad page. 
We'll have to * terminate things with extreme prejudice: */ -#ifdef CONFIG_X86_32 - bust_spinlocks(1); -#else flags = oops_begin(); -#endif show_fault_oops(regs, error_code, address); @@ -702,15 +695,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, if (*stackend != STACK_END_MAGIC) printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); - tsk->thread.cr2 = address; - tsk->thread.trap_no = 14; - tsk->thread.error_code = error_code; + tsk->thread.cr2 = address; + tsk->thread.trap_no = 14; + tsk->thread.error_code = error_code; -#ifdef CONFIG_X86_32 - die("Oops", regs, error_code); - bust_spinlocks(0); - do_exit(SIGKILL); -#else sig = SIGKILL; if (__die("Oops", regs, error_code)) sig = 0; @@ -719,7 +707,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, printk(KERN_EMERG "CR2: %016lx\n", address); oops_end(flags, regs, sig); -#endif } /* -- cgit v1.2.3 From c3731c68668325abddee8665018c74c7156a57be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:22:34 +0100 Subject: x86, mm: fault.c, remove #ifdef from do_page_fault() Impact: cleanup do_page_fault() has this ugly #ifdef in its prototype: #ifdef CONFIG_X86_64 asmlinkage #endif void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) Replace it with 'dotraplinkage' which maps to exactly the above construct: nothing on 32-bit and asmlinkage on 64-bit. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8fe2dd254df..9c2dc5d7953 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -972,10 +972,8 @@ static int fault_in_kernel_space(unsigned long address) * and the problem, and then passes it off to one of the appropriate * routines. */ -#ifdef CONFIG_X86_64 -asmlinkage -#endif -void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) +dotraplinkage void __kprobes +do_page_fault(struct pt_regs *regs, unsigned long error_code) { struct vm_area_struct *vma; struct task_struct *tsk; -- cgit v1.2.3 From d951734654f76a370a89b4e531af9b765bd13541 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:32:28 +0100 Subject: x86, mm: rename TASK_SIZE64 => TASK_SIZE_MAX Impact: cleanup Rename TASK_SIZE64 to TASK_SIZE_MAX, and provide the define on 32-bit too. (mapped to TASK_SIZE) This allows 32-bit code to make use of the (former-) TASK_SIZE64 symbol as well, in a clean way. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/include/asm/processor.h | 9 +++++---- arch/x86/kernel/ptrace.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/x86/vdso/vma.c | 4 ++-- 4 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 72914d0315e..c7a98f73821 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -861,6 +861,7 @@ static inline void spin_lock_prefetch(const void *x) * User space process size: 3GB (default). */ #define TASK_SIZE PAGE_OFFSET +#define TASK_SIZE_MAX TASK_SIZE #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX STACK_TOP @@ -920,7 +921,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) +#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. 
@@ -929,12 +930,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); 0xc0000000 : 0xFFFFe000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) + IA32_PAGE_OFFSET : TASK_SIZE_MAX) #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) + IA32_PAGE_OFFSET : TASK_SIZE_MAX) #define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX TASK_SIZE64 +#define STACK_TOP_MAX TASK_SIZE_MAX #define INIT_THREAD { \ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index d2f7cd5b2c8..fb2159a5c81 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -268,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task) if (test_tsk_thread_flag(task, TIF_IA32)) return IA32_PAGE_OFFSET - 3; #endif - return TASK_SIZE64 - 7; + return TASK_SIZE_MAX - 7; } #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9c2dc5d7953..6fa9f175cba 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -963,7 +963,7 @@ static int fault_in_kernel_space(unsigned long address) #ifdef CONFIG_X86_32 return address >= TASK_SIZE; #else - return address >= TASK_SIZE64; + return address >= TASK_SIZE_MAX; #endif } diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 9c98cc6ba97..7133cdf9098 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) unsigned long addr, end; unsigned offset; end = (start + PMD_SIZE - 1) & PMD_MASK; - if (end >= TASK_SIZE64) - end = TASK_SIZE64; + if (end >= TASK_SIZE_MAX) + end = TASK_SIZE_MAX; end -= len; /* This loses some more bits than a modulo, but is cheaper */ offset = get_random_int() & (PTRS_PER_PTE - 1); -- cgit v1.2.3 From 7c178a26d3e753d2a4346d3e4b8aa549d387f698 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:27:29 +0100 Subject: x86, mm: fault.c, remove #ifdef from fault_in_kernel_space() Impact: cleanup Removal of an #ifdef in fault_in_kernel_space(), by making use of the new TASK_SIZE_MAX symbol which is now available on 32-bit too. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6fa9f175cba..f195691ec26 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -960,11 +960,7 @@ access_error(unsigned long error_code, int write, struct vm_area_struct *vma) static int fault_in_kernel_space(unsigned long address) { -#ifdef CONFIG_X86_32 - return address >= TASK_SIZE; -#else return address >= TASK_SIZE_MAX; -#endif } /* -- cgit v1.2.3 From cd1b68f08f6328737928e5b8ba8ef80394680ff0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:39:02 +0100 Subject: x86, mm: fault.c, give another attempt at prefetch handing before SIGBUS Impact: extend prefetch handling on 64-bit Currently there's an extra is_prefetch() check done in do_sigbus(), which we only do on 32 bits. This is a last-ditch check before we terminate a task, so it's worth giving prefetch instructions another chance - should none of our existing quirks have caught a prefetch instruction related spurious fault. The only risk is if a prefetch causes a real sigbus, in that case we'll not OOM but try another fault. But this code has been on 32-bit for a long time, so it should be fine in practice. 
So do this on 64-bit too - and thus remove one more #ifdef. Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f195691ec26..413e835e4a8 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -836,11 +836,9 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) if (!(error_code & PF_USER)) no_context(regs, error_code, address); -#ifdef CONFIG_X86_32 - /* User space => ok to do another page fault: */ + /* User-space => ok to do another page fault: */ if (is_prefetch(regs, error_code, address)) return; -#endif tsk->thread.cr2 = address; tsk->thread.error_code = error_code; -- cgit v1.2.3 From f8eeb2e6be367d79be3617f0a12646bced8b2fe6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 20 Feb 2009 23:13:36 +0100 Subject: x86, mm: fault.c, update copyrights Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 413e835e4a8..9bb07d331c3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1,6 +1,7 @@ /* * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. + * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar */ #include #include -- cgit v1.2.3 From fc6fcdfbb8f1b2553255170f221cd57a4108591f Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sun, 22 Feb 2009 01:00:57 +0100 Subject: x86: kexec/i386: fix sparse warnings: Using plain integer as NULL pointer Fix these sparse warnings: arch/x86/kernel/machine_kexec_32.c:124:22: warning: Using plain integer as NULL pointer arch/x86/kernel/traps.c:950:24: warning: Using plain integer as NULL pointer Signed-off-by: Hannes Eder Cc: trivial@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/machine_kexec_32.c | 2 +- arch/x86/kernel/traps.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 37f420018a4..f5fc8c781a6 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -121,7 +121,7 @@ static void machine_kexec_page_table_set_one( static void machine_kexec_prepare_page_tables(struct kimage *image) { void *control_page; - pmd_t *pmd = 0; + pmd_t *pmd = NULL; control_page = page_address(image->control_code_page); #ifdef CONFIG_X86_PAE diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index acb8c0585ab..2df927b5003 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -942,7 +942,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_BADSTK; - info.si_addr = 0; + info.si_addr = NULL; if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 32, SIGILL) == NOTIFY_STOP) return; -- cgit v1.2.3 From 2366c298b5afe52e635afd5604b69ce9fd4471fc Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sun, 22 Feb 2009 01:01:13 +0100 Subject: x86: numa_32.c: fix sparse warning: Using plain integer as NULL pointer Fix this sparse warning: arch/x86/mm/numa_32.c:197:24: warning: Using plain integer as NULL pointer Signed-off-by: Hannes Eder Cc: trivial@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/mm/numa_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 
d1f7439d173..3957cd6d645 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -194,7 +194,7 @@ void *alloc_remap(int nid, unsigned long size) size = ALIGN(size, L1_CACHE_BYTES); if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid]) - return 0; + return NULL; node_remap_alloc_vaddr[nid] += size; memset(allocation, 0, size); -- cgit v1.2.3 From b319eed0aa0a6d710887350a3cb734c572aa64c4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 22 Feb 2009 10:24:18 +0100 Subject: x86, mm: fault.c, simplify kmmio_fault(), cleanup Clarify the kmmio_fault() comment. Acked-by: Pekka Paalanen Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9bb07d331c3..a03b7279efa 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -57,7 +57,8 @@ enum x86_pf_error_code { }; /* - * (returns 0 if mmiotrace is disabled) + * Returns 0 if mmiotrace is disabled, or if the fault is not + * handled by mmiotrace: */ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { -- cgit v1.2.3 From a47e3ec197f515e25c77805f02d26f9e86456f65 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 20 Feb 2009 18:57:58 -0800 Subject: x86: ia32_signal: remove unused debug code Impact: cleanup DEBUG_SIG will not be used. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index dd77ac0cac4..adc63f81cb8 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -33,8 +33,6 @@ #include #include -#define DEBUG_SIG 0 - #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ @@ -220,12 +218,6 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; -#if DEBUG_SIG - printk(KERN_DEBUG "SIG restore_sigcontext: " - "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n", - sc, sc->err, sc->ip, sc->cs, sc->flags); -#endif - get_user_try { /* * Reload fs and gs if they have changed in the signal @@ -488,11 +480,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, regs->cs = __USER32_CS; regs->ss = __USER32_DS; -#if DEBUG_SIG - printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", - current->comm, current->pid, frame, regs->ip, frame->pretcode); -#endif - return 0; } @@ -574,10 +561,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->cs = __USER32_CS; regs->ss = __USER32_DS; -#if DEBUG_SIG - printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", - current->comm, current->pid, frame, regs->ip, frame->pretcode); -#endif - return 0; } -- cgit v1.2.3 From 8801ead40caa8ba9c7d47a06ff1247c166cbfd5a Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 20 Feb 2009 19:00:13 -0800 Subject: x86: ia32_signal: introduce GET_SEG() macro Impact: cleanup introduce GET_SEG() macro like arch/x86/kernel/signal.c. 
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index adc63f81cb8..8dd0903da08 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -192,11 +192,15 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, get_user_ex(regs->x, &sc->x); \ } -#define COPY_SEG_CPL3(seg) { \ - unsigned short tmp; \ - get_user_ex(tmp, &sc->seg); \ - regs->seg = tmp | 3; \ -} +#define GET_SEG(seg) ({ \ + unsigned short tmp; \ + get_user_ex(tmp, &sc->seg); \ + tmp; \ +}) + +#define COPY_SEG_CPL3(seg) do { \ + regs->seg = GET_SEG(seg) | 3; \ +} while (0) #define RELOAD_SEG(seg) { \ unsigned int cur, pre; \ -- cgit v1.2.3 From a967bb3fbe640056bacb0357722f51676bb06e3c Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 20 Feb 2009 19:00:54 -0800 Subject: x86: ia32_signal: introduce {get|set}_user_seg() Impact: cleanup Introduce {get|set}_user_seg() and loadsegment_xx() macros to make code clean. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 8dd0903da08..588a7aa937e 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -188,6 +188,14 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, /* * Do a signal return; undo the signal stack. */ +#define loadsegment_gs(v) load_gs_index(v) +#define loadsegment_fs(v) loadsegment(fs, v) +#define loadsegment_ds(v) loadsegment(ds, v) +#define loadsegment_es(v) loadsegment(es, v) + +#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; }) +#define set_user_seg(seg, v) loadsegment_##seg(v) + #define COPY(x) { \ get_user_ex(regs->x, &sc->x); \ } @@ -203,19 +211,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, } while (0) #define RELOAD_SEG(seg) { \ - unsigned int cur, pre; \ - get_user_ex(pre, &sc->seg); \ - savesegment(seg, cur); \ + unsigned int pre = GET_SEG(seg); \ + unsigned int cur = get_user_seg(seg); \ pre |= 3; \ if (pre != cur) \ - loadsegment(seg, pre); \ + set_user_seg(seg, pre); \ } static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_ia32 __user *sc, unsigned int *pax) { - unsigned int tmpflags, gs, oldgs, err = 0; + unsigned int tmpflags, err = 0; void __user *buf; u32 tmp; @@ -229,12 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, * the handler, but does not clobber them at least in the * normal case. 
*/ - get_user_ex(gs, &sc->gs); - gs |= 3; - savesegment(gs, oldgs); - if (gs != oldgs) - load_gs_index(gs); - + RELOAD_SEG(gs); RELOAD_SEG(fs); RELOAD_SEG(ds); RELOAD_SEG(es); @@ -333,17 +335,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned int mask) { - int tmp, err = 0; + int err = 0; put_user_try { - savesegment(gs, tmp); - put_user_ex(tmp, (unsigned int __user *)&sc->gs); - savesegment(fs, tmp); - put_user_ex(tmp, (unsigned int __user *)&sc->fs); - savesegment(ds, tmp); - put_user_ex(tmp, (unsigned int __user *)&sc->ds); - savesegment(es, tmp); - put_user_ex(tmp, (unsigned int __user *)&sc->es); + put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs); + put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs); + put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds); + put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es); put_user_ex(regs->di, &sc->di); put_user_ex(regs->si, &sc->si); -- cgit v1.2.3 From b98103a5597b87211a1c74077b06faeac554bedc Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:09:47 +0100 Subject: x86: hpet: print HPET registers during setup (if hpet=verbose is used) Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index a00545fe5cd..1d86ca3c1f9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -80,6 +80,7 @@ static inline void hpet_clear_mapping(void) */ static int boot_hpet_disable; int hpet_force_user; +static int hpet_verbose; static int __init hpet_setup(char *str) { @@ -88,6 +89,8 @@ static int __init hpet_setup(char *str) boot_hpet_disable = 1; if (!strncmp("force", str, 5)) hpet_force_user = 1; + if (!strncmp("verbose", str, 7)) + hpet_verbose = 1; } return 1; } @@ -119,6 +122,43 @@ int is_hpet_enabled(void) } EXPORT_SYMBOL_GPL(is_hpet_enabled); +static void _hpet_print_config(const char *function, int line) +{ + u32 i, timers, l, h; + printk(KERN_INFO "hpet: %s(%d):\n", function, line); + l = hpet_readl(HPET_ID); + h = hpet_readl(HPET_PERIOD); + timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; + printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h); + l = hpet_readl(HPET_CFG); + h = hpet_readl(HPET_STATUS); + printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h); + l = hpet_readl(HPET_COUNTER); + h = hpet_readl(HPET_COUNTER+4); + printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); + + for (i = 0; i < timers; i++) { + l = hpet_readl(HPET_Tn_CFG(i)); + h = hpet_readl(HPET_Tn_CFG(i)+4); + printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", + i, l, h); + l = hpet_readl(HPET_Tn_CMP(i)); + h = hpet_readl(HPET_Tn_CMP(i)+4); + printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", + i, l, h); + l = hpet_readl(HPET_Tn_ROUTE(i)); + h = hpet_readl(HPET_Tn_ROUTE(i)+4); + printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", + i, l, h); + } +} + +#define hpet_print_config() \ +do { \ + if (hpet_verbose) \ + _hpet_print_config(__FUNCTION__, __LINE__); \ +} while (0) + /* * When the hpet driver (/dev/hpet) is enabled, we need to reserve * timer 0 and timer 1 in case of RTC emulation. 
@@ -282,6 +322,7 @@ static void hpet_set_mode(enum clock_event_mode mode, hpet_writel(cmp, HPET_Tn_CMP(timer)); udelay(1); hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); + hpet_print_config(); break; case CLOCK_EVT_MODE_ONESHOT: @@ -308,6 +349,7 @@ static void hpet_set_mode(enum clock_event_mode mode, irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); enable_irq(hdev->irq); } + hpet_print_config(); break; } } @@ -526,6 +568,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); num_timers++; /* Value read out starts from 0 */ + hpet_print_config(); hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); if (!hpet_devs) @@ -793,6 +836,7 @@ int __init hpet_enable(void) * information and the number of channels */ id = hpet_readl(HPET_ID); + hpet_print_config(); #ifdef CONFIG_HPET_EMULATE_RTC /* @@ -845,6 +889,7 @@ static __init int hpet_late_init(void) return -ENODEV; hpet_reserve_platform_timers(hpet_readl(HPET_ID)); + hpet_print_config(); for_each_online_cpu(cpu) { hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); -- cgit v1.2.3 From 8d6f0c8214928f7c5083dd54ecb69c5d615b516e Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:10:44 +0100 Subject: x86: hpet: provide separate functions to stop and start the counter By splitting up existing hpet_start_counter function. Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 1d86ca3c1f9..7ae1f1e8b9b 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -231,27 +231,37 @@ static struct clock_event_device hpet_clockevent = { .rating = 50, }; -static void hpet_start_counter(void) +static void hpet_stop_counter(void) { unsigned long cfg = hpet_readl(HPET_CFG); - cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); hpet_writel(0, HPET_COUNTER); hpet_writel(0, HPET_COUNTER + 4); +} + +static void hpet_start_counter(void) +{ + unsigned long cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } +static void hpet_restart_counter(void) +{ + hpet_stop_counter(); + hpet_start_counter(); +} + static void hpet_resume_device(void) { force_hpet_resume(); } -static void hpet_restart_counter(void) +static void hpet_resume_counter(void) { hpet_resume_device(); - hpet_start_counter(); + hpet_restart_counter(); } static void hpet_enable_legacy_int(void) @@ -738,7 +748,7 @@ static struct clocksource clocksource_hpet = { .mask = HPET_MASK, .shift = HPET_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .resume = hpet_restart_counter, + .resume = hpet_resume_counter, #ifdef CONFIG_X86_64 .vread = vread_hpet, #endif @@ -750,7 +760,7 @@ static int hpet_clocksource_register(void) cycle_t t1; /* Start the counter */ - hpet_start_counter(); + hpet_restart_counter(); /* Verify whether hpet counter works */ t1 = read_hpet(); -- cgit v1.2.3 From c23e253e67c9d8a91a0ffa33c1f571a17f0a2403 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:16:35 +0100 Subject: x86: hpet: stop HPET_COUNTER when programming periodic mode Impact: fix system hang on some systems operating with HZ_1000 On a system that stalled with HZ_1000, the first value written to T0_CMP (when the main counter was not stopped) did not trigger an interrupt. 
Instead, after the main counter wrapped around (after several minutes), an interrupt was triggered and only then did the periodic interrupt take effect. This can be fixed by implementing the HPET spec recommendation for programming the periodic mode (i.e. stopping the main counter). Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7ae1f1e8b9b..648b3a2a3a4 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -309,29 +309,22 @@ static int hpet_setup_msi_irq(unsigned int irq); static void hpet_set_mode(enum clock_event_mode mode, struct clock_event_device *evt, int timer) { - unsigned long cfg, cmp, now; + unsigned long cfg; uint64_t delta; switch (mode) { case CLOCK_EVT_MODE_PERIODIC: + hpet_stop_counter(); delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; delta >>= evt->shift; - now = hpet_readl(HPET_COUNTER); - cmp = now + (unsigned long) delta; cfg = hpet_readl(HPET_Tn_CFG(timer)); /* Make sure we use edge triggered interrupts */ cfg &= ~HPET_TN_LEVEL; cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(timer)); - /* - * The first write after writing TN_SETVAL to the - * config register sets the counter value, the second - * write sets the period. - */ - hpet_writel(cmp, HPET_Tn_CMP(timer)); - udelay(1); hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); + hpet_start_counter(); hpet_print_config(); break; -- cgit v1.2.3 From ef1f87aa7ba6224bef1b750b3272ba281d8f43ed Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Sat, 21 Feb 2009 14:23:21 -0800 Subject: x86: select x2apic ops in early apic probe only if x2apic mode is enabled If the BIOS hands control to the OS in legacy xapic mode, select the legacy xapic ops in the early apic probe and shift to the x2apic ops later in the boot sequence, only after enabling x2apic mode. If the BIOS hands over control already in x2apic mode, select the x2apic ops in the early apic probe. This fixes an early boot panic where we were selecting x2apic ops while the cpu was still in legacy xapic mode. 
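The ordering can be pictured as: at early probe time, trust only the mode the BIOS actually left the APIC in, and upgrade the routing choice later once the kernel itself has enabled x2apic. The stand-alone C sketch below only models that ordering; early_probe(), setup_routing() and the boolean flags are illustrative stand-ins, not the kernel code, which lives in check_x2apic() and default_setup_apic_routing() as patched below.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's apic driver selection. */
enum apic_driver { APIC_FLAT, APIC_X2APIC_CLUSTER, APIC_X2APIC_PHYS };

static const char *apic_name(enum apic_driver d)
{
	switch (d) {
	case APIC_FLAT:			return "flat";
	case APIC_X2APIC_CLUSTER:	return "x2apic cluster";
	default:			return "x2apic physical";
	}
}

/*
 * Early probe: pick an x2apic driver only if the APIC is already
 * operating in x2apic mode (i.e. the BIOS enabled it), never merely
 * because the CPU advertises the x2apic feature bit.
 */
static enum apic_driver early_probe(bool x2apic_mode_active, bool want_phys)
{
	if (!x2apic_mode_active)
		return APIC_FLAT;
	return want_phys ? APIC_X2APIC_PHYS : APIC_X2APIC_CLUSTER;
}

/*
 * Re-run the routing decision after the kernel has (possibly) switched
 * the APIC into x2apic mode itself.
 */
static enum apic_driver setup_routing(enum apic_driver cur,
				      bool x2apic_mode_active, bool want_phys)
{
	if (x2apic_mode_active && cur == APIC_FLAT)
		return want_phys ? APIC_X2APIC_PHYS : APIC_X2APIC_CLUSTER;
	return cur;
}

int main(void)
{
	/* BIOS handed over in legacy xapic mode; kernel enables x2apic later. */
	enum apic_driver apic = early_probe(false, false);
	printf("early probe:           %s\n", apic_name(apic));

	apic = setup_routing(apic, true, false);
	printf("after enabling x2apic: %s\n", apic_name(apic));
	return 0;
}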
Signed-off-by: Suresh Siddha Cc: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 2 +- arch/x86/kernel/apic/apic.c | 9 +-------- arch/x86/kernel/apic/probe_64.c | 13 ++++++++++--- arch/x86/kernel/apic/x2apic_cluster.c | 5 +---- arch/x86/kernel/apic/x2apic_phys.c | 10 +++++----- 5 files changed, 18 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index dce1bf696cc..a6208dc7463 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -146,7 +146,7 @@ static inline u64 native_x2apic_icr_read(void) return val; } -extern int x2apic; +extern int x2apic, x2apic_phys; extern void check_x2apic(void); extern void enable_x2apic(void); extern void enable_IR_x2apic(void); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index c12823eb55b..47c2d12e5cf 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1265,14 +1265,7 @@ void __cpuinit end_local_APIC_setup(void) #ifdef CONFIG_X86_X2APIC void check_x2apic(void) { - int msr, msr2; - - if (!cpu_has_x2apic) - return; - - rdmsr(MSR_IA32_APICBASE, msr, msr2); - - if (msr & X2APIC_ENABLE) { + if (x2apic_enabled()) { pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); x2apic_preenabled = x2apic = 1; } diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 70935dd904d..e7c163661c7 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -50,9 +50,16 @@ static struct apic *apic_probe[] __initdata = { void __init default_setup_apic_routing(void) { #ifdef CONFIG_X86_X2APIC - if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { - if (!intr_remapping_enabled) - apic = &apic_flat; + if (x2apic && (apic != &apic_x2apic_phys && +#ifdef CONFIG_X86_UV + apic != &apic_x2apic_uv_x && +#endif + apic != &apic_x2apic_cluster)) { + if (x2apic_phys) + apic = &apic_x2apic_phys; + else + apic = &apic_x2apic_cluster; + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); } #endif diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 4e39d9ad4d5..354b9c45601 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -14,10 +14,7 @@ DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { - if (cpu_has_x2apic) - return 1; - - return 0; + return x2apic_enabled(); } /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index d2d52eb9f7e..5bcb174409b 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -10,7 +10,7 @@ #include #include -static int x2apic_phys; +int x2apic_phys; static int set_x2apic_phys_mode(char *arg) { @@ -21,10 +21,10 @@ early_param("x2apic_phys", set_x2apic_phys_mode); static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { - if (cpu_has_x2apic && x2apic_phys) - return 1; - - return 0; + if (x2apic_phys) + return x2apic_enabled(); + else + return 0; } /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ -- cgit v1.2.3 From 8425091ff8af2addae118fc510a523b84ce51115 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Fri, 20 Feb 2009 16:59:11 -0800 Subject: x86: improve the help text of X86_EXTENDED_PLATFORM Change the CONFIG_X86_EXTENDED_PLATFORM help text to display the 32bit/64bit extended platform list. This is as suggested by Ingo. Signed-off-by: Ravikiran Thirumalai Cc: shai@scalex86.org Cc: "Benzi Galili (Benzi@ScaleMP.com)" Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8955262caa3..35efba546e0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -285,6 +285,7 @@ config X86_BIGSMP ---help--- This option is needed for the systems that have more than 8 CPUs +if X86_32 config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y @@ -293,12 +294,37 @@ config X86_EXTENDED_PLATFORM standard PC platforms. (which covers the vast majority of systems out there.) - If you enable this option then you'll be able to select a number - of non-PC x86 platforms. + If you enable this option then you'll be able to select support + for the following (non-PC) 32 bit x86 platforms: + AMD Elan + NUMAQ (IBM/Sequent) + RDC R-321x SoC + SGI 320/540 (Visual Workstation) + Summit/EXA (IBM x440) + Unisys ES7000 IA32 series + Voyager (NCR) If you have one of these systems, or if you want to build a generic distribution kernel, say Y here - otherwise say N. +endif + +if X86_64 +config X86_EXTENDED_PLATFORM + bool "Support for extended (non-PC) x86 platforms" + default y + ---help--- + If you disable this option then the kernel will only support + standard PC platforms. (which covers the vast majority of + systems out there.) + + If you enable this option then you'll be able to select support + for the following (non-PC) 64 bit x86 platforms: + ScaleMP vSMP + SGI Ultraviolet + If you have one of these systems, or if you want to build a + generic distribution kernel, say Y here - otherwise say N. +endif # This is an alphabetically sorted list of 64 bit extended platforms # Please maintain the alphabetic order if and when there are additions -- cgit v1.2.3 From 965c7ecaf2e2b083d711a01ab33735a4bdeee1a4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 22 Feb 2009 23:19:12 +0100 Subject: x86: remove the Voyager 32-bit subarch Impact: remove unused/broken code The Voyager subarch last built successfully on the v2.6.26 kernel and has been stale since then and does not build on the v2.6.27, v2.6.28 and v2.6.29-rc5 kernels. No actual users beyond the maintainer reported this breakage. Patches were sent and most of the fixes were accepted but the discussion around how to do a few remaining issues cleanly fizzled out with no resolution and the code remained broken. In the v2.6.30 x86 tree development cycle 32-bit subarch support has been reworked and removed - and the Voyager code, beyond the build problems already known, needs serious and significant changes and probably a rewrite to support it. CONFIG_X86_VOYAGER has been marked BROKEN then. The maintainer has been notified but no patches have been sent so far to fix it. While all other subarchs have been converted to the new scheme, voyager is still broken. 
We'd prefer to receive patches which clean up the current situation in a constructive way, but even in case of removal there is no obstacle to add that support back after the issues have been sorted out in a mutually acceptable fashion. So remove this inactive code for now. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 14 - arch/x86/boot/Makefile | 1 - arch/x86/boot/a20.c | 6 - arch/x86/boot/boot.h | 3 - arch/x86/boot/main.c | 5 - arch/x86/boot/voyager.c | 40 - arch/x86/configs/i386_defconfig | 1 - arch/x86/configs/x86_64_defconfig | 1 - arch/x86/include/asm/mach-voyager/do_timer.h | 17 - arch/x86/include/asm/mach-voyager/entry_arch.h | 26 - arch/x86/include/asm/mach-voyager/setup_arch.h | 12 - arch/x86/include/asm/vic.h | 61 - arch/x86/include/asm/voyager.h | 571 -------- arch/x86/lguest/Kconfig | 1 - arch/x86/mach-voyager/Makefile | 8 - arch/x86/mach-voyager/setup.c | 119 -- arch/x86/mach-voyager/voyager_basic.c | 317 ----- arch/x86/mach-voyager/voyager_cat.c | 1197 ---------------- arch/x86/mach-voyager/voyager_smp.c | 1805 ------------------------ arch/x86/mach-voyager/voyager_thread.c | 128 -- arch/x86/xen/Kconfig | 2 +- 21 files changed, 1 insertion(+), 4334 deletions(-) delete mode 100644 arch/x86/boot/voyager.c delete mode 100644 arch/x86/include/asm/mach-voyager/do_timer.h delete mode 100644 arch/x86/include/asm/mach-voyager/entry_arch.h delete mode 100644 arch/x86/include/asm/mach-voyager/setup_arch.h delete mode 100644 arch/x86/include/asm/vic.h delete mode 100644 arch/x86/include/asm/voyager.h delete mode 100644 arch/x86/mach-voyager/Makefile delete mode 100644 arch/x86/mach-voyager/setup.c delete mode 100644 arch/x86/mach-voyager/voyager_basic.c delete mode 100644 arch/x86/mach-voyager/voyager_cat.c delete mode 100644 arch/x86/mach-voyager/voyager_smp.c delete mode 100644 arch/x86/mach-voyager/voyager_thread.c (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 35efba546e0..5e2919c0ff9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -302,7 +302,6 @@ config X86_EXTENDED_PLATFORM SGI 320/540 (Visual Workstation) Summit/EXA (IBM x440) Unisys ES7000 IA32 series - Voyager (NCR) If you have one of these systems, or if you want to build a generic distribution kernel, say Y here - otherwise say N. @@ -423,19 +422,6 @@ config X86_ES7000 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is supposed to run on an IA32-based Unisys ES7000 system. -config X86_VOYAGER - bool "Voyager (NCR)" - depends on SMP && !PCI && BROKEN - depends on X86_32_NON_STANDARD - ---help--- - Voyager is an MCA-based 32-way capable SMP architecture proprietary - to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. - - *** WARNING *** - - If you do not specifically know you have a Voyager based machine, - say N here, otherwise the kernel you build will not be bootable. - config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index cd48c721001..c70eff69a1f 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -32,7 +32,6 @@ setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o setup-y += header.o main.o mca.o memory.o pm.o pmjump.o setup-y += printf.o string.o tty.o video.o video-mode.o version.o setup-$(CONFIG_X86_APM_BOOT) += apm.o -setup-$(CONFIG_X86_VOYAGER) += voyager.o # The link order of the video-*.o modules can matter. In particular, # video-vga.o *must* be listed first, followed by video-vesa.o. 
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c index fba8e9c6a50..7c19ce8c244 100644 --- a/arch/x86/boot/a20.c +++ b/arch/x86/boot/a20.c @@ -126,11 +126,6 @@ static void enable_a20_fast(void) int enable_a20(void) { -#ifdef CONFIG_X86_VOYAGER - /* On Voyager, a20_test() is unsafe? */ - enable_a20_kbc(); - return 0; -#else int loops = A20_ENABLE_LOOPS; int kbc_err; @@ -164,5 +159,4 @@ int enable_a20(void) } return -1; -#endif } diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index cc0ef13fba7..7b2692e897e 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -302,9 +302,6 @@ void probe_cards(int unsafe); /* video-vesa.c */ void vesa_store_edid(void); -/* voyager.c */ -int query_voyager(void); - #endif /* __ASSEMBLY__ */ #endif /* BOOT_BOOT_H */ diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index 197421db1af..58f0415d3ae 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -149,11 +149,6 @@ void main(void) /* Query MCA information */ query_mca(); - /* Voyager */ -#ifdef CONFIG_X86_VOYAGER - query_voyager(); -#endif - /* Query Intel SpeedStep (IST) information */ query_ist(); diff --git a/arch/x86/boot/voyager.c b/arch/x86/boot/voyager.c deleted file mode 100644 index 433909d61e5..00000000000 --- a/arch/x86/boot/voyager.c +++ /dev/null @@ -1,40 +0,0 @@ -/* -*- linux-c -*- ------------------------------------------------------- * - * - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright 2007 rPath, Inc. - All Rights Reserved - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. - * - * ----------------------------------------------------------------------- */ - -/* - * Get the Voyager config information - */ - -#include "boot.h" - -int query_voyager(void) -{ - u8 err; - u16 es, di; - /* Abuse the apm_bios_info area for this */ - u8 *data_ptr = (u8 *)&boot_params.apm_bios_info; - - data_ptr[0] = 0xff; /* Flag on config not found(?) 
*/ - - asm("pushw %%es ; " - "int $0x15 ; " - "setc %0 ; " - "movw %%es, %1 ; " - "popw %%es" - : "=q" (err), "=r" (es), "=D" (di) - : "a" (0xffc0)); - - if (err) - return -1; /* Not Voyager */ - - set_fs(es); - copy_from_fs(data_ptr, di, 7); /* Table is 7 bytes apparently */ - return 0; -} diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 096dd5359cd..5c023f6f652 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -197,7 +197,6 @@ CONFIG_SPARSE_IRQ=y CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y # CONFIG_X86_ELAN is not set -# CONFIG_X86_VOYAGER is not set # CONFIG_X86_GENERICARCH is not set # CONFIG_X86_VSMP is not set # CONFIG_X86_RDC321X is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 2efb5d5063f..4157cc4a2bd 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -199,7 +199,6 @@ CONFIG_SPARSE_IRQ=y CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y # CONFIG_X86_ELAN is not set -# CONFIG_X86_VOYAGER is not set # CONFIG_X86_GENERICARCH is not set # CONFIG_X86_VSMP is not set CONFIG_SCHED_OMIT_FRAME_POINTER=y diff --git a/arch/x86/include/asm/mach-voyager/do_timer.h b/arch/x86/include/asm/mach-voyager/do_timer.h deleted file mode 100644 index 9e5a459fd15..00000000000 --- a/arch/x86/include/asm/mach-voyager/do_timer.h +++ /dev/null @@ -1,17 +0,0 @@ -/* defines for inline arch setup functions */ -#include - -#include -#include - -/** - * do_timer_interrupt_hook - hook into timer tick - * - * Call the pit clock event handler. see asm/i8253.h - **/ -static inline void do_timer_interrupt_hook(void) -{ - global_clock_event->event_handler(global_clock_event); - voyager_timer_interrupt(); -} - diff --git a/arch/x86/include/asm/mach-voyager/entry_arch.h b/arch/x86/include/asm/mach-voyager/entry_arch.h deleted file mode 100644 index ae52624b593..00000000000 --- a/arch/x86/include/asm/mach-voyager/entry_arch.h +++ /dev/null @@ -1,26 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 2002 - * - * Author: James.Bottomley@HansenPartnership.com - * - * linux/arch/i386/voyager/entry_arch.h - * - * This file builds the VIC and QIC CPI gates - */ - -/* initialise the voyager interrupt gates - * - * This uses the macros in irq.h to set up assembly jump gates. 
The - * calls are then redirected to the same routine with smp_ prefixed */ -BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT) -BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT) -BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0); - -/* do all the QIC interrupts */ -BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI); -BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); -BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); -BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); -BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); -BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI); diff --git a/arch/x86/include/asm/mach-voyager/setup_arch.h b/arch/x86/include/asm/mach-voyager/setup_arch.h deleted file mode 100644 index 71729ca05cd..00000000000 --- a/arch/x86/include/asm/mach-voyager/setup_arch.h +++ /dev/null @@ -1,12 +0,0 @@ -#include -#include -#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \ - (&boot_params.apm_bios_info)) - -/* Hook to call BIOS initialisation function */ - -/* for voyager, pass the voyager BIOS/SUS info area to the detection - * routines */ - -#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO); - diff --git a/arch/x86/include/asm/vic.h b/arch/x86/include/asm/vic.h deleted file mode 100644 index 53100f35361..00000000000 --- a/arch/x86/include/asm/vic.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * Standard include definitions for the NCR Voyager Interrupt Controller */ - -/* The eight CPI vectors. To activate a CPI, you write a bit mask - * corresponding to the processor set to be interrupted into the - * relevant register. That set of CPUs will then be interrupted with - * the CPI */ -static const int VIC_CPI_Registers[] = - {0xFC00, 0xFC01, 0xFC08, 0xFC09, - 0xFC10, 0xFC11, 0xFC18, 0xFC19 }; - -#define VIC_PROC_WHO_AM_I 0xfc29 -# define QUAD_IDENTIFIER 0xC0 -# define EIGHT_SLOT_IDENTIFIER 0xE0 -#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72 -#define VIC_CPI_BASE_REGISTER 0xFC41 -#define VIC_PROCESSOR_ID 0xFC21 -# define VIC_CPU_MASQUERADE_ENABLE 0x8 - -#define VIC_CLAIM_REGISTER_0 0xFC38 -#define VIC_CLAIM_REGISTER_1 0xFC39 -#define VIC_REDIRECT_REGISTER_0 0xFC60 -#define VIC_REDIRECT_REGISTER_1 0xFC61 -#define VIC_PRIORITY_REGISTER 0xFC20 - -#define VIC_PRIMARY_MC_BASE 0xFC48 -#define VIC_SECONDARY_MC_BASE 0xFC49 - -#define QIC_PROCESSOR_ID 0xFC71 -# define QIC_CPUID_ENABLE 0x08 - -#define QIC_VIC_CPI_BASE_REGISTER 0xFC79 -#define QIC_CPI_BASE_REGISTER 0xFC7A - -#define QIC_MASK_REGISTER0 0xFC80 -/* NOTE: these are masked high, enabled low */ -# define QIC_PERF_TIMER 0x01 -# define QIC_LPE 0x02 -# define QIC_SYS_INT 0x04 -# define QIC_CMN_INT 0x08 -/* at the moment, just enable CMN_INT, disable SYS_INT */ -# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */)) -#define QIC_MASK_REGISTER1 0xFC81 -# define QIC_BOOT_CPI_MASK 0xFE -/* Enable CPI's 1-6 inclusive */ -# define QIC_CPI_ENABLE 0x81 - -#define QIC_INTERRUPT_CLEAR0 0xFC8A -#define QIC_INTERRUPT_CLEAR1 0xFC8B - -/* this is where we place the CPI vectors */ -#define VIC_DEFAULT_CPI_BASE 0xC0 -/* this is where we place the QIC CPI vectors */ -#define QIC_DEFAULT_CPI_BASE 0xD0 - -#define VIC_BOOT_INTERRUPT_MASK 0xfe - -extern void smp_vic_timer_interrupt(void); diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h deleted file mode 100644 index c1635d43616..00000000000 --- a/arch/x86/include/asm/voyager.h +++ /dev/null @@ -1,571 
+0,0 @@ -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * Standard include definitions for the NCR Voyager system */ - -#undef VOYAGER_DEBUG -#undef VOYAGER_CAT_DEBUG - -#ifdef VOYAGER_DEBUG -#define VDEBUG(x) printk x -#else -#define VDEBUG(x) -#endif - -/* There are three levels of voyager machine: 3,4 and 5. The rule is - * if it's less than 3435 it's a Level 3 except for a 3360 which is - * a level 4. A 3435 or above is a Level 5 */ -#define VOYAGER_LEVEL5_AND_ABOVE 0x3435 -#define VOYAGER_LEVEL4 0x3360 - -/* The L4 DINO ASIC */ -#define VOYAGER_DINO 0x43 - -/* voyager ports in standard I/O space */ -#define VOYAGER_MC_SETUP 0x96 - - -#define VOYAGER_CAT_CONFIG_PORT 0x97 -# define VOYAGER_CAT_DESELECT 0xff -#define VOYAGER_SSPB_RELOCATION_PORT 0x98 - -/* Valid CAT controller commands */ -/* start instruction register cycle */ -#define VOYAGER_CAT_IRCYC 0x01 -/* start data register cycle */ -#define VOYAGER_CAT_DRCYC 0x02 -/* move to execute state */ -#define VOYAGER_CAT_RUN 0x0F -/* end operation */ -#define VOYAGER_CAT_END 0x80 -/* hold in idle state */ -#define VOYAGER_CAT_HOLD 0x90 -/* single step an "intest" vector */ -#define VOYAGER_CAT_STEP 0xE0 -/* return cat controller to CLEMSON mode */ -#define VOYAGER_CAT_CLEMSON 0xFF - -/* the default cat command header */ -#define VOYAGER_CAT_HEADER 0x7F - -/* the range of possible CAT module ids in the system */ -#define VOYAGER_MIN_MODULE 0x10 -#define VOYAGER_MAX_MODULE 0x1f - -/* The voyager registers per asic */ -#define VOYAGER_ASIC_ID_REG 0x00 -#define VOYAGER_ASIC_TYPE_REG 0x01 -/* the sub address registers can be made auto incrementing on reads */ -#define VOYAGER_AUTO_INC_REG 0x02 -# define VOYAGER_AUTO_INC 0x04 -# define VOYAGER_NO_AUTO_INC 0xfb -#define VOYAGER_SUBADDRDATA 0x03 -#define VOYAGER_SCANPATH 0x05 -# define VOYAGER_CONNECT_ASIC 0x01 -# define VOYAGER_DISCONNECT_ASIC 0xfe -#define VOYAGER_SUBADDRLO 0x06 -#define VOYAGER_SUBADDRHI 0x07 -#define VOYAGER_SUBMODSELECT 0x08 -#define VOYAGER_SUBMODPRESENT 0x09 - -#define VOYAGER_SUBADDR_LO 0xff -#define VOYAGER_SUBADDR_HI 0xffff - -/* the maximum size of a scan path -- used to form instructions */ -#define VOYAGER_MAX_SCAN_PATH 0x100 -/* the biggest possible register size (in bytes) */ -#define VOYAGER_MAX_REG_SIZE 4 - -/* Total number of possible modules (including submodules) */ -#define VOYAGER_MAX_MODULES 16 -/* Largest number of asics per module */ -#define VOYAGER_MAX_ASICS_PER_MODULE 7 - -/* the CAT asic of each module is always the first one */ -#define VOYAGER_CAT_ID 0 -#define VOYAGER_PSI 0x1a - -/* voyager instruction operations and registers */ -#define VOYAGER_READ_CONFIG 0x1 -#define VOYAGER_WRITE_CONFIG 0x2 -#define VOYAGER_BYPASS 0xff - -typedef struct voyager_asic { - __u8 asic_addr; /* ASIC address; Level 4 */ - __u8 asic_type; /* ASIC type */ - __u8 asic_id; /* ASIC id */ - __u8 jtag_id[4]; /* JTAG id */ - __u8 asic_location; /* Location within scan path; start w/ 0 */ - __u8 bit_location; /* Location within bit stream; start w/ 0 */ - __u8 ireg_length; /* Instruction register length */ - __u16 subaddr; /* Amount of sub address space */ - struct voyager_asic *next; /* Next asic in linked list */ -} voyager_asic_t; - -typedef struct voyager_module { - __u8 module_addr; /* Module address */ - __u8 scan_path_connected; /* Scan path connected */ - __u16 ee_size; /* Size of the EEPROM */ - __u16 num_asics; /* Number of Asics */ - __u16 inst_bits; /* Instruction bits in the scan path */ - __u16 largest_reg; /* Largest 
register in the scan path */ - __u16 smallest_reg; /* Smallest register in the scan path */ - voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ - struct voyager_module *submodule; /* Submodule pointer */ - struct voyager_module *next; /* Next module in linked list */ -} voyager_module_t; - -typedef struct voyager_eeprom_hdr { - __u8 module_id[4]; - __u8 version_id; - __u8 config_id; - __u16 boundry_id; /* boundary scan id */ - __u16 ee_size; /* size of EEPROM */ - __u8 assembly[11]; /* assembly # */ - __u8 assembly_rev; /* assembly rev */ - __u8 tracer[4]; /* tracer number */ - __u16 assembly_cksum; /* asm checksum */ - __u16 power_consump; /* pwr requirements */ - __u16 num_asics; /* number of asics */ - __u16 bist_time; /* min. bist time */ - __u16 err_log_offset; /* error log offset */ - __u16 scan_path_offset;/* scan path offset */ - __u16 cct_offset; - __u16 log_length; /* length of err log */ - __u16 xsum_end; /* offset to end of - checksum */ - __u8 reserved[4]; - __u8 sflag; /* starting sentinal */ - __u8 part_number[13]; /* prom part number */ - __u8 version[10]; /* version number */ - __u8 signature[8]; - __u16 eeprom_chksum; - __u32 data_stamp_offset; - __u8 eflag ; /* ending sentinal */ -} __attribute__((packed)) voyager_eprom_hdr_t; - - - -#define VOYAGER_EPROM_SIZE_OFFSET \ - ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) -#define VOYAGER_XSUM_END_OFFSET 0x2a - -/* the following three definitions are for internal table layouts - * in the module EPROMs. We really only care about the IDs and - * offsets */ -typedef struct voyager_sp_table { - __u8 asic_id; - __u8 bypass_flag; - __u16 asic_data_offset; - __u16 config_data_offset; -} __attribute__((packed)) voyager_sp_table_t; - -typedef struct voyager_jtag_table { - __u8 icode[4]; - __u8 runbist[4]; - __u8 intest[4]; - __u8 samp_preld[4]; - __u8 ireg_len; -} __attribute__((packed)) voyager_jtt_t; - -typedef struct voyager_asic_data_table { - __u8 jtag_id[4]; - __u16 length_bsr; - __u16 length_bist_reg; - __u32 bist_clk; - __u16 subaddr_bits; - __u16 seed_bits; - __u16 sig_bits; - __u16 jtag_offset; -} __attribute__((packed)) voyager_at_t; - -/* Voyager Interrupt Controller (VIC) registers */ - -/* Base to add to Cross Processor Interrupts (CPIs) when triggering - * the CPU IRQ line */ -/* register defines for the WCBICs (one per processor) */ -#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */ -#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */ -#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */ -#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */ -#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */ -#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */ -#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */ -#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */ - - -/* top of memory registers */ -#define VOYAGER_WCBIC_TOM_L 0x4 -#define VOYAGER_WCBIC_TOM_H 0x5 - -/* register defines for Voyager Memory Contol (VMC) - * these are present on L4 machines only */ -#define VOYAGER_VMC1 0x81 -#define VOYAGER_VMC2 0x91 -#define VOYAGER_VMC3 0xa1 -#define VOYAGER_VMC4 0xb1 - -/* VMC Ports */ -#define VOYAGER_VMC_MEMORY_SETUP 0x9 -# define VMC_Interleaving 0x01 -# define VMC_4Way 0x02 -# define VMC_EvenCacheLines 0x04 -# define VMC_HighLine 0x08 -# define VMC_Start0_Enable 0x20 -# define VMC_Start1_Enable 0x40 -# define VMC_Vremap 0x80 -#define VOYAGER_VMC_BANK_DENSITY 0xa -# define VMC_BANK_EMPTY 0 -# define VMC_BANK_4MB 1 -# define VMC_BANK_16MB 2 -# define 
VMC_BANK_64MB 3 -# define VMC_BANK0_MASK 0x03 -# define VMC_BANK1_MASK 0x0C -# define VMC_BANK2_MASK 0x30 -# define VMC_BANK3_MASK 0xC0 - -/* Magellan Memory Controller (MMC) defines - present on L5 */ -#define VOYAGER_MMC_ASIC_ID 1 -/* the two memory modules corresponding to memory cards in the system */ -#define VOYAGER_MMC_MEMORY0_MODULE 0x14 -#define VOYAGER_MMC_MEMORY1_MODULE 0x15 -/* the Magellan Memory Address (MMA) defines */ -#define VOYAGER_MMA_ASIC_ID 2 - -/* Submodule number for the Quad Baseboard */ -#define VOYAGER_QUAD_BASEBOARD 1 - -/* ASIC defines for the Quad Baseboard */ -#define VOYAGER_QUAD_QDATA0 1 -#define VOYAGER_QUAD_QDATA1 2 -#define VOYAGER_QUAD_QABC 3 - -/* Useful areas in extended CMOS */ -#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a -#define VOYAGER_MEMORY_CLICKMAP 0xa23 -#define VOYAGER_DUMP_LOCATION 0xb1a - -/* SUS In Control bit - used to tell SUS that we don't need to be - * babysat anymore */ -#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff -# define VOYAGER_IN_CONTROL_FLAG 0x80 - -/* Voyager PSI defines */ -#define VOYAGER_PSI_STATUS_REG 0x08 -# define PSI_DC_FAIL 0x01 -# define PSI_MON 0x02 -# define PSI_FAULT 0x04 -# define PSI_ALARM 0x08 -# define PSI_CURRENT 0x10 -# define PSI_DVM 0x20 -# define PSI_PSCFAULT 0x40 -# define PSI_STAT_CHG 0x80 - -#define VOYAGER_PSI_SUPPLY_REG 0x8000 - /* read */ -# define PSI_FAIL_DC 0x01 -# define PSI_FAIL_AC 0x02 -# define PSI_MON_INT 0x04 -# define PSI_SWITCH_OFF 0x08 -# define PSI_HX_OFF 0x10 -# define PSI_SECURITY 0x20 -# define PSI_CMOS_BATT_LOW 0x40 -# define PSI_CMOS_BATT_FAIL 0x80 - /* write */ -# define PSI_CLR_SWITCH_OFF 0x13 -# define PSI_CLR_HX_OFF 0x14 -# define PSI_CLR_CMOS_BATT_FAIL 0x17 - -#define VOYAGER_PSI_MASK 0x8001 -# define PSI_MASK_MASK 0x10 - -#define VOYAGER_PSI_AC_FAIL_REG 0x8004 -#define AC_FAIL_STAT_CHANGE 0x80 - -#define VOYAGER_PSI_GENERAL_REG 0x8007 - /* read */ -# define PSI_SWITCH_ON 0x01 -# define PSI_SWITCH_ENABLED 0x02 -# define PSI_ALARM_ENABLED 0x08 -# define PSI_SECURE_ENABLED 0x10 -# define PSI_COLD_RESET 0x20 -# define PSI_COLD_START 0x80 - /* write */ -# define PSI_POWER_DOWN 0x10 -# define PSI_SWITCH_DISABLE 0x01 -# define PSI_SWITCH_ENABLE 0x11 -# define PSI_CLEAR 0x12 -# define PSI_ALARM_DISABLE 0x03 -# define PSI_ALARM_ENABLE 0x13 -# define PSI_CLEAR_COLD_RESET 0x05 -# define PSI_SET_COLD_RESET 0x15 -# define PSI_CLEAR_COLD_START 0x07 -# define PSI_SET_COLD_START 0x17 - - - -struct voyager_bios_info { - __u8 len; - __u8 major; - __u8 minor; - __u8 debug; - __u8 num_classes; - __u8 class_1; - __u8 class_2; -}; - -/* The following structures and definitions are for the Kernel/SUS - * interface these are needed to find out how SUS initialised any Quad - * boards in the system */ - -#define NUMBER_OF_MC_BUSSES 2 -#define SLOTS_PER_MC_BUS 8 -#define MAX_CPUS 16 /* 16 way CPU system */ -#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */ -#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */ -#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */ -#define NUMBER_OF_POS_REGS 8 - -typedef struct { - __u8 MC_Slot; - __u8 POS_Values[NUMBER_OF_POS_REGS]; -} __attribute__((packed)) MC_SlotInformation_t; - -struct QuadDescription { - __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields - * will be zero except for slot */ - __u8 StructureVersion; - __u32 CPI_BaseAddress; - __u32 LARC_BankSize; - __u32 LocalMemoryStateBits; - __u8 Slot; /* Processor slots 1 - 4 */ -} __attribute__((packed)); - -struct ProcBoardInfo { - __u8 Type; - __u8 StructureVersion; - __u8 
NumberOfBoards; - struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS]; -} __attribute__((packed)); - -struct CacheDescription { - __u8 Level; - __u32 TotalSize; - __u16 LineSize; - __u8 Associativity; - __u8 CacheType; - __u8 WriteType; - __u8 Number_CPUs_SharedBy; - __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS]; - -} __attribute__((packed)); - -struct CPU_Description { - __u8 CPU_HardwareId; - char *FRU_String; - __u8 NumberOfCacheLevels; - struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS]; -} __attribute__((packed)); - -struct CPU_Info { - __u8 Type; - __u8 StructureVersion; - __u8 NumberOf_CPUs; - struct CPU_Description CPU_Data[MAX_CPUS]; -} __attribute__((packed)); - - -/* - * This structure will be used by SUS and the OS. - * The assumption about this structure is that no blank space is - * packed in it by our friend the compiler. - */ -typedef struct { - __u8 Mailbox_SUS; /* Written to by SUS to give - commands/response to the OS */ - __u8 Mailbox_OS; /* Written to by the OS to give - commands/response to SUS */ - __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the - interface SUS supports */ - __u8 OS_MailboxVersion; /* Tells SUS which iteration of the - interface the OS supports */ - __u32 OS_Flags; /* Flags set by the OS as info for - SUS */ - __u32 SUS_Flags; /* Flags set by SUS as info - for the OS */ - __u32 WatchDogPeriod; /* Watchdog period (in seconds) which - the DP uses to see if the OS - is dead */ - __u32 WatchDogCount; /* Updated by the OS on every tic. */ - __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS - where to stuff the SUS error log - on a dump */ - MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; - /* Storage for MCA POS data */ - /* All new SECOND_PASS_INTERFACE fields added from this point */ - struct ProcBoardInfo *BoardData; - struct CPU_Info *CPU_Data; - /* All new fields must be added from this point */ -} Voyager_KernelSUS_Mbox_t; - -/* structure for finding the right memory address to send a QIC CPI to */ -struct voyager_qic_cpi { - /* Each cache line (32 bytes) can trigger a cpi. 
The cpi - * read/write may occur anywhere in the cache line---pick the - * middle to be safe */ - struct { - __u32 pad1[3]; - __u32 cpi; - __u32 pad2[4]; - } qic_cpi[8]; -}; - -struct voyager_status { - __u32 power_fail:1; - __u32 switch_off:1; - __u32 request_from_kernel:1; -}; - -struct voyager_psi_regs { - __u8 cat_id; - __u8 cat_dev; - __u8 cat_control; - __u8 subaddr; - __u8 dummy4; - __u8 checkbit; - __u8 subaddr_low; - __u8 subaddr_high; - __u8 intstatus; - __u8 stat1; - __u8 stat3; - __u8 fault; - __u8 tms; - __u8 gen; - __u8 sysconf; - __u8 dummy15; -}; - -struct voyager_psi_subregs { - __u8 supply; - __u8 mask; - __u8 present; - __u8 DCfail; - __u8 ACfail; - __u8 fail; - __u8 UPSfail; - __u8 genstatus; -}; - -struct voyager_psi { - struct voyager_psi_regs regs; - struct voyager_psi_subregs subregs; -}; - -struct voyager_SUS { -#define VOYAGER_DUMP_BUTTON_NMI 0x1 -#define VOYAGER_SUS_VALID 0x2 -#define VOYAGER_SYSINT_COMPLETE 0x3 - __u8 SUS_mbox; -#define VOYAGER_NO_COMMAND 0x0 -#define VOYAGER_IGNORE_DUMP 0x1 -#define VOYAGER_DO_DUMP 0x2 -#define VOYAGER_SYSINT_HANDSHAKE 0x3 -#define VOYAGER_DO_MEM_DUMP 0x4 -#define VOYAGER_SYSINT_WAS_RECOVERED 0x5 - __u8 kernel_mbox; -#define VOYAGER_MAILBOX_VERSION 0x10 - __u8 SUS_version; - __u8 kernel_version; -#define VOYAGER_OS_HAS_SYSINT 0x1 -#define VOYAGER_OS_IN_PROGRESS 0x2 -#define VOYAGER_UPDATING_WDPERIOD 0x4 - __u32 kernel_flags; -#define VOYAGER_SUS_BOOTING 0x1 -#define VOYAGER_SUS_IN_PROGRESS 0x2 - __u32 SUS_flags; - __u32 watchdog_period; - __u32 watchdog_count; - __u32 SUS_errorlog; - /* lots of system configuration stuff under here */ -}; - -/* Variables exported by voyager_smp */ -extern __u32 voyager_extended_vic_processors; -extern __u32 voyager_allowed_boot_processors; -extern __u32 voyager_quad_processors; -extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS]; -extern struct voyager_SUS *voyager_SUS; - -/* variables exported always */ -extern struct task_struct *voyager_thread; -extern int voyager_level; -extern struct voyager_status voyager_status; - -/* functions exported by the voyager and voyager_smp modules */ -extern int voyager_cat_readb(__u8 module, __u8 asic, int reg); -extern void voyager_cat_init(void); -extern void voyager_detect(struct voyager_bios_info *); -extern void voyager_trap_init(void); -extern void voyager_setup_irqs(void); -extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length); -extern void voyager_smp_intr_init(void); -extern __u8 voyager_extended_cmos_read(__u16 cmos_address); -extern void voyager_smp_dump(void); -extern void voyager_timer_interrupt(void); -extern void smp_local_timer_interrupt(void); -extern void voyager_power_off(void); -extern void smp_voyager_power_off(void *dummy); -extern void voyager_restart(void); -extern void voyager_cat_power_off(void); -extern void voyager_cat_do_common_interrupt(void); -extern void voyager_handle_nmi(void); -extern void voyager_smp_intr_init(void); -/* Commands for the following are */ -#define VOYAGER_PSI_READ 0 -#define VOYAGER_PSI_WRITE 1 -#define VOYAGER_PSI_SUBREAD 2 -#define VOYAGER_PSI_SUBWRITE 3 -extern void voyager_cat_psi(__u8, __u16, __u8 *); - -/* These define the CPIs we use in linux */ -#define VIC_CPI_LEVEL0 0 -#define VIC_CPI_LEVEL1 1 -/* now the fake CPIs */ -#define VIC_TIMER_CPI 2 -#define VIC_INVALIDATE_CPI 3 -#define VIC_RESCHEDULE_CPI 4 -#define VIC_ENABLE_IRQ_CPI 5 -#define VIC_CALL_FUNCTION_CPI 6 -#define VIC_CALL_FUNCTION_SINGLE_CPI 7 - -/* Now the QIC CPIs: Since we don't need the two initial levels, 
- * these are 2 less than the VIC CPIs */ -#define QIC_CPI_OFFSET 1 -#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) -#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) -#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) -#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) -#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) -#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET) - -#define VIC_START_FAKE_CPI VIC_TIMER_CPI -#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI - -/* this is the SYS_INT CPI. */ -#define VIC_SYS_INT 8 -#define VIC_CMN_INT 15 - -/* This is the boot CPI for alternate processors. It gets overwritten - * by the above once the system has activated all available processors */ -#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 -#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) - -extern asmlinkage void vic_cpi_interrupt(void); -extern asmlinkage void vic_sys_interrupt(void); -extern asmlinkage void vic_cmn_interrupt(void); -extern asmlinkage void qic_timer_interrupt(void); -extern asmlinkage void qic_invalidate_interrupt(void); -extern asmlinkage void qic_reschedule_interrupt(void); -extern asmlinkage void qic_enable_irq_interrupt(void); -extern asmlinkage void qic_call_function_interrupt(void); diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index c70e12b1a63..8dab8f7844d 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -3,7 +3,6 @@ config LGUEST_GUEST select PARAVIRT depends on X86_32 depends on !X86_PAE - depends on !X86_VOYAGER select VIRTIO select VIRTIO_RING select VIRTIO_CONSOLE diff --git a/arch/x86/mach-voyager/Makefile b/arch/x86/mach-voyager/Makefile deleted file mode 100644 index 15c250b371d..00000000000 --- a/arch/x86/mach-voyager/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Makefile for the linux kernel. -# - -EXTRA_CFLAGS := -Iarch/x86/kernel -obj-y := setup.o voyager_basic.o voyager_thread.o - -obj-$(CONFIG_SMP) += voyager_smp.o voyager_cat.o diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c deleted file mode 100644 index 88c3c555634..00000000000 --- a/arch/x86/mach-voyager/setup.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Machine specific setup for generic - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -void __init pre_intr_init_hook(void) -{ - init_ISA_irqs(); -} - -/* - * IRQ2 is cascade interrupt to second interrupt controller - */ -static struct irqaction irq2 = { - .handler = no_action, - .mask = CPU_MASK_NONE, - .name = "cascade", -}; - -void __init intr_init_hook(void) -{ -#ifdef CONFIG_SMP - voyager_smp_intr_init(); -#endif - - setup_irq(2, &irq2); -} - -static void voyager_disable_tsc(void) -{ - /* Voyagers run their CPUs from independent clocks, so disable - * the TSC code because we can't sync them */ - setup_clear_cpu_cap(X86_FEATURE_TSC); -} - -void __init pre_setup_arch_hook(void) -{ - voyager_disable_tsc(); -} - -void __init pre_time_init_hook(void) -{ - voyager_disable_tsc(); -} - -void __init trap_init_hook(void) -{ -} - -static struct irqaction irq0 = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, - .mask = CPU_MASK_NONE, - .name = "timer" -}; - -void __init time_init_hook(void) -{ - irq0.mask = cpumask_of_cpu(safe_smp_processor_id()); - setup_irq(0, &irq0); -} - -/* Hook for machine specific memory setup. 
*/ - -char *__init machine_specific_memory_setup(void) -{ - char *who; - int new_nr; - - who = "NOT VOYAGER"; - - if (voyager_level == 5) { - __u32 addr, length; - int i; - - who = "Voyager-SUS"; - - e820.nr_map = 0; - for (i = 0; voyager_memory_detect(i, &addr, &length); i++) { - e820_add_region(addr, length, E820_RAM); - } - return who; - } else if (voyager_level == 4) { - __u32 tom; - __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8; - /* select the DINO config space */ - outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT); - /* Read DINO top of memory register */ - tom = ((inb(catbase + 0x4) & 0xf0) << 16) - + ((inb(catbase + 0x5) & 0x7f) << 24); - - if (inb(catbase) != VOYAGER_DINO) { - printk(KERN_ERR - "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n"); - tom = (boot_params.screen_info.ext_mem_k) << 10; - } - who = "Voyager-TOM"; - e820_add_region(0, 0x9f000, E820_RAM); - /* map from 1M to top of memory */ - e820_add_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024, - E820_RAM); - /* FIXME: Should check the ASICs to see if I need to - * take out the 8M window. Just do it at the moment - * */ - e820_add_region(8 * 1024 * 1024, 8 * 1024 * 1024, - E820_RESERVED); - return who; - } - - return default_machine_specific_memory_setup(); -} diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c deleted file mode 100644 index 46d6f806769..00000000000 --- a/arch/x86/mach-voyager/voyager_basic.c +++ /dev/null @@ -1,317 +0,0 @@ -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * This file contains all the voyager specific routines for getting - * initialisation of the architecture to function. For additional - * features see: - * - * voyager_cat.c - Voyager CAT bus interface - * voyager_smp.c - Voyager SMP hal (emulates linux smp.c) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Power off function, if any - */ -void (*pm_power_off) (void); -EXPORT_SYMBOL(pm_power_off); - -int voyager_level = 0; - -struct voyager_SUS *voyager_SUS = NULL; - -#ifdef CONFIG_SMP -static void voyager_dump(int dummy1, struct tty_struct *dummy3) -{ - /* get here via a sysrq */ - voyager_smp_dump(); -} - -static struct sysrq_key_op sysrq_voyager_dump_op = { - .handler = voyager_dump, - .help_msg = "Voyager", - .action_msg = "Dump Voyager Status", -}; -#endif - -void voyager_detect(struct voyager_bios_info *bios) -{ - if (bios->len != 0xff) { - int class = (bios->class_1 << 8) - | (bios->class_2 & 0xff); - - printk("Voyager System detected.\n" - " Class %x, Revision %d.%d\n", - class, bios->major, bios->minor); - if (class == VOYAGER_LEVEL4) - voyager_level = 4; - else if (class < VOYAGER_LEVEL5_AND_ABOVE) - voyager_level = 3; - else - voyager_level = 5; - printk(" Architecture Level %d\n", voyager_level); - if (voyager_level < 4) - printk - ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n"); - /* install the power off handler */ - pm_power_off = voyager_power_off; -#ifdef CONFIG_SMP - register_sysrq_key('v', &sysrq_voyager_dump_op); -#endif - } else { - printk("\n\n**WARNING**: No Voyager Subsystem Found\n"); - } -} - -void voyager_system_interrupt(int cpl, void *dev_id) -{ - printk("Voyager: detected system interrupt\n"); -} - -/* Routine to read information from the extended CMOS area */ -__u8 voyager_extended_cmos_read(__u16 addr) -{ - 
outb(addr & 0xff, 0x74); - outb((addr >> 8) & 0xff, 0x75); - return inb(0x76); -} - -/* internal definitions for the SUS Click Map of memory */ - -#define CLICK_ENTRIES 16 -#define CLICK_SIZE 4096 /* click to byte conversion for Length */ - -typedef struct ClickMap { - struct Entry { - __u32 Address; - __u32 Length; - } Entry[CLICK_ENTRIES]; -} ClickMap_t; - -/* This routine is pretty much an awful hack to read the bios clickmap by - * mapping it into page 0. There are usually three regions in the map: - * Base Memory - * Extended Memory - * zero length marker for end of map - * - * Returns are 0 for failure and 1 for success on extracting region. - */ -int __init voyager_memory_detect(int region, __u32 * start, __u32 * length) -{ - int i; - int retval = 0; - __u8 cmos[4]; - ClickMap_t *map; - unsigned long map_addr; - unsigned long old; - - if (region >= CLICK_ENTRIES) { - printk("Voyager: Illegal ClickMap region %d\n", region); - return 0; - } - - for (i = 0; i < sizeof(cmos); i++) - cmos[i] = - voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i); - - map_addr = *(unsigned long *)cmos; - - /* steal page 0 for this */ - old = pg0[0]; - pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); - local_flush_tlb(); - /* now clear everything out but page 0 */ - map = (ClickMap_t *) (map_addr & (~PAGE_MASK)); - - /* zero length is the end of the clickmap */ - if (map->Entry[region].Length != 0) { - *length = map->Entry[region].Length * CLICK_SIZE; - *start = map->Entry[region].Address; - retval = 1; - } - - /* replace the mapping */ - pg0[0] = old; - local_flush_tlb(); - return retval; -} - -/* voyager specific handling code for timer interrupts. Used to hand - * off the timer tick to the SMP code, since the VIC doesn't have an - * internal timer (The QIC does, but that's another story). */ -void voyager_timer_interrupt(void) -{ - if ((jiffies & 0x3ff) == 0) { - - /* There seems to be something flaky in either - * hardware or software that is resetting the timer 0 - * count to something much higher than it should be - * This seems to occur in the boot sequence, just - * before root is mounted. Therefore, every 10 - * seconds or so, we sanity check the timer zero count - * and kick it back to where it should be. - * - * FIXME: This is the most awful hack yet seen. I - * should work out exactly what is interfering with - * the timer count settings early in the boot sequence - * and swiftly introduce it to something sharp and - * pointy. */ - __u16 val; - - spin_lock(&i8253_lock); - - outb_p(0x00, 0x43); - val = inb_p(0x40); - val |= inb(0x40) << 8; - spin_unlock(&i8253_lock); - - if (val > LATCH) { - printk - ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", - val); - spin_lock(&i8253_lock); - outb(0x34, 0x43); - outb_p(LATCH & 0xff, 0x40); /* LSB */ - outb(LATCH >> 8, 0x40); /* MSB */ - spin_unlock(&i8253_lock); - } - } -#ifdef CONFIG_SMP - smp_vic_timer_interrupt(); -#endif -} - -void voyager_power_off(void) -{ - printk("VOYAGER Power Off\n"); - - if (voyager_level == 5) { - voyager_cat_power_off(); - } else if (voyager_level == 4) { - /* This doesn't apparently work on most L4 machines, - * but the specs say to do this to get automatic power - * off. 
Unfortunately, if it doesn't power off the - * machine, it ends up doing a cold restart, which - * isn't really intended, so comment out the code */ -#if 0 - int port; - - /* enable the voyager Configuration Space */ - outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP); - /* the port for the power off flag is an offset from the - floating base */ - port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21; - /* set the power off flag */ - outb(inb(port) | 0x1, port); -#endif - } - /* and wait for it to happen */ - local_irq_disable(); - for (;;) - halt(); -} - -/* copied from process.c */ -static inline void kb_wait(void) -{ - int i; - - for (i = 0; i < 0x10000; i++) - if ((inb_p(0x64) & 0x02) == 0) - break; -} - -void machine_shutdown(void) -{ - /* Architecture specific shutdown needed before a kexec */ -} - -void machine_restart(char *cmd) -{ - printk("Voyager Warm Restart\n"); - kb_wait(); - - if (voyager_level == 5) { - /* write magic values to the RTC to inform system that - * shutdown is beginning */ - outb(0x8f, 0x70); - outb(0x5, 0x71); - - udelay(50); - outb(0xfe, 0x64); /* pull reset low */ - } else if (voyager_level == 4) { - __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8; - __u8 basebd = inb(VOYAGER_MC_SETUP); - - outb(basebd | 0x08, VOYAGER_MC_SETUP); - outb(0x02, catbase + 0x21); - } - local_irq_disable(); - for (;;) - halt(); -} - -void machine_emergency_restart(void) -{ - /*for now, just hook this to a warm restart */ - machine_restart(NULL); -} - -void mca_nmi_hook(void) -{ - __u8 dumpval __maybe_unused = inb(0xf823); - __u8 swnmi __maybe_unused = inb(0xf813); - - /* FIXME: assume dump switch pressed */ - /* check to see if the dump switch was pressed */ - VDEBUG(("VOYAGER: dumpval = 0x%x, swnmi = 0x%x\n", dumpval, swnmi)); - /* clear swnmi */ - outb(0xff, 0xf813); - /* tell SUS to ignore dump */ - if (voyager_level == 5 && voyager_SUS != NULL) { - if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) { - voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND; - voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS; - udelay(1000); - voyager_SUS->kernel_mbox = VOYAGER_IGNORE_DUMP; - voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS; - } - } - printk(KERN_ERR - "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", - smp_processor_id()); - show_stack(NULL, NULL); - show_state(); -} - -void machine_halt(void) -{ - /* treat a halt like a power off */ - machine_power_off(); -} - -void machine_power_off(void) -{ - if (pm_power_off) - pm_power_off(); -} diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c deleted file mode 100644 index 2ad598c104a..00000000000 --- a/arch/x86/mach-voyager/voyager_cat.c +++ /dev/null @@ -1,1197 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * This file contains all the logic for manipulating the CAT bus - * in a level 5 machine. - * - * The CAT bus is a serial configuration and test bus. Its primary - * uses are to probe the initial configuration of the system and to - * diagnose error conditions when a system interrupt occurs. 
The low - * level interface is fairly primitive, so most of this file consists - * of bit shift manipulations to send and receive packets on the - * serial bus */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef VOYAGER_CAT_DEBUG -#define CDEBUG(x) printk x -#else -#define CDEBUG(x) -#endif - -/* the CAT command port */ -#define CAT_CMD (sspb + 0xe) -/* the CAT data port */ -#define CAT_DATA (sspb + 0xd) - -/* the internal cat functions */ -static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits); -static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data, - __u16 num_bits); -static void cat_build_header(__u8 * header, const __u16 len, - const __u16 smallest_reg_bits, - const __u16 longest_reg_bits); -static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, - __u8 reg, __u8 op); -static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, - __u8 reg, __u8 * value); -static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, - __u8 pad_bits); -static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, - __u8 value); -static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, - __u8 * value); -static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, - __u16 offset, __u16 len, void *buf); -static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp, - __u8 reg, __u8 value); -static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp); -static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp); - -static inline const char *cat_module_name(int module_id) -{ - switch (module_id) { - case 0x10: - return "Processor Slot 0"; - case 0x11: - return "Processor Slot 1"; - case 0x12: - return "Processor Slot 2"; - case 0x13: - return "Processor Slot 4"; - case 0x14: - return "Memory Slot 0"; - case 0x15: - return "Memory Slot 1"; - case 0x18: - return "Primary Microchannel"; - case 0x19: - return "Secondary Microchannel"; - case 0x1a: - return "Power Supply Interface"; - case 0x1c: - return "Processor Slot 5"; - case 0x1d: - return "Processor Slot 6"; - case 0x1e: - return "Processor Slot 7"; - case 0x1f: - return "Processor Slot 8"; - default: - return "Unknown Module"; - } -} - -static int sspb = 0; /* stores the super port location */ -int voyager_8slot = 0; /* set to true if a 51xx monster */ - -voyager_module_t *voyager_cat_list; - -/* the I/O port assignments for the VIC and QIC */ -static struct resource vic_res = { - .name = "Voyager Interrupt Controller", - .start = 0xFC00, - .end = 0xFC6F -}; -static struct resource qic_res = { - .name = "Quad Interrupt Controller", - .start = 0xFC70, - .end = 0xFCFF -}; - -/* This function is used to pack a data bit stream inside a message. - * It writes num_bits of the data buffer in msg starting at start_bit. - * Note: This function assumes that any unused bit in the data stream - * is set to zero so that the ors will work correctly */ -static void -cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits) -{ - /* compute initial shift needed */ - const __u16 offset = start_bit % BITS_PER_BYTE; - __u16 len = num_bits / BITS_PER_BYTE; - __u16 byte = start_bit / BITS_PER_BYTE; - __u16 residue = (num_bits % BITS_PER_BYTE) + offset; - int i; - - /* adjust if we have more than a byte of residue */ - if (residue >= BITS_PER_BYTE) { - residue -= BITS_PER_BYTE; - len++; - } - - /* clear out the bits. 
We assume here that if len==0 then - * residue >= offset. This is always true for the catbus - * operations */ - msg[byte] &= 0xff << (BITS_PER_BYTE - offset); - msg[byte++] |= data[0] >> offset; - if (len == 0) - return; - for (i = 1; i < len; i++) - msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset)) - | (data[i] >> offset); - if (residue != 0) { - __u8 mask = 0xff >> residue; - __u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset) - | (data[i] >> offset); - - last_byte &= ~mask; - msg[byte] &= mask; - msg[byte] |= last_byte; - } - return; -} - -/* unpack the data again (same arguments as cat_pack()). data buffer - * must be zero populated. - * - * Function: given a message string move to start_bit and copy num_bits into - * data (starting at bit 0 in data). - */ -static void -cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits) -{ - /* compute initial shift needed */ - const __u16 offset = start_bit % BITS_PER_BYTE; - __u16 len = num_bits / BITS_PER_BYTE; - const __u8 last_bits = num_bits % BITS_PER_BYTE; - __u16 byte = start_bit / BITS_PER_BYTE; - int i; - - if (last_bits != 0) - len++; - - /* special case: want < 8 bits from msg and we can get it from - * a single byte of the msg */ - if (len == 0 && BITS_PER_BYTE - offset >= num_bits) { - data[0] = msg[byte] << offset; - data[0] &= 0xff >> (BITS_PER_BYTE - num_bits); - return; - } - for (i = 0; i < len; i++) { - /* this annoying if has to be done just in case a read of - * msg one beyond the array causes a panic */ - if (offset != 0) { - data[i] = msg[byte++] << offset; - data[i] |= msg[byte] >> (BITS_PER_BYTE - offset); - } else { - data[i] = msg[byte++]; - } - } - /* do we need to truncate the final byte */ - if (last_bits != 0) { - data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits); - } - return; -} - -static void -cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits, - const __u16 longest_reg_bits) -{ - int i; - __u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE; - __u8 *last_byte = &header[len - 1]; - - if (start_bit == 0) - start_bit = 1; /* must have at least one bit in the hdr */ - - for (i = 0; i < len; i++) - header[i] = 0; - - for (i = start_bit; i > 0; i--) - *last_byte = ((*last_byte) << 1) + 1; - -} - -static int -cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op) -{ - __u8 parity, inst, inst_buf[4] = { 0 }; - __u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE]; - __u16 ibytes, hbytes, padbits; - int i; - - /* - * Parity is the parity of the register number + 1 (READ_REGISTER - * and WRITE_REGISTER always add '1' to the number of bits == 1) - */ - parity = (__u8) (1 + (reg & 0x01) + - ((__u8) (reg & 0x02) >> 1) + - ((__u8) (reg & 0x04) >> 2) + - ((__u8) (reg & 0x08) >> 3)) % 2; - - inst = ((parity << 7) | (reg << 2) | op); - - outb(VOYAGER_CAT_IRCYC, CAT_CMD); - if (!modp->scan_path_connected) { - if (asicp->asic_id != VOYAGER_CAT_ID) { - printk - ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n"); - return 1; - } - outb(VOYAGER_CAT_HEADER, CAT_DATA); - outb(inst, CAT_DATA); - if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) { - CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n")); - return 1; - } - return 0; - } - ibytes = modp->inst_bits / BITS_PER_BYTE; - if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) { - padbits = BITS_PER_BYTE - padbits; - ibytes++; - } - hbytes = modp->largest_reg / BITS_PER_BYTE; - if (modp->largest_reg % BITS_PER_BYTE) - hbytes++; - CDEBUG(("cat_sendinst: 
ibytes=%d, hbytes=%d\n", ibytes, hbytes)); - /* initialise the instruction sequence to 0xff */ - for (i = 0; i < ibytes + hbytes; i++) - iseq[i] = 0xff; - cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg); - cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE); - inst_buf[0] = inst; - inst_buf[1] = 0xFF >> (modp->largest_reg % BITS_PER_BYTE); - cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length); -#ifdef VOYAGER_CAT_DEBUG - printk("ins = 0x%x, iseq: ", inst); - for (i = 0; i < ibytes + hbytes; i++) - printk("0x%x ", iseq[i]); - printk("\n"); -#endif - if (cat_shiftout(iseq, ibytes, hbytes, padbits)) { - CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n")); - return 1; - } - CDEBUG(("CAT SHIFTOUT DONE\n")); - return 0; -} - -static int -cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, - __u8 * value) -{ - if (!modp->scan_path_connected) { - if (asicp->asic_id != VOYAGER_CAT_ID) { - CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n")); - return 1; - } - if (reg > VOYAGER_SUBADDRHI) - outb(VOYAGER_CAT_RUN, CAT_CMD); - outb(VOYAGER_CAT_DRCYC, CAT_CMD); - outb(VOYAGER_CAT_HEADER, CAT_DATA); - *value = inb(CAT_DATA); - outb(0xAA, CAT_DATA); - if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) { - CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n")); - return 1; - } - return 0; - } else { - __u16 sbits = modp->num_asics - 1 + asicp->ireg_length; - __u16 sbytes = sbits / BITS_PER_BYTE; - __u16 tbytes; - __u8 string[VOYAGER_MAX_SCAN_PATH], - trailer[VOYAGER_MAX_REG_SIZE]; - __u8 padbits; - int i; - - outb(VOYAGER_CAT_DRCYC, CAT_CMD); - - if ((padbits = sbits % BITS_PER_BYTE) != 0) { - padbits = BITS_PER_BYTE - padbits; - sbytes++; - } - tbytes = asicp->ireg_length / BITS_PER_BYTE; - if (asicp->ireg_length % BITS_PER_BYTE) - tbytes++; - CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n", - tbytes, sbytes, padbits)); - cat_build_header(trailer, tbytes, 1, asicp->ireg_length); - - for (i = tbytes - 1; i >= 0; i--) { - outb(trailer[i], CAT_DATA); - string[sbytes + i] = inb(CAT_DATA); - } - - for (i = sbytes - 1; i >= 0; i--) { - outb(0xaa, CAT_DATA); - string[i] = inb(CAT_DATA); - } - *value = 0; - cat_unpack(string, - padbits + (tbytes * BITS_PER_BYTE) + - asicp->asic_location, value, asicp->ireg_length); -#ifdef VOYAGER_CAT_DEBUG - printk("value=0x%x, string: ", *value); - for (i = 0; i < tbytes + sbytes; i++) - printk("0x%x ", string[i]); - printk("\n"); -#endif - - /* sanity check the rest of the return */ - for (i = 0; i < tbytes; i++) { - __u8 input = 0; - - cat_unpack(string, padbits + (i * BITS_PER_BYTE), - &input, BITS_PER_BYTE); - if (trailer[i] != input) { - CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i])); - return 1; - } - } - CDEBUG(("cat_getdata DONE\n")); - return 0; - } -} - -static int -cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits) -{ - int i; - - for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--) - outb(data[i], CAT_DATA); - - for (i = header_bytes - 1; i >= 0; i--) { - __u8 header = 0; - __u8 input; - - outb(data[i], CAT_DATA); - input = inb(CAT_DATA); - CDEBUG(("cat_shiftout: returned 0x%x\n", input)); - cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits, - &header, BITS_PER_BYTE); - if (input != header) { - CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header)); - return 1; - } - } - return 0; -} - -static int 
-cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp, - __u8 reg, __u8 value) -{ - outb(VOYAGER_CAT_DRCYC, CAT_CMD); - if (!modp->scan_path_connected) { - if (asicp->asic_id != VOYAGER_CAT_ID) { - CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n")); - return 1; - } - outb(VOYAGER_CAT_HEADER, CAT_DATA); - outb(value, CAT_DATA); - if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) { - CDEBUG(("cat_senddata: failed to get correct header response to sent data\n")); - return 1; - } - if (reg > VOYAGER_SUBADDRHI) { - outb(VOYAGER_CAT_RUN, CAT_CMD); - outb(VOYAGER_CAT_END, CAT_CMD); - outb(VOYAGER_CAT_RUN, CAT_CMD); - } - - return 0; - } else { - __u16 hbytes = asicp->ireg_length / BITS_PER_BYTE; - __u16 dbytes = - (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE; - __u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH], - hseq[VOYAGER_MAX_REG_SIZE]; - int i; - - if ((padbits = (modp->num_asics - 1 - + asicp->ireg_length) % BITS_PER_BYTE) != 0) { - padbits = BITS_PER_BYTE - padbits; - dbytes++; - } - if (asicp->ireg_length % BITS_PER_BYTE) - hbytes++; - - cat_build_header(hseq, hbytes, 1, asicp->ireg_length); - - for (i = 0; i < dbytes + hbytes; i++) - dseq[i] = 0xff; - CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n", - dbytes, hbytes, padbits)); - cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length, - hseq, hbytes * BITS_PER_BYTE); - cat_pack(dseq, asicp->asic_location, &value, - asicp->ireg_length); -#ifdef VOYAGER_CAT_DEBUG - printk("dseq "); - for (i = 0; i < hbytes + dbytes; i++) { - printk("0x%x ", dseq[i]); - } - printk("\n"); -#endif - return cat_shiftout(dseq, dbytes, hbytes, padbits); - } -} - -static int -cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 value) -{ - if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG)) - return 1; - return cat_senddata(modp, asicp, reg, value); -} - -static int -cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, - __u8 * value) -{ - if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG)) - return 1; - return cat_getdata(modp, asicp, reg, value); -} - -static int -cat_subaddrsetup(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset, - __u16 len) -{ - __u8 val; - - if (len > 1) { - /* set auto increment */ - __u8 newval; - - if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) { - CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n")); - return 1; - } - CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n", - val)); - newval = val | VOYAGER_AUTO_INC; - if (newval != val) { - if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) { - CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n")); - return 1; - } - } - } - if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) { - CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n")); - return 1; - } - if (asicp->subaddr > VOYAGER_SUBADDR_LO) { - if (cat_write - (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) { - CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n")); - return 1; - } - cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val); - CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset, - val)); - } - cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val); - CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val)); - return 0; -} - -static int -cat_subwrite(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset, - __u16 len, void *buf) -{ - int i, retval; - - /* FIXME: need special actions for VOYAGER_CAT_ID here */ - if (asicp->asic_id == 
VOYAGER_CAT_ID) { - CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n")); - /* FIXME -- This is supposed to be handled better - * There is a problem writing to the cat asic in the - * PSI. The 30us delay seems to work, though */ - udelay(30); - } - - if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) { - printk("cat_subwrite: cat_subaddrsetup FAILED\n"); - return retval; - } - - if (cat_sendinst - (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) { - printk("cat_subwrite: cat_sendinst FAILED\n"); - return 1; - } - for (i = 0; i < len; i++) { - if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) { - printk - ("cat_subwrite: cat_sendata element at %d FAILED\n", - i); - return 1; - } - } - return 0; -} -static int -cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset, - __u16 len, void *buf) -{ - int i, retval; - - if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) { - CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n")); - return retval; - } - - if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) { - CDEBUG(("cat_subread: cat_sendinst failed\n")); - return 1; - } - for (i = 0; i < len; i++) { - if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) { - CDEBUG(("cat_subread: cat_getdata element %d failed\n", - i)); - return 1; - } - } - return 0; -} - -/* buffer for storing EPROM data read in during initialisation */ -static __initdata __u8 eprom_buf[0xFFFF]; -static voyager_module_t *voyager_initial_module; - -/* Initialise the cat bus components. We assume this is called by the - * boot cpu *after* all memory initialisation has been done (so we can - * use kmalloc) but before smp initialisation, so we can probe the SMP - * configuration and pick up necessary information. 
*/ -void __init voyager_cat_init(void) -{ - voyager_module_t **modpp = &voyager_initial_module; - voyager_asic_t **asicpp; - voyager_asic_t *qabc_asic = NULL; - int i, j; - unsigned long qic_addr = 0; - __u8 qabc_data[0x20]; - __u8 num_submodules, val; - voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *) & eprom_buf[0]; - - __u8 cmos[4]; - unsigned long addr; - - /* initiallise the SUS mailbox */ - for (i = 0; i < sizeof(cmos); i++) - cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i); - addr = *(unsigned long *)cmos; - if ((addr & 0xff000000) != 0xff000000) { - printk(KERN_ERR - "Voyager failed to get SUS mailbox (addr = 0x%lx\n", - addr); - } else { - static struct resource res; - - res.name = "voyager SUS"; - res.start = addr; - res.end = addr + 0x3ff; - - request_resource(&iomem_resource, &res); - voyager_SUS = (struct voyager_SUS *) - ioremap(addr, 0x400); - printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n", - voyager_SUS->SUS_version); - voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION; - voyager_SUS->kernel_flags = VOYAGER_OS_HAS_SYSINT; - } - - /* clear the processor counts */ - voyager_extended_vic_processors = 0; - voyager_quad_processors = 0; - - printk("VOYAGER: beginning CAT bus probe\n"); - /* set up the SuperSet Port Block which tells us where the - * CAT communication port is */ - sspb = inb(VOYAGER_SSPB_RELOCATION_PORT) * 0x100; - VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb)); - - /* now find out if were 8 slot or normal */ - if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER) - == EIGHT_SLOT_IDENTIFIER) { - voyager_8slot = 1; - printk(KERN_NOTICE - "Voyager: Eight slot 51xx configuration detected\n"); - } - - for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) { - __u8 input; - int asic; - __u16 eprom_size; - __u16 sp_offset; - - outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT); - outb(i, VOYAGER_CAT_CONFIG_PORT); - - /* check the presence of the module */ - outb(VOYAGER_CAT_RUN, CAT_CMD); - outb(VOYAGER_CAT_IRCYC, CAT_CMD); - outb(VOYAGER_CAT_HEADER, CAT_DATA); - /* stream series of alternating 1's and 0's to stimulate - * response */ - outb(0xAA, CAT_DATA); - input = inb(CAT_DATA); - outb(VOYAGER_CAT_END, CAT_CMD); - if (input != VOYAGER_CAT_HEADER) { - continue; - } - CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i, - cat_module_name(i))); - *modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++]; */ - if (*modpp == NULL) { - printk("**WARNING** kmalloc failure in cat_init\n"); - continue; - } - memset(*modpp, 0, sizeof(voyager_module_t)); - /* need temporary asic for cat_subread. 
It will be - * filled in correctly later */ - (*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count]; */ - if ((*modpp)->asic == NULL) { - printk("**WARNING** kmalloc failure in cat_init\n"); - continue; - } - memset((*modpp)->asic, 0, sizeof(voyager_asic_t)); - (*modpp)->asic->asic_id = VOYAGER_CAT_ID; - (*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI; - (*modpp)->module_addr = i; - (*modpp)->scan_path_connected = 0; - if (i == VOYAGER_PSI) { - /* Exception leg for modules with no EEPROM */ - printk("Module \"%s\"\n", cat_module_name(i)); - continue; - } - - CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET)); - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_disconnect(*modpp, (*modpp)->asic); - if (cat_subread(*modpp, (*modpp)->asic, - VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size), - &eprom_size)) { - printk - ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", - i); - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - if (eprom_size > sizeof(eprom_buf)) { - printk - ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n", - i, eprom_size); - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - outb(VOYAGER_CAT_END, CAT_CMD); - outb(VOYAGER_CAT_RUN, CAT_CMD); - CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, - eprom_size)); - if (cat_subread - (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) { - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - outb(VOYAGER_CAT_END, CAT_CMD); - printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n", - cat_module_name(i), eprom_hdr->version_id, - *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics); - (*modpp)->ee_size = eprom_hdr->ee_size; - (*modpp)->num_asics = eprom_hdr->num_asics; - asicpp = &((*modpp)->asic); - sp_offset = eprom_hdr->scan_path_offset; - /* All we really care about are the Quad cards. 
We - * identify them because they are in a processor slot - * and have only four asics */ - if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) { - modpp = &((*modpp)->next); - continue; - } - /* Now we know it's in a processor slot, does it have - * a quad baseboard submodule */ - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODPRESENT, - &num_submodules); - /* lowest two bits, active low */ - num_submodules = ~(0xfc | num_submodules); - CDEBUG(("VOYAGER CAT: %d submodules present\n", - num_submodules)); - if (num_submodules == 0) { - /* fill in the dyadic extended processors */ - __u8 cpu = i & 0x07; - - printk("Module \"%s\": Dyadic Processor Card\n", - cat_module_name(i)); - voyager_extended_vic_processors |= (1 << cpu); - cpu += 4; - voyager_extended_vic_processors |= (1 << cpu); - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - - /* now we want to read the asics on the first submodule, - * which should be the quad base board */ - - cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, &val); - CDEBUG(("cat_init: SUBMODSELECT value = 0x%x\n", val)); - val = (val & 0x7c) | VOYAGER_QUAD_BASEBOARD; - cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val); - - outb(VOYAGER_CAT_END, CAT_CMD); - - CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET)); - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_disconnect(*modpp, (*modpp)->asic); - if (cat_subread(*modpp, (*modpp)->asic, - VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size), - &eprom_size)) { - printk - ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", - i); - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - if (eprom_size > sizeof(eprom_buf)) { - printk - ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. 
Need %d\n", - i, eprom_size); - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - outb(VOYAGER_CAT_END, CAT_CMD); - outb(VOYAGER_CAT_RUN, CAT_CMD); - CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, - eprom_size)); - if (cat_subread - (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) { - outb(VOYAGER_CAT_END, CAT_CMD); - continue; - } - outb(VOYAGER_CAT_END, CAT_CMD); - /* Now do everything for the QBB submodule 1 */ - (*modpp)->ee_size = eprom_hdr->ee_size; - (*modpp)->num_asics = eprom_hdr->num_asics; - asicpp = &((*modpp)->asic); - sp_offset = eprom_hdr->scan_path_offset; - /* get rid of the dummy CAT asic and read the real one */ - kfree((*modpp)->asic); - for (asic = 0; asic < (*modpp)->num_asics; asic++) { - int j; - voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++]; */ - voyager_sp_table_t *sp_table; - voyager_at_t *asic_table; - voyager_jtt_t *jtag_table; - - if (asicp == NULL) { - printk - ("**WARNING** kmalloc failure in cat_init\n"); - continue; - } - asicpp = &(asicp->next); - asicp->asic_location = asic; - sp_table = - (voyager_sp_table_t *) (eprom_buf + sp_offset); - asicp->asic_id = sp_table->asic_id; - asic_table = - (voyager_at_t *) (eprom_buf + - sp_table->asic_data_offset); - for (j = 0; j < 4; j++) - asicp->jtag_id[j] = asic_table->jtag_id[j]; - jtag_table = - (voyager_jtt_t *) (eprom_buf + - asic_table->jtag_offset); - asicp->ireg_length = jtag_table->ireg_len; - asicp->bit_location = (*modpp)->inst_bits; - (*modpp)->inst_bits += asicp->ireg_length; - if (asicp->ireg_length > (*modpp)->largest_reg) - (*modpp)->largest_reg = asicp->ireg_length; - if (asicp->ireg_length < (*modpp)->smallest_reg || - (*modpp)->smallest_reg == 0) - (*modpp)->smallest_reg = asicp->ireg_length; - CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n", - asicp->asic_id, asicp->ireg_length, - asicp->bit_location)); - if (asicp->asic_id == VOYAGER_QUAD_QABC) { - CDEBUG(("VOYAGER CAT: QABC ASIC found\n")); - qabc_asic = asicp; - } - sp_offset += sizeof(voyager_sp_table_t); - } - CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg)); - /* OK, now we have the QUAD ASICs set up, use them. - * we need to: - * - * 1. Find the Memory area for the Quad CPIs. - * 2. Find the Extended VIC processor - * 3. Configure a second extended VIC processor (This - * cannot be done for the 51xx. 
- * */ - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_connect(*modpp, (*modpp)->asic); - CDEBUG(("CAT CONNECTED!!\n")); - cat_subread(*modpp, qabc_asic, 0, sizeof(qabc_data), qabc_data); - qic_addr = qabc_data[5] << 8; - qic_addr = (qic_addr | qabc_data[6]) << 8; - qic_addr = (qic_addr | qabc_data[7]) << 8; - printk - ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n", - cat_module_name(i), qic_addr, qabc_data[8]); -#if 0 /* plumbing fails---FIXME */ - if ((qabc_data[8] & 0xf0) == 0) { - /* FIXME: 32 way 8 CPU slot monster cannot be - * plumbed this way---need to check for it */ - - printk("Plumbing second Extended Quad Processor\n"); - /* second VIC line hardwired to Quad CPU 1 */ - qabc_data[8] |= 0x20; - cat_subwrite(*modpp, qabc_asic, 8, 1, &qabc_data[8]); -#ifdef VOYAGER_CAT_DEBUG - /* verify plumbing */ - cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]); - if ((qabc_data[8] & 0xf0) == 0) { - CDEBUG(("PLUMBING FAILED: 0x%x\n", - qabc_data[8])); - } -#endif - } -#endif - - { - struct resource *res = - kzalloc(sizeof(struct resource), GFP_KERNEL); - res->name = kmalloc(128, GFP_KERNEL); - sprintf((char *)res->name, "Voyager %s Quad CPI", - cat_module_name(i)); - res->start = qic_addr; - res->end = qic_addr + 0x3ff; - request_resource(&iomem_resource, res); - } - - qic_addr = (unsigned long)ioremap_cache(qic_addr, 0x400); - - for (j = 0; j < 4; j++) { - __u8 cpu; - - if (voyager_8slot) { - /* 8 slot has a different mapping, - * each slot has only one vic line, so - * 1 cpu in each slot must be < 8 */ - cpu = (i & 0x07) + j * 8; - } else { - cpu = (i & 0x03) + j * 4; - } - if ((qabc_data[8] & (1 << j))) { - voyager_extended_vic_processors |= (1 << cpu); - } - if (qabc_data[8] & (1 << (j + 4))) { - /* Second SET register plumbed: Quad - * card has two VIC connected CPUs. 
- * Secondary cannot be booted as a VIC - * CPU */ - voyager_extended_vic_processors |= (1 << cpu); - voyager_allowed_boot_processors &= - (~(1 << cpu)); - } - - voyager_quad_processors |= (1 << cpu); - voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *) - (qic_addr + (j << 8)); - CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu, - (unsigned long)voyager_quad_cpi_addr[cpu])); - } - outb(VOYAGER_CAT_END, CAT_CMD); - - *asicpp = NULL; - modpp = &((*modpp)->next); - } - *modpp = NULL; - printk - ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n", - voyager_extended_vic_processors, voyager_quad_processors, - voyager_allowed_boot_processors); - request_resource(&ioport_resource, &vic_res); - if (voyager_quad_processors) - request_resource(&ioport_resource, &qic_res); - /* set up the front power switch */ -} - -int voyager_cat_readb(__u8 module, __u8 asic, int reg) -{ - return 0; -} - -static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp) -{ - __u8 val; - int err = 0; - - if (!modp->scan_path_connected) - return 0; - if (asicp->asic_id != VOYAGER_CAT_ID) { - CDEBUG(("cat_disconnect: ASIC is not CAT\n")); - return 1; - } - err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val); - if (err) { - CDEBUG(("cat_disconnect: failed to read SCANPATH\n")); - return err; - } - val &= VOYAGER_DISCONNECT_ASIC; - err = cat_write(modp, asicp, VOYAGER_SCANPATH, val); - if (err) { - CDEBUG(("cat_disconnect: failed to write SCANPATH\n")); - return err; - } - outb(VOYAGER_CAT_END, CAT_CMD); - outb(VOYAGER_CAT_RUN, CAT_CMD); - modp->scan_path_connected = 0; - - return 0; -} - -static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp) -{ - __u8 val; - int err = 0; - - if (modp->scan_path_connected) - return 0; - if (asicp->asic_id != VOYAGER_CAT_ID) { - CDEBUG(("cat_connect: ASIC is not CAT\n")); - return 1; - } - - err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val); - if (err) { - CDEBUG(("cat_connect: failed to read SCANPATH\n")); - return err; - } - val |= VOYAGER_CONNECT_ASIC; - err = cat_write(modp, asicp, VOYAGER_SCANPATH, val); - if (err) { - CDEBUG(("cat_connect: failed to write SCANPATH\n")); - return err; - } - outb(VOYAGER_CAT_END, CAT_CMD); - outb(VOYAGER_CAT_RUN, CAT_CMD); - modp->scan_path_connected = 1; - - return 0; -} - -void voyager_cat_power_off(void) -{ - /* Power the machine off by writing to the PSI over the CAT - * bus */ - __u8 data; - voyager_module_t psi = { 0 }; - voyager_asic_t psi_asic = { 0 }; - - psi.asic = &psi_asic; - psi.asic->asic_id = VOYAGER_CAT_ID; - psi.asic->subaddr = VOYAGER_SUBADDR_HI; - psi.module_addr = VOYAGER_PSI; - psi.scan_path_connected = 0; - - outb(VOYAGER_CAT_END, CAT_CMD); - /* Connect the PSI to the CAT Bus */ - outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_disconnect(&psi, &psi_asic); - /* Read the status */ - cat_subread(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data); - outb(VOYAGER_CAT_END, CAT_CMD); - CDEBUG(("PSI STATUS 0x%x\n", data)); - /* These two writes are power off prep and perform */ - data = PSI_CLEAR; - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data); - outb(VOYAGER_CAT_END, CAT_CMD); - data = PSI_POWER_DOWN; - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data); - outb(VOYAGER_CAT_END, CAT_CMD); -} - -struct voyager_status voyager_status = { 0 }; - -void voyager_cat_psi(__u8 
cmd, __u16 reg, __u8 * data) -{ - voyager_module_t psi = { 0 }; - voyager_asic_t psi_asic = { 0 }; - - psi.asic = &psi_asic; - psi.asic->asic_id = VOYAGER_CAT_ID; - psi.asic->subaddr = VOYAGER_SUBADDR_HI; - psi.module_addr = VOYAGER_PSI; - psi.scan_path_connected = 0; - - outb(VOYAGER_CAT_END, CAT_CMD); - /* Connect the PSI to the CAT Bus */ - outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_disconnect(&psi, &psi_asic); - switch (cmd) { - case VOYAGER_PSI_READ: - cat_read(&psi, &psi_asic, reg, data); - break; - case VOYAGER_PSI_WRITE: - cat_write(&psi, &psi_asic, reg, *data); - break; - case VOYAGER_PSI_SUBREAD: - cat_subread(&psi, &psi_asic, reg, 1, data); - break; - case VOYAGER_PSI_SUBWRITE: - cat_subwrite(&psi, &psi_asic, reg, 1, data); - break; - default: - printk(KERN_ERR "Voyager PSI, unrecognised command %d\n", cmd); - break; - } - outb(VOYAGER_CAT_END, CAT_CMD); -} - -void voyager_cat_do_common_interrupt(void) -{ - /* This is caused either by a memory parity error or something - * in the PSI */ - __u8 data; - voyager_module_t psi = { 0 }; - voyager_asic_t psi_asic = { 0 }; - struct voyager_psi psi_reg; - int i; - re_read: - psi.asic = &psi_asic; - psi.asic->asic_id = VOYAGER_CAT_ID; - psi.asic->subaddr = VOYAGER_SUBADDR_HI; - psi.module_addr = VOYAGER_PSI; - psi.scan_path_connected = 0; - - outb(VOYAGER_CAT_END, CAT_CMD); - /* Connect the PSI to the CAT Bus */ - outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT); - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_disconnect(&psi, &psi_asic); - /* Read the status. NOTE: Need to read *all* the PSI regs here - * otherwise the cmn int will be reasserted */ - for (i = 0; i < sizeof(psi_reg.regs); i++) { - cat_read(&psi, &psi_asic, i, &((__u8 *) & psi_reg.regs)[i]); - } - outb(VOYAGER_CAT_END, CAT_CMD); - if ((psi_reg.regs.checkbit & 0x02) == 0) { - psi_reg.regs.checkbit |= 0x02; - cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit); - printk("VOYAGER RE-READ PSI\n"); - goto re_read; - } - outb(VOYAGER_CAT_RUN, CAT_CMD); - for (i = 0; i < sizeof(psi_reg.subregs); i++) { - /* This looks strange, but the PSI doesn't do auto increment - * correctly */ - cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i, - 1, &((__u8 *) & psi_reg.subregs)[i]); - } - outb(VOYAGER_CAT_END, CAT_CMD); -#ifdef VOYAGER_CAT_DEBUG - printk("VOYAGER PSI: "); - for (i = 0; i < sizeof(psi_reg.regs); i++) - printk("%02x ", ((__u8 *) & psi_reg.regs)[i]); - printk("\n "); - for (i = 0; i < sizeof(psi_reg.subregs); i++) - printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]); - printk("\n"); -#endif - if (psi_reg.regs.intstatus & PSI_MON) { - /* switch off or power fail */ - - if (psi_reg.subregs.supply & PSI_SWITCH_OFF) { - if (voyager_status.switch_off) { - printk(KERN_ERR - "Voyager front panel switch turned off again---Immediate power off!\n"); - voyager_cat_power_off(); - /* not reached */ - } else { - printk(KERN_ERR - "Voyager front panel switch turned off\n"); - voyager_status.switch_off = 1; - voyager_status.request_from_kernel = 1; - wake_up_process(voyager_thread); - } - /* Tell the hardware we're taking care of the - * shutdown, otherwise it will power the box off - * within 3 seconds of the switch being pressed and, - * which is much more important to us, continue to - * assert the common interrupt */ - data = PSI_CLR_SWITCH_OFF; - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG, - 1, &data); - 
outb(VOYAGER_CAT_END, CAT_CMD); - } else { - - VDEBUG(("Voyager ac fail reg 0x%x\n", - psi_reg.subregs.ACfail)); - if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) { - /* No further update */ - return; - } -#if 0 - /* Don't bother trying to find out who failed. - * FIXME: This probably makes the code incorrect on - * anything other than a 345x */ - for (i = 0; i < 5; i++) { - if (psi_reg.subregs.ACfail & (1 << i)) { - break; - } - } - printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i); -#endif - /* DON'T do this: it shuts down the AC PSI - outb(VOYAGER_CAT_RUN, CAT_CMD); - data = PSI_MASK_MASK | i; - cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK, - 1, &data); - outb(VOYAGER_CAT_END, CAT_CMD); - */ - printk(KERN_ERR "Voyager AC power failure\n"); - outb(VOYAGER_CAT_RUN, CAT_CMD); - data = PSI_COLD_START; - cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, - 1, &data); - outb(VOYAGER_CAT_END, CAT_CMD); - voyager_status.power_fail = 1; - voyager_status.request_from_kernel = 1; - wake_up_process(voyager_thread); - } - - } else if (psi_reg.regs.intstatus & PSI_FAULT) { - /* Major fault! */ - printk(KERN_ERR - "Voyager PSI Detected major fault, immediate power off!\n"); - voyager_cat_power_off(); - /* not reached */ - } else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM - | PSI_CURRENT | PSI_DVM - | PSI_PSCFAULT | PSI_STAT_CHG)) { - /* other psi fault */ - - printk(KERN_WARNING "Voyager PSI status 0x%x\n", data); - /* clear the PSI fault */ - outb(VOYAGER_CAT_RUN, CAT_CMD); - cat_write(&psi, &psi_asic, VOYAGER_PSI_STATUS_REG, 0); - outb(VOYAGER_CAT_END, CAT_CMD); - } -} diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c deleted file mode 100644 index 98e3c2bc756..00000000000 --- a/arch/x86/mach-voyager/voyager_smp.c +++ /dev/null @@ -1,1805 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * This file provides all the same external entries as smp.c but uses - * the voyager hal to provide the functionality - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* TLB state -- visible externally, indexed physically */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; - -/* CPU IRQ affinity -- set to all ones initially */ -static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = - {[0 ... NR_CPUS-1] = ~0UL }; - -/* per CPU data structure (for /proc/cpuinfo et al), visible externally - * indexed physically */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); -EXPORT_PER_CPU_SYMBOL(cpu_info); - -/* physical ID of the CPU used to boot the system */ -unsigned char boot_cpu_id; - -/* The memory line addresses for the Quad CPIs */ -struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned; - -/* The masks for the Extended VIC processors, filled in by cat_init */ -__u32 voyager_extended_vic_processors = 0; - -/* Masks for the extended Quad processors which cannot be VIC booted */ -__u32 voyager_allowed_boot_processors = 0; - -/* The mask for the Quad Processors (both extended and non-extended) */ -__u32 voyager_quad_processors = 0; - -/* Total count of live CPUs, used in process.c to display - * the CPU information and in irq.c for the per CPU irq - * activity count. 
Finally exported by i386_ksyms.c */ -static int voyager_extended_cpus = 1; - -/* Used for the invalidate map that's also checked in the spinlock */ -static volatile unsigned long smp_invalidate_needed; - -/* Bitmask of CPUs present in the system - exported by i386_syms.c, used - * by scheduler but indexed physically */ -cpumask_t phys_cpu_present_map = CPU_MASK_NONE; - -/* The internal functions */ -static void send_CPI(__u32 cpuset, __u8 cpi); -static void ack_CPI(__u8 cpi); -static int ack_QIC_CPI(__u8 cpi); -static void ack_special_QIC_CPI(__u8 cpi); -static void ack_VIC_CPI(__u8 cpi); -static void send_CPI_allbutself(__u8 cpi); -static void mask_vic_irq(unsigned int irq); -static void unmask_vic_irq(unsigned int irq); -static unsigned int startup_vic_irq(unsigned int irq); -static void enable_local_vic_irq(unsigned int irq); -static void disable_local_vic_irq(unsigned int irq); -static void before_handle_vic_irq(unsigned int irq); -static void after_handle_vic_irq(unsigned int irq); -static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask); -static void ack_vic_irq(unsigned int irq); -static void vic_enable_cpi(void); -static void do_boot_cpu(__u8 cpuid); -static void do_quad_bootstrap(void); -static void initialize_secondary(void); - -int hard_smp_processor_id(void); -int safe_smp_processor_id(void); - -/* Inline functions */ -static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi) -{ - voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = - (smp_processor_id() << 16) + cpi; -} - -static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi) -{ - int cpu; - - for_each_online_cpu(cpu) { - if (cpuset & (1 << cpu)) { -#ifdef VOYAGER_DEBUG - if (!cpu_online(cpu)) - VDEBUG(("CPU%d sending cpi %d to CPU%d not in " - "cpu_online_map\n", - hard_smp_processor_id(), cpi, cpu)); -#endif - send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); - } - } -} - -static inline void wrapper_smp_local_timer_interrupt(void) -{ - irq_enter(); - smp_local_timer_interrupt(); - irq_exit(); -} - -static inline void send_one_CPI(__u8 cpu, __u8 cpi) -{ - if (voyager_quad_processors & (1 << cpu)) - send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); - else - send_CPI(1 << cpu, cpi); -} - -static inline void send_CPI_allbutself(__u8 cpi) -{ - __u8 cpu = smp_processor_id(); - __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); - send_CPI(mask, cpi); -} - -static inline int is_cpu_quad(void) -{ - __u8 cpumask = inb(VIC_PROC_WHO_AM_I); - return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); -} - -static inline int is_cpu_extended(void) -{ - __u8 cpu = hard_smp_processor_id(); - - return (voyager_extended_vic_processors & (1 << cpu)); -} - -static inline int is_cpu_vic_boot(void) -{ - __u8 cpu = hard_smp_processor_id(); - - return (voyager_extended_vic_processors - & voyager_allowed_boot_processors & (1 << cpu)); -} - -static inline void ack_CPI(__u8 cpi) -{ - switch (cpi) { - case VIC_CPU_BOOT_CPI: - if (is_cpu_quad() && !is_cpu_vic_boot()) - ack_QIC_CPI(cpi); - else - ack_VIC_CPI(cpi); - break; - case VIC_SYS_INT: - case VIC_CMN_INT: - /* These are slightly strange. 
Even on the Quad card, - * They are vectored as VIC CPIs */ - if (is_cpu_quad()) - ack_special_QIC_CPI(cpi); - else - ack_VIC_CPI(cpi); - break; - default: - printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi); - break; - } -} - -/* local variables */ - -/* The VIC IRQ descriptors -- these look almost identical to the - * 8259 IRQs except that masks and things must be kept per processor - */ -static struct irq_chip vic_chip = { - .name = "VIC", - .startup = startup_vic_irq, - .mask = mask_vic_irq, - .unmask = unmask_vic_irq, - .set_affinity = set_vic_irq_affinity, -}; - -/* used to count up as CPUs are brought on line (starts at 0) */ -static int cpucount = 0; - -/* The per cpu profile stuff - used in smp_local_timer_interrupt */ -static DEFINE_PER_CPU(int, prof_multiplier) = 1; -static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; -static DEFINE_PER_CPU(int, prof_counter) = 1; - -/* the map used to check if a CPU has booted */ -static __u32 cpu_booted_map; - -/* the synchronize flag used to hold all secondary CPUs spinning in - * a tight loop until the boot sequence is ready for them */ -static cpumask_t smp_commenced_mask = CPU_MASK_NONE; - -/* This is for the new dynamic CPU boot code */ - -/* The per processor IRQ masks (these are usually kept in sync) */ -static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; - -/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */ -static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; - -/* Lock for enable/disable of VIC interrupts */ -static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); - -/* The boot processor is correctly set up in PC mode when it - * comes up, but the secondaries need their master/slave 8259 - * pairs initializing correctly */ - -/* Interrupt counters (per cpu) and total - used to try to - * even up the interrupt handling routines */ -static long vic_intr_total = 0; -static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 }; -static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 }; - -/* Since we can only use CPI0, we fake all the other CPIs */ -static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; - -/* debugging routine to read the isr of the cpu's pic */ -static inline __u16 vic_read_isr(void) -{ - __u16 isr; - - outb(0x0b, 0xa0); - isr = inb(0xa0) << 8; - outb(0x0b, 0x20); - isr |= inb(0x20); - - return isr; -} - -static __init void qic_setup(void) -{ - if (!is_cpu_quad()) { - /* not a quad, no setup */ - return; - } - outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); - outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); - - if (is_cpu_extended()) { - /* the QIC duplicate of the VIC base register */ - outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); - outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); - - /* FIXME: should set up the QIC timer and memory parity - * error vectors here */ - } -} - -static __init void vic_setup_pic(void) -{ - outb(1, VIC_REDIRECT_REGISTER_1); - /* clear the claim registers for dynamic routing */ - outb(0, VIC_CLAIM_REGISTER_0); - outb(0, VIC_CLAIM_REGISTER_1); - - outb(0, VIC_PRIORITY_REGISTER); - /* Set the Primary and Secondary Microchannel vector - * bases to be the same as the ordinary interrupts - * - * FIXME: This would be more efficient using separate - * vectors. 
*/ - outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); - outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); - /* Now initiallise the master PIC belonging to this CPU by - * sending the four ICWs */ - - /* ICW1: level triggered, ICW4 needed */ - outb(0x19, 0x20); - - /* ICW2: vector base */ - outb(FIRST_EXTERNAL_VECTOR, 0x21); - - /* ICW3: slave at line 2 */ - outb(0x04, 0x21); - - /* ICW4: 8086 mode */ - outb(0x01, 0x21); - - /* now the same for the slave PIC */ - - /* ICW1: level trigger, ICW4 needed */ - outb(0x19, 0xA0); - - /* ICW2: slave vector base */ - outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); - - /* ICW3: slave ID */ - outb(0x02, 0xA1); - - /* ICW4: 8086 mode */ - outb(0x01, 0xA1); -} - -static void do_quad_bootstrap(void) -{ - if (is_cpu_quad() && is_cpu_vic_boot()) { - int i; - unsigned long flags; - __u8 cpuid = hard_smp_processor_id(); - - local_irq_save(flags); - - for (i = 0; i < 4; i++) { - /* FIXME: this would be >>3 &0x7 on the 32 way */ - if (((cpuid >> 2) & 0x03) == i) - /* don't lower our own mask! */ - continue; - - /* masquerade as local Quad CPU */ - outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID); - /* enable the startup CPI */ - outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1); - /* restore cpu id */ - outb(0, QIC_PROCESSOR_ID); - } - local_irq_restore(flags); - } -} - -void prefill_possible_map(void) -{ - /* This is empty on voyager because we need a much - * earlier detection which is done in find_smp_config */ -} - -/* Set up all the basic stuff: read the SMP config and make all the - * SMP information reflect only the boot cpu. All others will be - * brought on-line later. */ -void __init find_smp_config(void) -{ - int i; - - boot_cpu_id = hard_smp_processor_id(); - - printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); - - /* initialize the CPU structures (moved from smp_boot_cpus) */ - for (i = 0; i < nr_cpu_ids; i++) - cpu_irq_affinity[i] = ~0; - cpu_online_map = cpumask_of_cpu(boot_cpu_id); - - /* The boot CPU must be extended */ - voyager_extended_vic_processors = 1 << boot_cpu_id; - /* initially, all of the first 8 CPUs can boot */ - voyager_allowed_boot_processors = 0xff; - /* set up everything for just this CPU, we can alter - * this as we start the other CPUs later */ - /* now get the CPU disposition from the extended CMOS */ - cpus_addr(phys_cpu_present_map)[0] = - voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); - cpus_addr(phys_cpu_present_map)[0] |= - voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; - cpus_addr(phys_cpu_present_map)[0] |= - voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + - 2) << 16; - cpus_addr(phys_cpu_present_map)[0] |= - voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + - 3) << 24; - init_cpu_possible(&phys_cpu_present_map); - printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", - cpus_addr(phys_cpu_present_map)[0]); - /* Here we set up the VIC to enable SMP */ - /* enable the CPIs by writing the base vector to their register */ - outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); - outb(1, VIC_REDIRECT_REGISTER_1); - /* set the claim registers for static routing --- Boot CPU gets - * all interrupts untill all other CPUs started */ - outb(0xff, VIC_CLAIM_REGISTER_0); - outb(0xff, VIC_CLAIM_REGISTER_1); - /* Set the Primary and Secondary Microchannel vector - * bases to be the same as the ordinary interrupts - * - * FIXME: This would be more efficient using separate - * vectors. 
*/ - outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); - outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); - - /* Finally tell the firmware that we're driving */ - outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG, - VOYAGER_SUS_IN_CONTROL_PORT); - - current_thread_info()->cpu = boot_cpu_id; - percpu_write(cpu_number, boot_cpu_id); -} - -/* - * The bootstrap kernel entry code has set these up. Save them - * for a given CPU, id is physical */ -void __init smp_store_cpu_info(int id) -{ - struct cpuinfo_x86 *c = &cpu_data(id); - - *c = boot_cpu_data; - c->cpu_index = id; - - identify_secondary_cpu(c); -} - -/* Routine initially called when a non-boot CPU is brought online */ -static void __init start_secondary(void *unused) -{ - __u8 cpuid = hard_smp_processor_id(); - - cpu_init(); - - /* OK, we're in the routine */ - ack_CPI(VIC_CPU_BOOT_CPI); - - /* setup the 8259 master slave pair belonging to this CPU --- - * we won't actually receive any until the boot CPU - * relinquishes it's static routing mask */ - vic_setup_pic(); - - qic_setup(); - - if (is_cpu_quad() && !is_cpu_vic_boot()) { - /* clear the boot CPI */ - __u8 dummy; - - dummy = - voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; - printk("read dummy %d\n", dummy); - } - - /* lower the mask to receive CPIs */ - vic_enable_cpi(); - - VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); - - notify_cpu_starting(cpuid); - - /* enable interrupts */ - local_irq_enable(); - - /* get our bogomips */ - calibrate_delay(); - - /* save our processor parameters */ - smp_store_cpu_info(cpuid); - - /* if we're a quad, we may need to bootstrap other CPUs */ - do_quad_bootstrap(); - - /* FIXME: this is rather a poor hack to prevent the CPU - * activating softirqs while it's supposed to be waiting for - * permission to proceed. Without this, the new per CPU stuff - * in the softirqs will fail */ - local_irq_disable(); - cpu_set(cpuid, cpu_callin_map); - - /* signal that we're done */ - cpu_booted_map = 1; - - while (!cpu_isset(cpuid, smp_commenced_mask)) - rep_nop(); - local_irq_enable(); - - local_flush_tlb(); - - cpu_set(cpuid, cpu_online_map); - wmb(); - cpu_idle(); -} - -/* Routine to kick start the given CPU and wait for it to report ready - * (or timeout in startup). When this routine returns, the requested - * CPU is either fully running and configured or known to be dead. 
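[Editor's aside: do_boot_cpu() below and start_secondary() above rendezvous through two globals. The freshly started CPU sets cpu_booted_map once it has called in, then spins until it is released via smp_commenced_mask, while the booting CPU polls cpu_booted_map with a timeout. A stripped-down model of that handshake, with the waiting primitives reduced to comments; this is a simplification, not the kernel code itself.]

/* Reduced model of the boot rendezvous in start_secondary() /
 * do_boot_cpu(): one flag per direction, both sides busy-wait. */
static volatile int booted;             /* plays cpu_booted_map       */
static volatile int commenced;          /* plays smp_commenced_mask   */

static void secondary_cpu(void)
{
        booted = 1;                     /* "I'm up and called in"     */
        while (!commenced)
                ;                       /* rep_nop() in the original  */
        /* ...flush TLB, mark online, enter cpu_idle()...             */
}

static int boot_cpu_waits(void)
{
        int timeout;

        for (timeout = 0; timeout < 50000; timeout++) {
                if (booted)
                        return 0;       /* secondary came up          */
                /* udelay(100) in the original */
        }
        return -1;                      /* dead or stuck              */
}

[The interrupted do_boot_cpu() comment continues below.]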
- * - * We call this routine sequentially 1 CPU at a time, so no need for - * locking */ - -static void __init do_boot_cpu(__u8 cpu) -{ - struct task_struct *idle; - int timeout; - unsigned long flags; - int quad_boot = (1 << cpu) & voyager_quad_processors - & ~(voyager_extended_vic_processors - & voyager_allowed_boot_processors); - - /* This is the format of the CPI IDT gate (in real mode) which - * we're hijacking to boot the CPU */ - union IDTFormat { - struct seg { - __u16 Offset; - __u16 Segment; - } idt; - __u32 val; - } hijack_source; - - __u32 *hijack_vector; - __u32 start_phys_address = setup_trampoline(); - - /* There's a clever trick to this: The linux trampoline is - * compiled to begin at absolute location zero, so make the - * address zero but have the data segment selector compensate - * for the actual address */ - hijack_source.idt.Offset = start_phys_address & 0x000F; - hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF; - - cpucount++; - alternatives_smp_switch(1); - - idle = fork_idle(cpu); - if (IS_ERR(idle)) - panic("failed fork for CPU%d", cpu); - idle->thread.ip = (unsigned long)start_secondary; - /* init_tasks (in sched.c) is indexed logically */ - stack_start.sp = (void *)idle->thread.sp; - - per_cpu(current_task, cpu) = idle; - early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); - irq_ctx_init(cpu); - - /* Note: Don't modify initial ss override */ - VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, - (unsigned long)hijack_source.val, hijack_source.idt.Segment, - hijack_source.idt.Offset, stack_start.sp)); - - /* init lowmem identity mapping */ - clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); - flush_tlb_all(); - - if (quad_boot) { - printk("CPU %d: non extended Quad boot\n", cpu); - hijack_vector = - (__u32 *) - phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4); - *hijack_vector = hijack_source.val; - } else { - printk("CPU%d: extended VIC boot\n", cpu); - hijack_vector = - (__u32 *) - phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4); - *hijack_vector = hijack_source.val; - /* VIC errata, may also receive interrupt at this address */ - hijack_vector = - (__u32 *) - phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + - VIC_DEFAULT_CPI_BASE) * 4); - *hijack_vector = hijack_source.val; - } - /* All non-boot CPUs start with interrupts fully masked. Need - * to lower the mask of the CPI we're about to send. 
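[Editor's aside: the hijack just above exploits the real-mode interrupt vector table layout. Vector n lives at physical address n * 4 and holds a 16-bit offset in the low half and a 16-bit segment in the high half; the CPU jumps to segment * 16 + offset. A minimal sketch of that encoding follows; the helper name is illustrative, not from this file, and it assumes the target address sits below 1 MiB, as the trampoline does.]

/* Illustrative helper: pack a sub-1MiB physical address into the
 * 32-bit real-mode IVT entry format used by the hijack above,
 * offset in the low 16 bits, segment in the high 16 bits.  Since
 * offset = addr & 0xF and segment = addr >> 4, the CPU's jump to
 * segment * 16 + offset lands exactly back on addr. */
static unsigned int ivt_entry_for(unsigned int addr)
{
        unsigned short offset  = addr & 0x000F;
        unsigned short segment = (addr >> 4) & 0xFFFF;

        return ((unsigned int)segment << 16) | offset;
}

[The interrupted comment on CPI masking continues below.]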
We do - * this in the VIC by masquerading as the processor we're - * about to boot and lowering its interrupt mask */ - local_irq_save(flags); - if (quad_boot) { - send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); - } else { - outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); - /* here we're altering registers belonging to `cpu' */ - - outb(VIC_BOOT_INTERRUPT_MASK, 0x21); - /* now go back to our original identity */ - outb(boot_cpu_id, VIC_PROCESSOR_ID); - - /* and boot the CPU */ - - send_CPI((1 << cpu), VIC_CPU_BOOT_CPI); - } - cpu_booted_map = 0; - local_irq_restore(flags); - - /* now wait for it to become ready (or timeout) */ - for (timeout = 0; timeout < 50000; timeout++) { - if (cpu_booted_map) - break; - udelay(100); - } - /* reset the page table */ - zap_low_mappings(); - - if (cpu_booted_map) { - VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", - cpu, smp_processor_id())); - - printk("CPU%d: ", cpu); - print_cpu_info(&cpu_data(cpu)); - wmb(); - cpu_set(cpu, cpu_callout_map); - cpu_set(cpu, cpu_present_map); - } else { - printk("CPU%d FAILED TO BOOT: ", cpu); - if (* - ((volatile unsigned char *)phys_to_virt(start_phys_address)) - == 0xA5) - printk("Stuck.\n"); - else - printk("Not responding.\n"); - - cpucount--; - } -} - -void __init smp_boot_cpus(void) -{ - int i; - - /* CAT BUS initialisation must be done after the memory */ - /* FIXME: The L4 has a catbus too, it just needs to be - * accessed in a totally different way */ - if (voyager_level == 5) { - voyager_cat_init(); - - /* now that the cat has probed the Voyager System Bus, sanity - * check the cpu map */ - if (((voyager_quad_processors | voyager_extended_vic_processors) - & cpus_addr(phys_cpu_present_map)[0]) != - cpus_addr(phys_cpu_present_map)[0]) { - /* should panic */ - printk("\n\n***WARNING*** " - "Sanity check of CPU present map FAILED\n"); - } - } else if (voyager_level == 4) - voyager_extended_vic_processors = - cpus_addr(phys_cpu_present_map)[0]; - - /* this sets up the idle task to run on the current cpu */ - voyager_extended_cpus = 1; - /* Remove the global_irq_holder setting, it triggers a BUG() on - * schedule at the moment */ - //global_irq_holder = boot_cpu_id; - - /* FIXME: Need to do something about this but currently only works - * on CPUs with a tsc which none of mine have. - smp_tune_scheduling(); - */ - smp_store_cpu_info(boot_cpu_id); - /* setup the jump vector */ - initial_code = (unsigned long)initialize_secondary; - printk("CPU%d: ", boot_cpu_id); - print_cpu_info(&cpu_data(boot_cpu_id)); - - if (is_cpu_quad()) { - /* booting on a Quad CPU */ - printk("VOYAGER SMP: Boot CPU is Quad\n"); - qic_setup(); - do_quad_bootstrap(); - } - - /* enable our own CPIs */ - vic_enable_cpi(); - - cpu_set(boot_cpu_id, cpu_online_map); - cpu_set(boot_cpu_id, cpu_callout_map); - - /* loop over all the extended VIC CPUs and boot them. 
The - * Quad CPUs must be bootstrapped by their extended VIC cpu */ - for (i = 0; i < nr_cpu_ids; i++) { - if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) - continue; - do_boot_cpu(i); - /* This udelay seems to be needed for the Quad boots - * don't remove unless you know what you're doing */ - udelay(1000); - } - /* we could compute the total bogomips here, but why bother?, - * Code added from smpboot.c */ - { - unsigned long bogosum = 0; - - for_each_online_cpu(i) - bogosum += cpu_data(i).loops_per_jiffy; - printk(KERN_INFO "Total of %d processors activated " - "(%lu.%02lu BogoMIPS).\n", - cpucount + 1, bogosum / (500000 / HZ), - (bogosum / (5000 / HZ)) % 100); - } - voyager_extended_cpus = hweight32(voyager_extended_vic_processors); - printk("VOYAGER: Extended (interrupt handling CPUs): " - "%d, non-extended: %d\n", voyager_extended_cpus, - num_booting_cpus() - voyager_extended_cpus); - /* that's it, switch to symmetric mode */ - outb(0, VIC_PRIORITY_REGISTER); - outb(0, VIC_CLAIM_REGISTER_0); - outb(0, VIC_CLAIM_REGISTER_1); - - VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); -} - -/* Reload the secondary CPUs task structure (this function does not - * return ) */ -static void __init initialize_secondary(void) -{ -#if 0 - // AC kernels only - set_current(hard_get_current()); -#endif - - /* - * We don't actually need to load the full TSS, - * basically just the stack pointer and the eip. - */ - - asm volatile ("movl %0,%%esp\n\t" - "jmp *%1"::"r" (current->thread.sp), - "r"(current->thread.ip)); -} - -/* handle a Voyager SYS_INT -- If we don't, the base board will - * panic the system. - * - * System interrupts occur because some problem was detected on the - * various busses. To find out what you have to probe all the - * hardware via the CAT bus. FIXME: At the moment we do nothing. */ -void smp_vic_sys_interrupt(struct pt_regs *regs) -{ - ack_CPI(VIC_SYS_INT); - printk("Voyager SYSTEM INTERRUPT\n"); -} - -/* Handle a voyager CMN_INT; These interrupts occur either because of - * a system status change or because a single bit memory error - * occurred. FIXME: At the moment, ignore all this. */ -void smp_vic_cmn_interrupt(struct pt_regs *regs) -{ - static __u8 in_cmn_int = 0; - static DEFINE_SPINLOCK(cmn_int_lock); - - /* common ints are broadcast, so make sure we only do this once */ - _raw_spin_lock(&cmn_int_lock); - if (in_cmn_int) - goto unlock_end; - - in_cmn_int++; - _raw_spin_unlock(&cmn_int_lock); - - VDEBUG(("Voyager COMMON INTERRUPT\n")); - - if (voyager_level == 5) - voyager_cat_do_common_interrupt(); - - _raw_spin_lock(&cmn_int_lock); - in_cmn_int = 0; - unlock_end: - _raw_spin_unlock(&cmn_int_lock); - ack_CPI(VIC_CMN_INT); -} - -/* - * Reschedule call back. Nothing to do, all the work is done - * automatically when we return from the interrupt. */ -static void smp_reschedule_interrupt(void) -{ - /* do nothing */ -} - -static struct mm_struct *flush_mm; -static unsigned long flush_va; -static DEFINE_SPINLOCK(tlbstate_lock); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. - * - * We need to reload %cr3 since the page tables may be going - * away from under us.. 
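[Editor's aside, returning to the BogoMIPS summary printed by smp_boot_cpus() a little earlier: the two printk arguments are fixed-point formatting of bogosum * HZ / 500000. A worked example with invented numbers:]

/*
 * BogoMIPS = loops_per_jiffy * HZ / 500000 (integer arithmetic).
 * With, say, HZ = 100 and bogosum = 4989440 (values invented for
 * this example):
 *
 *   bogosum / (500000 / HZ)       = 4989440 / 5000       = 997
 *   (bogosum / (5000 / HZ)) % 100 = (4989440 / 50) % 100
 *                                 = 99788 % 100          = 88
 *
 * so the line reads "... (997.88 BogoMIPS)".
 */

[The interrupted voyager_leave_mm() comment continues below.]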
- */ -static inline void voyager_leave_mm(unsigned long cpu) -{ - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} - -/* - * Invalidate call-back - */ -static void smp_invalidate_interrupt(void) -{ - __u8 cpu = smp_processor_id(); - - if (!test_bit(cpu, &smp_invalidate_needed)) - return; - /* This will flood messages. Don't uncomment unless you see - * Problems with cross cpu invalidation - VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", - smp_processor_id())); - */ - - if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { - if (flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(flush_va); - } else - voyager_leave_mm(cpu); - } - smp_mb__before_clear_bit(); - clear_bit(cpu, &smp_invalidate_needed); - smp_mb__after_clear_bit(); -} - -/* All the new flush operations for 2.4 */ - -/* This routine is called with a physical cpu mask */ -static void -voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm, - unsigned long va) -{ - int stuck = 50000; - - if (!cpumask) - BUG(); - if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask) - BUG(); - if (cpumask & (1 << smp_processor_id())) - BUG(); - if (!mm) - BUG(); - - spin_lock(&tlbstate_lock); - - flush_mm = mm; - flush_va = va; - atomic_set_mask(cpumask, &smp_invalidate_needed); - /* - * We have to send the CPI only to - * CPUs affected. - */ - send_CPI(cpumask, VIC_INVALIDATE_CPI); - - while (smp_invalidate_needed) { - mb(); - if (--stuck == 0) { - printk("***WARNING*** Stuck doing invalidate CPI " - "(CPU%d)\n", smp_processor_id()); - break; - } - } - - /* Uncomment only to debug invalidation problems - VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); - */ - - flush_mm = NULL; - flush_va = 0; - spin_unlock(&tlbstate_lock); -} - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - unsigned long cpu_mask; - - preempt_disable(); - - cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); - local_flush_tlb(); - if (cpu_mask) - voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - unsigned long cpu_mask; - - preempt_disable(); - - cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - voyager_leave_mm(smp_processor_id()); - } - if (cpu_mask) - voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - unsigned long cpu_mask; - - preempt_disable(); - - cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); - if (current->active_mm == mm) { - if (current->mm) - __flush_tlb_one(va); - else - voyager_leave_mm(smp_processor_id()); - } - - if (cpu_mask) - voyager_flush_tlb_others(cpu_mask, mm, va); - - preempt_enable(); -} - -EXPORT_SYMBOL(flush_tlb_page); - -/* enable the requested IRQs */ -static void smp_enable_irq_interrupt(void) -{ - __u8 irq; - __u8 cpu = get_cpu(); - - VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, - vic_irq_enable_mask[cpu])); - - spin_lock(&vic_irq_lock); - for (irq = 0; irq < 16; irq++) { - if (vic_irq_enable_mask[cpu] & (1 << irq)) - enable_local_vic_irq(irq); - } - vic_irq_enable_mask[cpu] = 0; - spin_unlock(&vic_irq_lock); - - 
put_cpu_no_resched(); -} - -/* - * CPU halt call-back - */ -static void smp_stop_cpu_function(void *dummy) -{ - VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); - cpu_clear(smp_processor_id(), cpu_online_map); - local_irq_disable(); - for (;;) - halt(); -} - -/* execute a thread on a new CPU. The function to be called must be - * previously set up. This is used to schedule a function for - * execution on all CPUs - set up the function then broadcast a - * function_interrupt CPI to come here on each CPU */ -static void smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - __get_cpu_var(irq_stat).irq_call_count++; - irq_exit(); -} - -static void smp_call_function_single_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_single_interrupt(); - __get_cpu_var(irq_stat).irq_call_count++; - irq_exit(); -} - -/* Sorry about the name. In an APIC based system, the APICs - * themselves are programmed to send a timer interrupt. This is used - * by linux to reschedule the processor. Voyager doesn't have this, - * so we use the system clock to interrupt one processor, which in - * turn, broadcasts a timer CPI to all the others --- we receive that - * CPI here. We don't use this actually for counting so losing - * ticks doesn't matter - * - * FIXME: For those CPUs which actually have a local APIC, we could - * try to use it to trigger this interrupt instead of having to - * broadcast the timer tick. Unfortunately, all my pentium DYADs have - * no local APIC, so I can't do this - * - * This function is currently a placeholder and is unused in the code */ -void smp_apic_timer_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - wrapper_smp_local_timer_interrupt(); - set_irq_regs(old_regs); -} - -/* All of the QUAD interrupt GATES */ -void smp_qic_timer_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - ack_QIC_CPI(QIC_TIMER_CPI); - wrapper_smp_local_timer_interrupt(); - set_irq_regs(old_regs); -} - -void smp_qic_invalidate_interrupt(struct pt_regs *regs) -{ - ack_QIC_CPI(QIC_INVALIDATE_CPI); - smp_invalidate_interrupt(); -} - -void smp_qic_reschedule_interrupt(struct pt_regs *regs) -{ - ack_QIC_CPI(QIC_RESCHEDULE_CPI); - smp_reschedule_interrupt(); -} - -void smp_qic_enable_irq_interrupt(struct pt_regs *regs) -{ - ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); - smp_enable_irq_interrupt(); -} - -void smp_qic_call_function_interrupt(struct pt_regs *regs) -{ - ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); - smp_call_function_interrupt(); -} - -void smp_qic_call_function_single_interrupt(struct pt_regs *regs) -{ - ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); - smp_call_function_single_interrupt(); -} - -void smp_vic_cpi_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - __u8 cpu = smp_processor_id(); - - if (is_cpu_quad()) - ack_QIC_CPI(VIC_CPI_LEVEL0); - else - ack_VIC_CPI(VIC_CPI_LEVEL0); - - if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) - wrapper_smp_local_timer_interrupt(); - if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) - smp_invalidate_interrupt(); - if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) - smp_reschedule_interrupt(); - if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) - smp_enable_irq_interrupt(); - if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) - smp_call_function_interrupt(); - if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) - 
smp_call_function_single_interrupt(); - set_irq_regs(old_regs); -} - -static void do_flush_tlb_all(void *info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) - voyager_leave_mm(cpu); -} - -/* flush the TLB of every active CPU in the system */ -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, 0, 1); -} - -/* send a reschedule CPI to one CPU by physical CPU number*/ -static void voyager_smp_send_reschedule(int cpu) -{ - send_one_CPI(cpu, VIC_RESCHEDULE_CPI); -} - -int hard_smp_processor_id(void) -{ - __u8 i; - __u8 cpumask = inb(VIC_PROC_WHO_AM_I); - if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) - return cpumask & 0x1F; - - for (i = 0; i < 8; i++) { - if (cpumask & (1 << i)) - return i; - } - printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); - return 0; -} - -int safe_smp_processor_id(void) -{ - return hard_smp_processor_id(); -} - -/* broadcast a halt to all other CPUs */ -static void voyager_smp_send_stop(void) -{ - smp_call_function(smp_stop_cpu_function, NULL, 1); -} - -/* this function is triggered in time.c when a clock tick fires - * we need to re-broadcast the tick to all CPUs */ -void smp_vic_timer_interrupt(void) -{ - send_CPI_allbutself(VIC_TIMER_CPI); - smp_local_timer_interrupt(); -} - -/* local (per CPU) timer interrupt. It does both profiling and - * process statistics/rescheduling. - * - * We do profiling in every local tick, statistics/rescheduling - * happen only every 'profiling multiplier' ticks. The default - * multiplier is 1 and it can be changed by writing the new multiplier - * value into /proc/profile. - */ -void smp_local_timer_interrupt(void) -{ - int cpu = smp_processor_id(); - long weight; - - profile_tick(CPU_PROFILING); - if (--per_cpu(prof_counter, cpu) <= 0) { - /* - * The multiplier may have changed since the last time we got - * to this point as a result of the user writing to - * /proc/profile. In this case we need to adjust the APIC - * timer accordingly. - * - * Interrupts are already masked off at this point. - */ - per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu); - if (per_cpu(prof_counter, cpu) != - per_cpu(prof_old_multiplier, cpu)) { - /* FIXME: need to update the vic timer tick here */ - per_cpu(prof_old_multiplier, cpu) = - per_cpu(prof_counter, cpu); - } - - update_process_times(user_mode_vm(get_irq_regs())); - } - - if (((1 << cpu) & voyager_extended_vic_processors) == 0) - /* only extended VIC processors participate in - * interrupt distribution */ - return; - - /* - * We take the 'long' return path, and there every subsystem - * grabs the appropriate locks (kernel lock/ irq lock). - * - * we might want to decouple profiling from the 'long path', - * and do the profiling totally in assembly. - * - * Currently this isn't too much of an issue (performance wise), - * we can take more than 100K local irqs per second on a 100 MHz P5. - */ - - if ((++vic_tick[cpu] & 0x7) != 0) - return; - /* get here every 16 ticks (about every 1/6 of a second) */ - - /* Change our priority to give someone else a chance at getting - * the IRQ. The algorithm goes like this: - * - * In the VIC, the dynamically routed interrupt is always - * handled by the lowest priority eligible (i.e. receiving - * interrupts) CPU. If >1 eligible CPUs are equal lowest, the - * lowest processor number gets it. - * - * The priority of a CPU is controlled by a special per-CPU - * VIC priority register which is 3 bits wide 0 being lowest - * and 7 highest priority.. 
- * - * Therefore we subtract the average number of interrupts from - * the number we've fielded. If this number is negative, we - * lower the activity count and if it is positive, we raise - * it. - * - * I'm afraid this still leads to odd looking interrupt counts: - * the totals are all roughly equal, but the individual ones - * look rather skewed. - * - * FIXME: This algorithm is total crap when mixed with SMP - * affinity code since we now try to even up the interrupt - * counts when an affinity binding is keeping them on a - * particular CPU*/ - weight = (vic_intr_count[cpu] * voyager_extended_cpus - - vic_intr_total) >> 4; - weight += 4; - if (weight > 7) - weight = 7; - if (weight < 0) - weight = 0; - - outb((__u8) weight, VIC_PRIORITY_REGISTER); - -#ifdef VOYAGER_DEBUG - if ((vic_tick[cpu] & 0xFFF) == 0) { - /* print this message roughly every 25 secs */ - printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", - cpu, vic_tick[cpu], weight); - } -#endif -} - -/* setup the profiling timer */ -int setup_profiling_timer(unsigned int multiplier) -{ - int i; - - if ((!multiplier)) - return -EINVAL; - - /* - * Set the new multiplier for each CPU. CPUs don't start using the - * new values until the next timer interrupt in which they do process - * accounting. - */ - for (i = 0; i < nr_cpu_ids; ++i) - per_cpu(prof_multiplier, i) = multiplier; - - return 0; -} - -/* This is a bit of a mess, but forced on us by the genirq changes - * there's no genirq handler that really does what voyager wants - * so hack it up with the simple IRQ handler */ -static void handle_vic_irq(unsigned int irq, struct irq_desc *desc) -{ - before_handle_vic_irq(irq); - handle_simple_irq(irq, desc); - after_handle_vic_irq(irq); -} - -/* The CPIs are handled in the per cpu 8259s, so they must be - * enabled to be received: FIX: enabling the CPIs in the early - * boot sequence interferes with bug checking; enable them later - * on in smp_init */ -#define VIC_SET_GATE(cpi, vector) \ - set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector)) -#define QIC_SET_GATE(cpi, vector) \ - set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) - -void __init voyager_smp_intr_init(void) -{ - int i; - - /* initialize the per cpu irq mask to all disabled */ - for (i = 0; i < nr_cpu_ids; i++) - vic_irq_mask[i] = 0xFFFF; - - VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); - - VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt); - VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt); - - QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt); - QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt); - QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); - QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); - QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); - - /* now put the VIC descriptor into the first 48 IRQs - * - * This is for later: first 16 correspond to PC IRQs; next 16 - * are Primary MC IRQs and final 16 are Secondary MC IRQs */ - for (i = 0; i < 48; i++) - set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); -} - -/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per - * processor to receive CPI */ -static void send_CPI(__u32 cpuset, __u8 cpi) -{ - int cpu; - __u32 quad_cpuset = (cpuset & voyager_quad_processors); - - if (cpi < VIC_START_FAKE_CPI) { - /* fake CPI are only used for booting, so send to the - * extended quads as well---Quads must be VIC booted */ - outb((__u8) (cpuset), VIC_CPI_Registers[cpi]); - return; - } - if (quad_cpuset) - send_QIC_CPI(quad_cpuset, cpi); - cpuset &= 
~quad_cpuset; - cpuset &= 0xff; /* only first 8 CPUs vaild for VIC CPI */ - if (cpuset == 0) - return; - for_each_online_cpu(cpu) { - if (cpuset & (1 << cpu)) - set_bit(cpi, &vic_cpi_mailbox[cpu]); - } - if (cpuset) - outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); -} - -/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and - * set the cache line to shared by reading it. - * - * DON'T make this inline otherwise the cache line read will be - * optimised away - * */ -static int ack_QIC_CPI(__u8 cpi) -{ - __u8 cpu = hard_smp_processor_id(); - - cpi &= 7; - - outb(1 << cpi, QIC_INTERRUPT_CLEAR1); - return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; -} - -static void ack_special_QIC_CPI(__u8 cpi) -{ - switch (cpi) { - case VIC_CMN_INT: - outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); - break; - case VIC_SYS_INT: - outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0); - break; - } - /* also clear at the VIC, just in case (nop for non-extended proc) */ - ack_VIC_CPI(cpi); -} - -/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ -static void ack_VIC_CPI(__u8 cpi) -{ -#ifdef VOYAGER_DEBUG - unsigned long flags; - __u16 isr; - __u8 cpu = smp_processor_id(); - - local_irq_save(flags); - isr = vic_read_isr(); - if ((isr & (1 << (cpi & 7))) == 0) { - printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); - } -#endif - /* send specific EOI; the two system interrupts have - * bit 4 set for a separate vector but behave as the - * corresponding 3 bit intr */ - outb_p(0x60 | (cpi & 7), 0x20); - -#ifdef VOYAGER_DEBUG - if ((vic_read_isr() & (1 << (cpi & 7))) != 0) { - printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); - } - local_irq_restore(flags); -#endif -} - -/* cribbed with thanks from irq.c */ -#define __byte(x,y) (((unsigned char *)&(y))[x]) -#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) -#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) - -static unsigned int startup_vic_irq(unsigned int irq) -{ - unmask_vic_irq(irq); - - return 0; -} - -/* The enable and disable routines. This is where we run into - * conflicting architectural philosophy. Fundamentally, the voyager - * architecture does not expect to have to disable interrupts globally - * (the IRQ controllers belong to each CPU). The processor masquerade - * which is used to start the system shouldn't be used in a running OS - * since it will cause great confusion if two separate CPUs drive to - * the same IRQ controller (I know, I've tried it). - * - * The solution is a variant on the NCR lazy SPL design: - * - * 1) To disable an interrupt, do nothing (other than set the - * IRQ_DISABLED flag). This dares the interrupt actually to arrive. - * - * 2) If the interrupt dares to come in, raise the local mask against - * it (this will result in all the CPU masks being raised - * eventually). - * - * 3) To enable the interrupt, lower the mask on the local CPU and - * broadcast an Interrupt enable CPI which causes all other CPUs to - * adjust their masks accordingly. 
*/ - -static void unmask_vic_irq(unsigned int irq) -{ - /* linux doesn't to processor-irq affinity, so enable on - * all CPUs we know about */ - int cpu = smp_processor_id(), real_cpu; - __u16 mask = (1 << irq); - __u32 processorList = 0; - unsigned long flags; - - VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", - irq, cpu, cpu_irq_affinity[cpu])); - spin_lock_irqsave(&vic_irq_lock, flags); - for_each_online_cpu(real_cpu) { - if (!(voyager_extended_vic_processors & (1 << real_cpu))) - continue; - if (!(cpu_irq_affinity[real_cpu] & mask)) { - /* irq has no affinity for this CPU, ignore */ - continue; - } - if (real_cpu == cpu) { - enable_local_vic_irq(irq); - } else if (vic_irq_mask[real_cpu] & mask) { - vic_irq_enable_mask[real_cpu] |= mask; - processorList |= (1 << real_cpu); - } - } - spin_unlock_irqrestore(&vic_irq_lock, flags); - if (processorList) - send_CPI(processorList, VIC_ENABLE_IRQ_CPI); -} - -static void mask_vic_irq(unsigned int irq) -{ - /* lazy disable, do nothing */ -} - -static void enable_local_vic_irq(unsigned int irq) -{ - __u8 cpu = smp_processor_id(); - __u16 mask = ~(1 << irq); - __u16 old_mask = vic_irq_mask[cpu]; - - vic_irq_mask[cpu] &= mask; - if (vic_irq_mask[cpu] == old_mask) - return; - - VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", - irq, cpu)); - - if (irq & 8) { - outb_p(cached_A1(cpu), 0xA1); - (void)inb_p(0xA1); - } else { - outb_p(cached_21(cpu), 0x21); - (void)inb_p(0x21); - } -} - -static void disable_local_vic_irq(unsigned int irq) -{ - __u8 cpu = smp_processor_id(); - __u16 mask = (1 << irq); - __u16 old_mask = vic_irq_mask[cpu]; - - if (irq == 7) - return; - - vic_irq_mask[cpu] |= mask; - if (old_mask == vic_irq_mask[cpu]) - return; - - VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", - irq, cpu)); - - if (irq & 8) { - outb_p(cached_A1(cpu), 0xA1); - (void)inb_p(0xA1); - } else { - outb_p(cached_21(cpu), 0x21); - (void)inb_p(0x21); - } -} - -/* The VIC is level triggered, so the ack can only be issued after the - * interrupt completes. However, we do Voyager lazy interrupt - * handling here: It is an extremely expensive operation to mask an - * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If - * this interrupt actually comes in, then we mask and ack here to push - * the interrupt off to another CPU */ -static void before_handle_vic_irq(unsigned int irq) -{ - irq_desc_t *desc = irq_to_desc(irq); - __u8 cpu = smp_processor_id(); - - _raw_spin_lock(&vic_irq_lock); - vic_intr_total++; - vic_intr_count[cpu]++; - - if (!(cpu_irq_affinity[cpu] & (1 << irq))) { - /* The irq is not in our affinity mask, push it off - * onto another CPU */ - VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d " - "on cpu %d\n", irq, cpu)); - disable_local_vic_irq(irq); - /* set IRQ_INPROGRESS to prevent the handler in irq.c from - * actually calling the interrupt routine */ - desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; - } else if (desc->status & IRQ_DISABLED) { - /* Damn, the interrupt actually arrived, do the lazy - * disable thing. 
The interrupt routine in irq.c will - * not handle a IRQ_DISABLED interrupt, so nothing more - * need be done here */ - VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n", - irq, cpu)); - disable_local_vic_irq(irq); - desc->status |= IRQ_REPLAY; - } else { - desc->status &= ~IRQ_REPLAY; - } - - _raw_spin_unlock(&vic_irq_lock); -} - -/* Finish the VIC interrupt: basically mask */ -static void after_handle_vic_irq(unsigned int irq) -{ - irq_desc_t *desc = irq_to_desc(irq); - - _raw_spin_lock(&vic_irq_lock); - { - unsigned int status = desc->status & ~IRQ_INPROGRESS; -#ifdef VOYAGER_DEBUG - __u16 isr; -#endif - - desc->status = status; - if ((status & IRQ_DISABLED)) - disable_local_vic_irq(irq); -#ifdef VOYAGER_DEBUG - /* DEBUG: before we ack, check what's in progress */ - isr = vic_read_isr(); - if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) { - int i; - __u8 cpu = smp_processor_id(); - __u8 real_cpu; - int mask; /* Um... initialize me??? --RR */ - - printk("VOYAGER SMP: CPU%d lost interrupt %d\n", - cpu, irq); - for_each_possible_cpu(real_cpu, mask) { - - outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, - VIC_PROCESSOR_ID); - isr = vic_read_isr(); - if (isr & (1 << irq)) { - printk - ("VOYAGER SMP: CPU%d ack irq %d\n", - real_cpu, irq); - ack_vic_irq(irq); - } - outb(cpu, VIC_PROCESSOR_ID); - } - } -#endif /* VOYAGER_DEBUG */ - /* as soon as we ack, the interrupt is eligible for - * receipt by another CPU so everything must be in - * order here */ - ack_vic_irq(irq); - if (status & IRQ_REPLAY) { - /* replay is set if we disable the interrupt - * in the before_handle_vic_irq() routine, so - * clear the in progress bit here to allow the - * next CPU to handle this correctly */ - desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS); - } -#ifdef VOYAGER_DEBUG - isr = vic_read_isr(); - if ((isr & (1 << irq)) != 0) - printk("VOYAGER SMP: after_handle_vic_irq() after " - "ack irq=%d, isr=0x%x\n", irq, isr); -#endif /* VOYAGER_DEBUG */ - } - _raw_spin_unlock(&vic_irq_lock); - - /* All code after this point is out of the main path - the IRQ - * may be intercepted by another CPU if reasserted */ -} - -/* Linux processor - interrupt affinity manipulations. - * - * For each processor, we maintain a 32 bit irq affinity mask. - * Initially it is set to all 1's so every processor accepts every - * interrupt. In this call, we change the processor's affinity mask: - * - * Change from enable to disable: - * - * If the interrupt ever comes in to the processor, we will disable it - * and ack it to push it off to another CPU, so just accept the mask here. - * - * Change from disable to enable: - * - * change the mask and then do an interrupt enable CPI to re-enable on - * the selected processors */ - -void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask) -{ - /* Only extended processors handle interrupts */ - unsigned long real_mask; - unsigned long irq_mask = 1 << irq; - int cpu; - - real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors; - - if (cpus_addr(*mask)[0] == 0) - /* can't have no CPUs to accept the interrupt -- extremely - * bad things will happen */ - return; - - if (irq == 0) - /* can't change the affinity of the timer IRQ. This - * is due to the constraint in the voyager - * architecture that the CPI also comes in on and IRQ - * line and we have chosen IRQ0 for this. 
If you - * raise the mask on this interrupt, the processor - * will no-longer be able to accept VIC CPIs */ - return; - - if (irq >= 32) - /* You can only have 32 interrupts in a voyager system - * (and 32 only if you have a secondary microchannel - * bus) */ - return; - - for_each_online_cpu(cpu) { - unsigned long cpu_mask = 1 << cpu; - - if (cpu_mask & real_mask) { - /* enable the interrupt for this cpu */ - cpu_irq_affinity[cpu] |= irq_mask; - } else { - /* disable the interrupt for this cpu */ - cpu_irq_affinity[cpu] &= ~irq_mask; - } - } - /* this is magic, we now have the correct affinity maps, so - * enable the interrupt. This will send an enable CPI to - * those CPUs who need to enable it in their local masks, - * causing them to correct for the new affinity . If the - * interrupt is currently globally disabled, it will simply be - * disabled again as it comes in (voyager lazy disable). If - * the affinity map is tightened to disable the interrupt on a - * cpu, it will be pushed off when it comes in */ - unmask_vic_irq(irq); -} - -static void ack_vic_irq(unsigned int irq) -{ - if (irq & 8) { - outb(0x62, 0x20); /* Specific EOI to cascade */ - outb(0x60 | (irq & 7), 0xA0); - } else { - outb(0x60 | (irq & 7), 0x20); - } -} - -/* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 - * but are not vectored by it. This means that the 8259 mask must be - * lowered to receive them */ -static __init void vic_enable_cpi(void) -{ - __u8 cpu = smp_processor_id(); - - /* just take a copy of the current mask (nop for boot cpu) */ - vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; - - enable_local_vic_irq(VIC_CPI_LEVEL0); - enable_local_vic_irq(VIC_CPI_LEVEL1); - /* for sys int and cmn int */ - enable_local_vic_irq(7); - - if (is_cpu_quad()) { - outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); - outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); - VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", - cpu, QIC_CPI_ENABLE)); - } - - VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n", - cpu, vic_irq_mask[cpu])); -} - -void voyager_smp_dump() -{ - int old_cpu = smp_processor_id(), cpu; - - /* dump the interrupt masks of each processor */ - for_each_online_cpu(cpu) { - __u16 imr, isr, irr; - unsigned long flags; - - local_irq_save(flags); - outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); - imr = (inb(0xa1) << 8) | inb(0x21); - outb(0x0a, 0xa0); - irr = inb(0xa0) << 8; - outb(0x0a, 0x20); - irr |= inb(0x20); - outb(0x0b, 0xa0); - isr = inb(0xa0) << 8; - outb(0x0b, 0x20); - isr |= inb(0x20); - outb(old_cpu, VIC_PROCESSOR_ID); - local_irq_restore(flags); - printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n", - cpu, vic_irq_mask[cpu], imr, irr, isr); -#if 0 - /* These lines are put in to try to unstick an un ack'd irq */ - if (isr != 0) { - int irq; - for (irq = 0; irq < 16; irq++) { - if (isr & (1 << irq)) { - printk("\tCPU%d: ack irq %d\n", - cpu, irq); - local_irq_save(flags); - outb(VIC_CPU_MASQUERADE_ENABLE | cpu, - VIC_PROCESSOR_ID); - ack_vic_irq(irq); - outb(old_cpu, VIC_PROCESSOR_ID); - local_irq_restore(flags); - } - } - } -#endif - } -} - -void smp_voyager_power_off(void *dummy) -{ - if (smp_processor_id() == boot_cpu_id) - voyager_power_off(); - else - smp_stop_cpu_function(NULL); -} - -static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) -{ - /* FIXME: ignore max_cpus for now */ - smp_boot_cpus(); -} - -static void __cpuinit voyager_smp_prepare_boot_cpu(void) -{ - int cpu = smp_processor_id(); - switch_to_new_gdt(cpu); - - cpu_set(cpu, cpu_online_map); - 
cpu_set(cpu, cpu_callout_map); - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); - -} - -static int __cpuinit voyager_cpu_up(unsigned int cpu) -{ - /* This only works at boot for x86. See "rewrite" above. */ - if (cpu_isset(cpu, smp_commenced_mask)) - return -ENOSYS; - - /* In case one didn't come up */ - if (!cpu_isset(cpu, cpu_callin_map)) - return -EIO; - /* Unleash the CPU! */ - cpu_set(cpu, smp_commenced_mask); - while (!cpu_online(cpu)) - mb(); - return 0; -} - -static void __init voyager_smp_cpus_done(unsigned int max_cpus) -{ - zap_low_mappings(); -} - -void __init smp_setup_processor_id(void) -{ - current_thread_info()->cpu = hard_smp_processor_id(); -} - -static void voyager_send_call_func(const struct cpumask *callmask) -{ - __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id()); - send_CPI(mask, VIC_CALL_FUNCTION_CPI); -} - -static void voyager_send_call_func_single(int cpu) -{ - send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI); -} - -struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, - .smp_prepare_cpus = voyager_smp_prepare_cpus, - .cpu_up = voyager_cpu_up, - .smp_cpus_done = voyager_smp_cpus_done, - - .smp_send_stop = voyager_smp_send_stop, - .smp_send_reschedule = voyager_smp_send_reschedule, - - .send_call_func_ipi = voyager_send_call_func, - .send_call_func_single_ipi = voyager_send_call_func_single, -}; diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c deleted file mode 100644 index 15464a20fb3..00000000000 --- a/arch/x86/mach-voyager/voyager_thread.c +++ /dev/null @@ -1,128 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * This module provides the machine status monitor thread for the - * voyager architecture. This allows us to monitor the machine - * environment (temp, voltage, fan function) and the front panel and - * internal UPS. 
If a fault is detected, this thread takes corrective - * action (usually just informing init) - * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct task_struct *voyager_thread; -static __u8 set_timeout; - -static int execute(const char *string) -{ - int ret; - - char *envp[] = { - "HOME=/", - "TERM=linux", - "PATH=/sbin:/usr/sbin:/bin:/usr/bin", - NULL, - }; - char *argv[] = { - "/bin/bash", - "-c", - (char *)string, - NULL, - }; - - if ((ret = - call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) { - printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string, - ret); - } - return ret; -} - -static void check_from_kernel(void) -{ - if (voyager_status.switch_off) { - - /* FIXME: This should be configurable via proc */ - execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1"); - } else if (voyager_status.power_fail) { - VDEBUG(("Voyager daemon detected AC power failure\n")); - - /* FIXME: This should be configureable via proc */ - execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1"); - set_timeout = 1; - } -} - -static void check_continuing_condition(void) -{ - if (voyager_status.power_fail) { - __u8 data; - voyager_cat_psi(VOYAGER_PSI_SUBREAD, - VOYAGER_PSI_AC_FAIL_REG, &data); - if ((data & 0x1f) == 0) { - /* all power restored */ - printk(KERN_NOTICE - "VOYAGER AC power restored, cancelling shutdown\n"); - /* FIXME: should be user configureable */ - execute - ("umask 600; echo O > /etc/powerstatus; kill -PWR 1"); - set_timeout = 0; - } - } -} - -static int thread(void *unused) -{ - printk(KERN_NOTICE "Voyager starting monitor thread\n"); - - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT); - - VDEBUG(("Voyager Daemon awoken\n")); - if (voyager_status.request_from_kernel == 0) { - /* probably awoken from timeout */ - check_continuing_condition(); - } else { - check_from_kernel(); - voyager_status.request_from_kernel = 0; - } - } -} - -static int __init voyager_thread_start(void) -{ - voyager_thread = kthread_run(thread, NULL, "kvoyagerd"); - if (IS_ERR(voyager_thread)) { - printk(KERN_ERR - "Voyager: Failed to create system monitor thread.\n"); - return PTR_ERR(voyager_thread); - } - return 0; -} - -static void __exit voyager_thread_stop(void) -{ - kthread_stop(voyager_thread); -} - -module_init(voyager_thread_start); -module_exit(voyager_thread_stop); diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 87b9ab16642..b83e119fbeb 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -6,7 +6,7 @@ config XEN bool "Xen guest support" select PARAVIRT select PARAVIRT_CLOCK - depends on X86_64 || (X86_32 && X86_PAE && !(X86_VISWS || X86_VOYAGER)) + depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) depends on X86_CMPXCHG && X86_TSC help This is the Linux Xen port. Enabling this will allow the -- cgit v1.2.3 From d85a881d780cc7aaebe1b7aefcddbcb939acbe2d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Feb 2009 00:29:45 +0100 Subject: x86: remove various unused subarch hooks Impact: remove dead code Remove: - pre_setup_arch_hook() - mca_nmi_hook() If needed they can be added back via an x86_quirk handler. 
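For illustration only (the "myboard" names are made up, and arch_pre_intr_init is merely used as an example of an existing method): the x86_quirks mechanism the changelog refers to is wired up roughly like this, so a re-added hook would follow the same pattern:

	static int __init myboard_pre_intr_init(void)
	{
		/* board specific setup that used to live in a subarch hook */

		/* non-zero return asks the x86_quirk_* wrapper to skip its default code */
		return 1;
	}

	static struct x86_quirks myboard_x86_quirks __initdata = {
		.arch_pre_intr_init	= myboard_pre_intr_init,
	};

	/* installed early in the platform's setup path, before the quirk is invoked */
	x86_quirks = &myboard_x86_quirks;
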
Signed-off-by: Ingo Molnar --- arch/x86/include/asm/arch_hooks.h | 6 +++--- arch/x86/kernel/mca_32.c | 4 +--- arch/x86/kernel/setup.c | 34 ---------------------------------- 3 files changed, 4 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h index cbd4957838a..54248172be1 100644 --- a/arch/x86/include/asm/arch_hooks.h +++ b/arch/x86/include/asm/arch_hooks.h @@ -15,12 +15,12 @@ extern void init_ISA_irqs(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id); /* these are the defined hooks */ -extern void intr_init_hook(void); extern void pre_intr_init_hook(void); -extern void pre_setup_arch_hook(void); +extern void intr_init_hook(void); + extern void trap_init_hook(void); + extern void pre_time_init_hook(void); extern void time_init_hook(void); -extern void mca_nmi_hook(void); #endif /* _ASM_X86_ARCH_HOOKS_H */ diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 2dc183758be..f74eef52ab5 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c @@ -474,6 +474,4 @@ void __kprobes mca_handle_nmi(void) * adapter was responsible for the error. */ bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); - - mca_nmi_hook(); -} /* mca_handle_nmi */ +} diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d699811b3f7..d4de1e4c204 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -668,7 +668,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); - pre_setup_arch_hook(); #else printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif @@ -1022,18 +1021,6 @@ void __init intr_init_hook(void) } } -/** - * pre_setup_arch_hook - hook called prior to any setup_arch() execution - * - * Description: - * generally used to activate any machine specific identification - * routines that may be needed before setup_arch() runs. On Voyager - * this is used to get the board revision and type. - **/ -void __init pre_setup_arch_hook(void) -{ -} - /** * trap_init_hook - initialise system specific traps * @@ -1088,25 +1075,4 @@ void __init time_init_hook(void) irq0.mask = cpumask_of_cpu(0); setup_irq(0, &irq0); } - -#ifdef CONFIG_MCA -/** - * mca_nmi_hook - hook into MCA specific NMI chain - * - * Description: - * The MCA (Microchannel Architecture) has an NMI chain for NMI sources - * along the MCA bus. Use this to hook into that chain if you will need - * it. - **/ -void mca_nmi_hook(void) -{ - /* - * If I recall correctly, there's a whole bunch of other things that - * we can do to check for NMI problems, but that's all I know about - * at the moment. - */ - pr_warning("NMI generated from unknown source!\n"); -} -#endif /* CONFIG_MCA */ - #endif /* CONFIG_X86_32 */ -- cgit v1.2.3 From 8e6dafd6c741cd4679b4de3c5d9698851e4fa59c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Feb 2009 00:34:39 +0100 Subject: x86: refactor x86_quirks support Impact: cleanup Make x86_quirks support more transparent. The highlevel methods are now named: extern void x86_quirk_pre_intr_init(void); extern void x86_quirk_intr_init(void); extern void x86_quirk_trap_init(void); extern void x86_quirk_pre_time_init(void); extern void x86_quirk_time_init(void); This makes it clear that if some platform extension has to do something here that it is considered ... weird, and is discouraged. 
Also remove arch_hooks.h and move it into setup.h (and other header files where appropriate). Signed-off-by: Ingo Molnar --- arch/x86/include/asm/arch_hooks.h | 26 -------------------------- arch/x86/include/asm/i8259.h | 4 ++++ arch/x86/include/asm/setup.h | 9 +++++++++ arch/x86/include/asm/timer.h | 2 ++ arch/x86/kernel/apic/apic.c | 1 - arch/x86/kernel/apic/probe_32.c | 1 - arch/x86/kernel/i8259.c | 1 - arch/x86/kernel/irqinit_32.c | 13 +++++++------ arch/x86/kernel/kvmclock.c | 1 - arch/x86/kernel/mca_32.c | 1 - arch/x86/kernel/paravirt.c | 1 - arch/x86/kernel/setup.c | 23 ++++++++++++----------- arch/x86/kernel/time_32.c | 6 +++--- arch/x86/kernel/traps.c | 4 ++-- arch/x86/kernel/visws_quirks.c | 1 - arch/x86/kernel/vmiclock_32.c | 1 - 16 files changed, 39 insertions(+), 56 deletions(-) delete mode 100644 arch/x86/include/asm/arch_hooks.h (limited to 'arch') diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h deleted file mode 100644 index 54248172be1..00000000000 --- a/arch/x86/include/asm/arch_hooks.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _ASM_X86_ARCH_HOOKS_H -#define _ASM_X86_ARCH_HOOKS_H - -#include - -/* - * linux/include/asm/arch_hooks.h - * - * define the architecture specific hooks - */ - -/* these aren't arch hooks, they are generic routines - * that can be used by the hooks */ -extern void init_ISA_irqs(void); -extern irqreturn_t timer_interrupt(int irq, void *dev_id); - -/* these are the defined hooks */ -extern void pre_intr_init_hook(void); -extern void intr_init_hook(void); - -extern void trap_init_hook(void); - -extern void pre_time_init_hook(void); -extern void time_init_hook(void); - -#endif /* _ASM_X86_ARCH_HOOKS_H */ diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 58d7091eeb1..1a99e6c092a 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip; extern void mask_8259A(void); extern void unmask_8259A(void); +#ifdef CONFIG_X86_32 +extern void init_ISA_irqs(void); +#endif + #endif /* _ASM_X86_I8259_H */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 8029369cd6f..66801cb72f6 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -13,6 +13,7 @@ struct mpc_cpu; struct mpc_bus; struct mpc_oemtable; + struct x86_quirks { int (*arch_pre_time_init)(void); int (*arch_time_init)(void); @@ -33,6 +34,14 @@ struct x86_quirks { int (*update_apic)(void); }; +extern void x86_quirk_pre_intr_init(void); +extern void x86_quirk_intr_init(void); + +extern void x86_quirk_trap_init(void); + +extern void x86_quirk_pre_time_init(void); +extern void x86_quirk_time_init(void); + #endif /* __ASSEMBLY__ */ #ifdef __i386__ diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 2bb6a835c45..a81195eaa2b 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -3,6 +3,7 @@ #include #include #include +#include #define TICK_SIZE (tick_nsec / 1000) @@ -12,6 +13,7 @@ unsigned long native_calibrate_tsc(void); #ifdef CONFIG_X86_32 extern int timer_ack; extern int recalibrate_cpu_khz(void); +extern irqreturn_t timer_interrupt(int irq, void *dev_id); #endif /* CONFIG_X86_32 */ extern int no_timer_check; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index c6bd7710585..f9cecdfd05c 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -34,7 +34,6 @@ #include #include -#include #include #include #include diff --git 
a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index c9ec90742e9..3a730fa574b 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 11d5093eb28..df89102bef8 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -22,7 +22,6 @@ #include #include #include -#include #include /* diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index bf629cadec1..50b8c3a3006 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -127,8 +127,8 @@ void __init native_init_IRQ(void) { int i; - /* all the set up before the call gates are initialised */ - pre_intr_init_hook(); + /* Execute any quirks before the call gates are initialised: */ + x86_quirk_pre_intr_init(); /* * Cover the whole vector space, no vector can escape @@ -188,10 +188,11 @@ void __init native_init_IRQ(void) if (!acpi_ioapic) setup_irq(2, &irq2); - /* setup after call gates are initialised (usually add in - * the architecture specific gates) + /* + * Call quirks after call gates are initialised (usually add in + * the architecture specific gates): */ - intr_init_hook(); + x86_quirk_intr_init(); /* * External FPU? Set up irq13 if so, for diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 652fce6d2cc..137f2e8132d 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index f74eef52ab5..845d80ce1ef 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c @@ -51,7 +51,6 @@ #include #include #include -#include static unsigned char which_scsi; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 6dc4dca255e..63dd358d8ee 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d4de1e4c204..5b85759e797 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -74,8 +74,9 @@ #include #include #include -#include #include +#include +#include #include #include #include @@ -987,7 +988,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 /** - * pre_intr_init_hook - initialisation prior to setting up interrupt vectors + * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors * * Description: * Perform any necessary interrupt initialisation prior to setting up @@ -995,7 +996,7 @@ void __init setup_arch(char **cmdline_p) * interrupts should be initialised here if the machine emulates a PC * in any way. **/ -void __init pre_intr_init_hook(void) +void __init x86_quirk_pre_intr_init(void) { if (x86_quirks->arch_pre_intr_init) { if (x86_quirks->arch_pre_intr_init()) @@ -1005,7 +1006,7 @@ void __init pre_intr_init_hook(void) } /** - * intr_init_hook - post gate setup interrupt initialisation + * x86_quirk_intr_init - post gate setup interrupt initialisation * * Description: * Fill in any interrupts that may have been left out by the general @@ -1013,7 +1014,7 @@ void __init pre_intr_init_hook(void) * than the devices on the I/O bus (like APIC interrupts in intel MP * systems) are started here. 
**/ -void __init intr_init_hook(void) +void __init x86_quirk_intr_init(void) { if (x86_quirks->arch_intr_init) { if (x86_quirks->arch_intr_init()) @@ -1022,13 +1023,13 @@ void __init intr_init_hook(void) } /** - * trap_init_hook - initialise system specific traps + * x86_quirk_trap_init - initialise system specific traps * * Description: * Called as the final act of trap_init(). Used in VISWS to initialise * the various board specific APIC traps. **/ -void __init trap_init_hook(void) +void __init x86_quirk_trap_init(void) { if (x86_quirks->arch_trap_init) { if (x86_quirks->arch_trap_init()) @@ -1044,23 +1045,23 @@ static struct irqaction irq0 = { }; /** - * pre_time_init_hook - do any specific initialisations before. + * x86_quirk_pre_time_init - do any specific initialisations before. * **/ -void __init pre_time_init_hook(void) +void __init x86_quirk_pre_time_init(void) { if (x86_quirks->arch_pre_time_init) x86_quirks->arch_pre_time_init(); } /** - * time_init_hook - do any specific initialisations for the system timer. + * x86_quirk_time_init - do any specific initialisations for the system timer. * * Description: * Must plug the system timer interrupt source at HZ into the IRQ listed * in irq_vectors.h:TIMER_IRQ **/ -void __init time_init_hook(void) +void __init x86_quirk_time_init(void) { if (x86_quirks->arch_time_init) { /* diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 764c74e871f..5c5d87f0b2e 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c @@ -33,7 +33,7 @@ #include #include -#include +#include #include #include #include @@ -118,7 +118,7 @@ void __init hpet_time_init(void) { if (!hpet_enable()) setup_pit_timer(); - time_init_hook(); + x86_quirk_time_init(); } /* @@ -131,7 +131,7 @@ void __init hpet_time_init(void) */ void __init time_init(void) { - pre_time_init_hook(); + x86_quirk_pre_time_init(); tsc_init(); late_time_init = choose_time_init(); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index acb8c0585ab..c8c0a7e530b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -61,7 +61,7 @@ #include #else #include -#include +#include #include #include "cpu/mcheck/mce.h" @@ -1026,6 +1026,6 @@ void __init trap_init(void) cpu_init(); #ifdef CONFIG_X86_32 - trap_init_hook(); + x86_quirk_trap_init(); #endif } diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 34199d30ff4..191a876e9e8 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 8c48b465059..49b4cd6707f 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -28,7 +28,6 @@ #include #include -#include #include #include #include -- cgit v1.2.3 From ecda06289f8202d4c6beb235f59ea464f4a91209 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 22 Feb 2009 22:14:56 -0800 Subject: x86: check mptable physptr with max_low_pfn on 32bit Impact: fix early crash on LinuxBIOS systems Kevin O'Connor reported that Coreboot aka LinuxBIOS tries to put mptable somewhere very high, well above max_low_pfn (below which BIOSes generally put the mptable), causing a panic. The BIOS will probably be changed to be compatible with older Linus versions, but nevertheless the MP-spec does not forbid an MP-table in arbitrary system RAM, so make sure it all works even if the table is in an unexpected place. 
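To make the failure mode concrete (numbers are illustrative only, assuming 4 KiB pages and 1 GiB of lowmem): the pre-existing clamp only adjusted the size and never checked whether physptr itself was below lowmem, so the subtraction wrapped:

	unsigned long end  = max_low_pfn * PAGE_SIZE;	/* 0x40000000 with 1 GiB lowmem */
	unsigned long ptr  = 0xfffe0000UL;		/* mptable placed high by LinuxBIOS */
	unsigned long size = PAGE_SIZE;

	if (ptr + size > end)
		size = end - ptr;	/* wraps to roughly 1 GiB instead of shrinking */

	/* reserve_bootmem_generic(ptr, size, ...) then tries to reserve far more
	 * memory than is actually there and the early boot code blows up */
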
Check physptr with max_low_pfn * PAGE_SIZE. Reported-by: Kevin O'Connor Signed-off-by: Yinghai Lu Cc: Stefan Reinauer Cc: coreboot@coreboot.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 7f4d2586972..37cb1bda1ba 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -710,13 +710,22 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, * of physical memory; so that simply reserving * PAGE_SIZE from mpf->physptr yields BUG() * in reserve_bootmem. + * also need to make sure physptr is below than + * max_low_pfn + * we don't need reserve the area above max_low_pfn */ unsigned long end = max_low_pfn * PAGE_SIZE; - if (mpf->physptr + size > end) - size = end - mpf->physptr; -#endif + + if (mpf->physptr < end) { + if (mpf->physptr + size > end) + size = end - mpf->physptr; + reserve_bootmem_generic(mpf->physptr, size, + BOOTMEM_DEFAULT); + } +#else reserve_bootmem_generic(mpf->physptr, size, BOOTMEM_DEFAULT); +#endif } return 1; -- cgit v1.2.3 From bda3a89745d7bb88d3e735046c0cdc3d0eb2ac24 Mon Sep 17 00:00:00 2001 From: Stas Sergeev Date: Mon, 23 Feb 2009 19:13:07 +0300 Subject: x86: minor cleanup in the espfix code Impact: Cleanup Checkin be44d2aabce2d62f72d5751d1871b6212bf7a1c7 eliminates the use of a 16-bit stack for espfix. However, at least one instruction remained that only operated on the low 16 bits of %esp. This is not a bug per se because the kernel stack is always an aligned 4K or 8K block. Therefore it cannot cross 64K boundaries; this code, in fact, relies strictly on that fact. However, it's a lot cleaner (and, for that matter, smaller) to operate on the entire 32-bit register. Signed-off-by: Stas Sergeev CC: Zachary Amsden CC: Chuck Ebbert Signed-off-by: H. Peter Anvin --- arch/x86/kernel/entry_32.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 999e827ef9c..899e8938e79 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1359,7 +1359,7 @@ nmi_espfix_stack: CFI_ADJUST_CFA_OFFSET 4 pushl %esp CFI_ADJUST_CFA_OFFSET 4 - addw $4, (%esp) + addl $4, (%esp) /* copy the iret frame of 12 bytes */ .rept 3 pushl 16(%esp) -- cgit v1.2.3 From c132937556f56ee4b831ef4b23f1846e05fde102 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:20 +0900 Subject: bootmem: clean up arch-specific bootmem wrapping Impact: cleaner and consistent bootmem wrapping By setting CONFIG_HAVE_ARCH_BOOTMEM_NODE, archs can define arch-specific wrappers for bootmem allocation. However, this is done a bit strangely in that only the high level convenience macros can be changed while lower level, but still exported, interface functions can't be wrapped. This not only is messy but also leads to strange situation where alloc_bootmem() does what the arch wants it to do but the equivalent __alloc_bootmem() call doesn't although they should be able to be used interchangeably. This patch updates bootmem such that archs can override / wrap the backend function - alloc_bootmem_core() instead of the highlevel interface functions to allow simpler and consistent wrapping. Also, HAVE_ARCH_BOOTMEM_NODE is renamed to HAVE_ARCH_BOOTMEM. 
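As a concrete illustration of the inconsistency being fixed (a sketch using the default alignment and goal that the generic macro passes), on 32-bit x86 NUMA these two calls were not equivalent before this patch, because only the first one went through the arch wrapper:

	void *a = alloc_bootmem(size);		/* macro: redirected to node 0 by the arch */
	void *b = __alloc_bootmem(size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
						/* exported function: the arch could not wrap it */

	/* with this patch both paths funnel into alloc_bootmem_core(), which the
	 * arch may now override, so they behave consistently again */
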
Signed-off-by: Tejun Heo Cc: Johannes Weiner --- arch/avr32/Kconfig | 2 +- arch/x86/Kconfig | 2 +- arch/x86/include/asm/mmzone_32.h | 43 +++++----------------------------------- 3 files changed, 7 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index b189680d18b..05fe3053dca 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt" config QUICKLIST def_bool y -config HAVE_ARCH_BOOTMEM_NODE +config HAVE_ARCH_BOOTMEM def_bool n config ARCH_HAVE_MEMORY_PRESENT diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d3f6eadfd4b..6fd3b2302ed 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1111,7 +1111,7 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accomodate various tables. -config HAVE_ARCH_BOOTMEM_NODE +config HAVE_ARCH_BOOTMEM def_bool y depends on X86_32 && NUMA diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 07f1af494ca..1e0fa9e63af 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -93,45 +93,12 @@ static inline int pfn_valid(int pfn) #endif /* CONFIG_DISCONTIGMEM */ #ifdef CONFIG_NEED_MULTIPLE_NODES - -/* - * Following are macros that are specific to this numa platform. - */ -#define reserve_bootmem(addr, size, flags) \ - reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags)) -#define alloc_bootmem(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_nopanic(x) \ - __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ - __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) -#define alloc_bootmem_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_pages_nopanic(x) \ - __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) -#define alloc_bootmem_node(pgdat, x) \ -({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ - __pa(MAX_DMA_ADDRESS)); \ -}) -#define alloc_bootmem_pages_node(pgdat, x) \ -({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)); \ -}) -#define alloc_bootmem_low_pages_node(pgdat, x) \ +/* always use node 0 for bootmem on this numa platform */ +#define alloc_bootmem_core(__bdata, size, align, goal, limit) \ ({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \ + bootmem_data_t __maybe_unused * __abm_bdata_dummy = (__bdata); \ + __alloc_bootmem_core(NODE_DATA(0)->bdata, \ + (size), (align), (goal), (limit)); \ }) #endif /* CONFIG_NEED_MULTIPLE_NODES */ -- cgit v1.2.3 From c0c0a29379b5848aec2e8f1c58d853d3cb7118b8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:21 +0900 Subject: vmalloc: add @align to vm_area_register_early() Impact: allow larger alignment for early vmalloc area allocation Some early vmalloc users might want larger alignment, for example, for custom large page mapping. Add @align to vm_area_register_early(). While at it, drop docbook comment on non-existent @size. 
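A minimal usage sketch (hypothetical caller, mirroring the alpha conversion below but asking for a larger alignment so the area could later be covered by large page mappings):

	static struct vm_struct myarea_vm;	/* made-up early vmalloc user */

	myarea_vm.flags = VM_ALLOC;
	myarea_vm.size  = nr_pages << PAGE_SHIFT;
	vm_area_register_early(&myarea_vm, PMD_SIZE);	/* 2 MiB aligned address */
	vaddr = (unsigned long)myarea_vm.addr;
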
Signed-off-by: Tejun Heo Cc: Nick Piggin Cc: Ivan Kokshaysky --- arch/alpha/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index df6df025ded..91eddd8505d 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -200,7 +200,7 @@ callback_init(void * kernel_end) /* register the vm area */ console_remap_vm.flags = VM_ALLOC; console_remap_vm.size = nr_pages << PAGE_SHIFT; - vm_area_register_early(&console_remap_vm); + vm_area_register_early(&console_remap_vm, PAGE_SIZE); vaddr = (unsigned long)consle_remap_vm.addr; -- cgit v1.2.3 From 458a3e644c3327be529393982e24277eda8f1ac7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:21 +0900 Subject: x86: update populate_extra_pte() and add populate_extra_pmd() Impact: minor change to populate_extra_pte() and addition of pmd flavor Update populate_extra_pte() to return pointer to the pte_t for the specified address and add populate_extra_pmd() which only populates till the pmd and returns pointer to the pmd entry for the address. For 64bit, pud/pmd/pte fill functions are separated out from set_pte_vaddr[_pud]() and used for set_pte_vaddr[_pud]() and populate_extra_{pte|pmd}(). Signed-off-by: Tejun Heo --- arch/x86/include/asm/pgtable.h | 3 +- arch/x86/kernel/setup_percpu.c | 7 +++- arch/x86/mm/init_32.c | 13 ++++++-- arch/x86/mm/init_64.c | 75 +++++++++++++++++++++++++----------------- 4 files changed, 63 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index dd91c2515c6..46312eb0d68 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -402,7 +402,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, /* Install a pte for a particular vaddr in kernel space. 
*/ void set_pte_vaddr(unsigned long vaddr, pte_t pte); -void populate_extra_pte(unsigned long vaddr); +pmd_t *populate_extra_pmd(unsigned long vaddr); +pte_t *populate_extra_pte(unsigned long vaddr); #ifdef CONFIG_X86_32 extern void native_pagetable_setup_start(pgd_t *base); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 2dce4355821..671e6528a82 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -41,6 +41,11 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +static void __init pcpu4k_populate_pte(unsigned long addr) +{ + populate_extra_pte(addr); +} + static inline void setup_percpu_segment(int cpu) { #ifdef CONFIG_X86_32 @@ -104,7 +109,7 @@ void __init setup_per_cpu_areas(void) } } - pcpu_unit_size = pcpu_setup_static(populate_extra_pte, pages, size); + pcpu_unit_size = pcpu_setup_static(pcpu4k_populate_pte, pages, size); free_bootmem(__pa(pages), pages_size); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8b1a0ef7f87..84a26883ab4 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -137,14 +137,21 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) return pte_offset_kernel(pmd, 0); } -void __init populate_extra_pte(unsigned long vaddr) +pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); int pmd_idx = pmd_index(vaddr); + + return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; +} + +pte_t * __init populate_extra_pte(unsigned long vaddr) +{ + int pte_idx = pte_index(vaddr); pmd_t *pmd; - pmd = one_md_table_init(swapper_pg_dir + pgd_idx); - one_page_table_init(pmd + pmd_idx); + pmd = populate_extra_pmd(vaddr); + return one_page_table_init(pmd) + pte_idx; } static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7f91e2cdc4c..7d4e76da336 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -168,34 +168,51 @@ static __ref void *spp_getpage(void) return ptr; } -void -set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) +static pud_t * __init fill_pud(pgd_t *pgd, unsigned long vaddr) { - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + if (pgd_none(*pgd)) { + pud_t *pud = (pud_t *)spp_getpage(); + pgd_populate(&init_mm, pgd, pud); + if (pud != pud_offset(pgd, 0)) + printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", + pud, pud_offset(pgd, 0)); + } + return pud_offset(pgd, vaddr); +} - pud = pud_page + pud_index(vaddr); +static pmd_t * __init fill_pmd(pud_t *pud, unsigned long vaddr) +{ if (pud_none(*pud)) { - pmd = (pmd_t *) spp_getpage(); + pmd_t *pmd = (pmd_t *) spp_getpage(); pud_populate(&init_mm, pud, pmd); - if (pmd != pmd_offset(pud, 0)) { + if (pmd != pmd_offset(pud, 0)) printk(KERN_ERR "PAGETABLE BUG #01! 
%p <-> %p\n", - pmd, pmd_offset(pud, 0)); - return; - } + pmd, pmd_offset(pud, 0)); } - pmd = pmd_offset(pud, vaddr); + return pmd_offset(pud, vaddr); +} + +static pte_t * __init fill_pte(pmd_t *pmd, unsigned long vaddr) +{ if (pmd_none(*pmd)) { - pte = (pte_t *) spp_getpage(); + pte_t *pte = (pte_t *) spp_getpage(); pmd_populate_kernel(&init_mm, pmd, pte); - if (pte != pte_offset_kernel(pmd, 0)) { + if (pte != pte_offset_kernel(pmd, 0)) printk(KERN_ERR "PAGETABLE BUG #02!\n"); - return; - } } + return pte_offset_kernel(pmd, vaddr); +} + +void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pud = pud_page + pud_index(vaddr); + pmd = fill_pmd(pud, vaddr); + pte = fill_pte(pmd, vaddr); - pte = pte_offset_kernel(pmd, vaddr); set_pte(pte, new_pte); /* @@ -205,8 +222,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) __flush_tlb_one(vaddr); } -void -set_pte_vaddr(unsigned long vaddr, pte_t pteval) +void set_pte_vaddr(unsigned long vaddr, pte_t pteval) { pgd_t *pgd; pud_t *pud_page; @@ -223,23 +239,22 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval) set_pte_vaddr_pud(pud_page, vaddr, pteval); } -void __init populate_extra_pte(unsigned long vaddr) +pmd_t * __init populate_extra_pmd(unsigned long vaddr) { pgd_t *pgd; pud_t *pud; pgd = pgd_offset_k(vaddr); - if (pgd_none(*pgd)) { - pud = (pud_t *)spp_getpage(); - pgd_populate(&init_mm, pgd, pud); - if (pud != pud_offset(pgd, 0)) { - printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", - pud, pud_offset(pgd, 0)); - return; - } - } + pud = fill_pud(pgd, vaddr); + return fill_pmd(pud, vaddr); +} + +pte_t * __init populate_extra_pte(unsigned long vaddr) +{ + pmd_t *pmd; - set_pte_vaddr_pud((pud_t *)pgd_page_vaddr(*pgd), vaddr, __pte(0)); + pmd = populate_extra_pmd(vaddr); + return fill_pte(pmd, vaddr); } /* -- cgit v1.2.3 From 8d408b4be37bc49c9086531f2ebe411cf5731746 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:21 +0900 Subject: percpu: give more latitude to arch specific first chunk initialization Impact: more latitude for first percpu chunk allocation The first percpu chunk serves the kernel static percpu area and may or may not contain extra room for further dynamic allocation. Initialization of the first chunk needs to be done before normal memory allocation service is up, so it has its own init path - pcpu_setup_static(). It seems archs need more latitude while initializing the first chunk for example to take advantage of large page mapping. This patch makes the following changes to allow this. * Define PERCPU_DYNAMIC_RESERVE to give arch hint about how much space to reserve in the first chunk for further dynamic allocation. * Rename pcpu_setup_static() to pcpu_setup_first_chunk(). * Make pcpu_setup_first_chunk() much more flexible by fetching page pointer by callback and adding optional @unit_size, @free_size and @base_addr arguments which allow archs to selectively part of chunk initialization to their likings. 
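As a rough illustration of the new interface (not part of this patch; the helper names here are made up, the real callers live in arch/x86/kernel/setup_percpu.c later in this series), an arch that just wants the plain page-by-page behaviour would end up with something like:

	/* sketch only: assumes the declarations from <linux/percpu.h> and the
	 * populate_extra_pte() helper introduced in the previous patch */
	static struct page **my_pages __initdata;	/* cpu-major array of static pages */
	static int my_nr_static_pages __initdata;

	static struct page * __init my_get_page(unsigned int cpu, int pageno)
	{
		if (pageno < my_nr_static_pages)
			return my_pages[cpu * my_nr_static_pages + pageno];
		return NULL;			/* nothing beyond the static area */
	}

	static void __init my_populate_pte(unsigned long addr)
	{
		populate_extra_pte(addr);	/* make sure addr is mapped in init_mm */
	}

	static ssize_t __init my_setup_first_chunk(size_t static_size)
	{
		/* unit_size = 0, free_size = 0, base_addr = NULL: let the
		 * generic code pick defaults and map the chunk into the
		 * vmalloc area */
		return pcpu_setup_first_chunk(my_get_page, static_size,
					      0, 0, NULL, my_populate_pte);
	}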
Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 671e6528a82..d928e888720 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -41,6 +41,16 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +static struct page **pcpu4k_pages __initdata; +static int pcpu4k_nr_static_pages __initdata; + +static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno) +{ + if (pageno < pcpu4k_nr_static_pages) + return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno]; + return NULL; +} + static void __init pcpu4k_populate_pte(unsigned long addr) { populate_extra_pte(addr); @@ -109,7 +119,10 @@ void __init setup_per_cpu_areas(void) } } - pcpu_unit_size = pcpu_setup_static(pcpu4k_populate_pte, pages, size); + pcpu4k_pages = pages; + pcpu4k_nr_static_pages = nr_cpu_pages; + pcpu_unit_size = pcpu_setup_first_chunk(pcpu4k_get_page, size, 0, 0, + NULL, pcpu4k_populate_pte); free_bootmem(__pa(pages), pages_size); -- cgit v1.2.3 From 5f5d8405d1c50f5cf7e1dbfe9c9b44e2f015c8fd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:21 +0900 Subject: x86: separate out setup_pcpu_4k() from setup_per_cpu_areas() Impact: modularize percpu first chunk allocation x86 is gonna have a few different strategies for the first chunk allocation. Modularize it by separating out the current allocation mechanism into pcpu_alloc_bootmem() and setup_pcpu_4k(). Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 144 +++++++++++++++++++++++++++++------------ 1 file changed, 102 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d928e888720..4a17c96f4f6 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,52 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +/** + * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu + * @cpu: cpu to allocate for + * @size: size allocation in bytes + * @align: alignment + * + * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper + * does the right thing for NUMA regardless of the current + * configuration. + * + * RETURNS: + * Pointer to the allocated area on success, NULL on failure. + */ +static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, + unsigned long align) +{ + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +#ifdef CONFIG_NEED_MULTIPLE_NODES + int node = early_cpu_to_node(cpu); + void *ptr; + + if (!node_online(node) || !NODE_DATA(node)) { + ptr = __alloc_bootmem_nopanic(size, align, goal); + pr_info("cpu %d has no node %d or node-local memory\n", + cpu, node); + pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), + size, align, goal); + pr_debug("per cpu data for cpu%d %lu bytes on node%d at " + "%016lx\n", cpu, size, node, __pa(ptr)); + } + return ptr; +#else + return __alloc_bootmem_nopanic(size, align, goal); +#endif +} + +/* + * 4k page allocator + * + * This is the basic allocator. Static percpu area is allocated + * page-by-page and most of initialization is done by the generic + * setup function. 
+ */ static struct page **pcpu4k_pages __initdata; static int pcpu4k_nr_static_pages __initdata; @@ -56,6 +103,51 @@ static void __init pcpu4k_populate_pte(unsigned long addr) populate_extra_pte(addr); } +static ssize_t __init setup_pcpu_4k(size_t static_size) +{ + size_t pages_size; + unsigned int cpu; + int i, j; + ssize_t ret; + + pcpu4k_nr_static_pages = PFN_UP(static_size); + + /* unaligned allocations can't be freed, round up to page size */ + pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus() + * sizeof(pcpu4k_pages[0])); + pcpu4k_pages = alloc_bootmem(pages_size); + + /* allocate and copy */ + j = 0; + for_each_possible_cpu(cpu) + for (i = 0; i < pcpu4k_nr_static_pages; i++) { + void *ptr; + + ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); + if (!ptr) + goto enomem; + + memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); + pcpu4k_pages[j++] = virt_to_page(ptr); + } + + /* we're ready, commit */ + pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", + pcpu4k_nr_static_pages, static_size); + + ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, + pcpu4k_populate_pte); + goto out_free_ar; + +enomem: + while (--j >= 0) + free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE); + ret = -ENOMEM; +out_free_ar: + free_bootmem(__pa(pcpu4k_pages), pages_size); + return ret; +} + static inline void setup_percpu_segment(int cpu) { #ifdef CONFIG_X86_32 @@ -76,56 +168,24 @@ static inline void setup_percpu_segment(int cpu) */ void __init setup_per_cpu_areas(void) { - ssize_t size = __per_cpu_end - __per_cpu_start; - unsigned int nr_cpu_pages = DIV_ROUND_UP(size, PAGE_SIZE); - static struct page **pages; - size_t pages_size; - unsigned int cpu, i, j; + size_t static_size = __per_cpu_end - __per_cpu_start; + unsigned int cpu; unsigned long delta; size_t pcpu_unit_size; + ssize_t ret; pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); - pr_info("PERCPU: Allocating %zd bytes for static per cpu data\n", size); - - pages_size = nr_cpu_pages * num_possible_cpus() * sizeof(pages[0]); - pages = alloc_bootmem(pages_size); - - j = 0; - for_each_possible_cpu(cpu) { - void *ptr; - - for (i = 0; i < nr_cpu_pages; i++) { -#ifndef CONFIG_NEED_MULTIPLE_NODES - ptr = alloc_bootmem_pages(PAGE_SIZE); -#else - int node = early_cpu_to_node(cpu); - - if (!node_online(node) || !NODE_DATA(node)) { - ptr = alloc_bootmem_pages(PAGE_SIZE); - pr_info("cpu %d has no node %d or node-local " - "memory\n", cpu, node); - pr_debug("per cpu data for cpu%d at %016lx\n", - cpu, __pa(ptr)); - } else { - ptr = alloc_bootmem_pages_node(NODE_DATA(node), - PAGE_SIZE); - pr_debug("per cpu data for cpu%d on node%d " - "at %016lx\n", cpu, node, __pa(ptr)); - } -#endif - memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); - pages[j++] = virt_to_page(ptr); - } - } - pcpu4k_pages = pages; - pcpu4k_nr_static_pages = nr_cpu_pages; - pcpu_unit_size = pcpu_setup_first_chunk(pcpu4k_get_page, size, 0, 0, - NULL, pcpu4k_populate_pte); + /* allocate percpu area */ + ret = setup_pcpu_4k(static_size); + if (ret < 0) + panic("cannot allocate static percpu area (%zu bytes, err=%zd)", + static_size, ret); - free_bootmem(__pa(pages), pages_size); + pcpu_unit_size = ret; + /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; -- cgit v1.2.3 From 
89c9215165ca609096e845926d9a18f1306176a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:21 +0900 Subject: x86: add embedding percpu first chunk allocator Impact: add better first percpu allocation for !NUMA On !NUMA, we can simply allocate contiguous memory and use it for the first chunk without mapping it into vmalloc area. As the memory area is covered by the large page physical memory mapping, it allows the dynamic perpcu allocator to not add any TLB overhead for the static percpu area and whatever falls into the first chunk and the implementation is very simple too. Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 86 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 4a17c96f4f6..fd4c399675d 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -42,6 +42,35 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +/** + * pcpu_need_numa - determine percpu allocation needs to consider NUMA + * + * If NUMA is not configured or there is only one NUMA node available, + * there is no reason to consider NUMA. This function determines + * whether percpu allocation should consider NUMA or not. + * + * RETURNS: + * true if NUMA should be considered; otherwise, false. + */ +static bool __init pcpu_need_numa(void) +{ +#ifdef CONFIG_NEED_MULTIPLE_NODES + pg_data_t *last = NULL; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + int node = early_cpu_to_node(cpu); + + if (node_online(node) && NODE_DATA(node) && + last && last != NODE_DATA(node)) + return true; + + last = NODE_DATA(node); + } +#endif + return false; +} + /** * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu * @cpu: cpu to allocate for @@ -81,6 +110,59 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, #endif } +/* + * Embedding allocator + * + * The first chunk is sized to just contain the static area plus + * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using + * bootmem allocator and used as-is without being mapped into vmalloc + * area. This enables the first chunk to piggy back on the linear + * physical PMD mapping and doesn't add any additional pressure to + * TLB. + */ +static void *pcpue_ptr __initdata; +static size_t pcpue_unit_size __initdata; + +static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) +{ + return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + + ((size_t)pageno << PAGE_SHIFT)); +} + +static ssize_t __init setup_pcpu_embed(size_t static_size) +{ + unsigned int cpu; + + /* + * If large page isn't supported, there's no benefit in doing + * this. Also, embedding allocation doesn't play well with + * NUMA. 
+ */ + if (!cpu_has_pse || pcpu_need_numa()) + return -EINVAL; + + /* allocate and copy */ + pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); + pcpue_unit_size = max(pcpue_unit_size, PCPU_MIN_UNIT_SIZE); + pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, + PAGE_SIZE); + if (!pcpue_ptr) + return -ENOMEM; + + for_each_possible_cpu(cpu) + memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load, + static_size); + + /* we're ready, commit */ + pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", + pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size); + + return pcpu_setup_first_chunk(pcpue_get_page, static_size, + pcpue_unit_size, + pcpue_unit_size - static_size, pcpue_ptr, + NULL); +} + /* * 4k page allocator * @@ -178,7 +260,9 @@ void __init setup_per_cpu_areas(void) NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); /* allocate percpu area */ - ret = setup_pcpu_4k(static_size); + ret = setup_pcpu_embed(static_size); + if (ret < 0) + ret = setup_pcpu_4k(static_size); if (ret < 0) panic("cannot allocate static percpu area (%zu bytes, err=%zd)", static_size, ret); -- cgit v1.2.3 From 8ac837571491e239e64bd87863c1679d8002e8a2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Feb 2009 11:57:22 +0900 Subject: x86: add remapping percpu first chunk allocator Impact: add better first percpu allocation for NUMA On NUMA, embedding allocator can't be used as different units can't be made to fall in the correct NUMA nodes. To use large page mapping, each unit needs to be remapped. However, percpu areas are usually much smaller than large page size and unused space hurts a lot as the number of cpus grow. This allocator remaps large pages for each chunk but gives back unused part to the bootmem allocator making the large pages mapped twice. This adds slightly to the TLB pressure but is much better than using 4k mappings while still being NUMA-friendly. Ingo suggested that this would be the correct approach for NUMA. Signed-off-by: Tejun Heo Cc: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 137 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 135 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index fd4c399675d..2d946a8f78b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -110,6 +110,133 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, #endif } +/* + * Remap allocator + * + * This allocator uses PMD page as unit. A PMD page is allocated for + * each cpu and each is remapped into vmalloc area using PMD mapping. + * As PMD page is quite large, only part of it is used for the first + * chunk. Unused part is returned to the bootmem allocator. + * + * So, the PMD pages are mapped twice - once to the physical mapping + * and to the vmalloc area for the first percpu chunk. The double + * mapping does add one more PMD TLB entry pressure but still is much + * better than only using 4k mappings while still being NUMA friendly. 
+ */ +#ifdef CONFIG_NEED_MULTIPLE_NODES +static size_t pcpur_size __initdata; +static void **pcpur_ptrs __initdata; + +static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) +{ + size_t off = (size_t)pageno << PAGE_SHIFT; + + if (off >= pcpur_size) + return NULL; + + return virt_to_page(pcpur_ptrs[cpu] + off); +} + +static ssize_t __init setup_pcpu_remap(size_t static_size) +{ + static struct vm_struct vm; + pg_data_t *last; + size_t ptrs_size; + unsigned int cpu; + ssize_t ret; + + /* + * If large page isn't supported, there's no benefit in doing + * this. Also, on non-NUMA, embedding is better. + */ + if (!cpu_has_pse || pcpu_need_numa()) + return -EINVAL; + + last = NULL; + for_each_possible_cpu(cpu) { + int node = early_cpu_to_node(cpu); + + if (node_online(node) && NODE_DATA(node) && + last && last != NODE_DATA(node)) + goto proceed; + + last = NODE_DATA(node); + } + return -EINVAL; + +proceed: + /* + * Currently supports only single page. Supporting multiple + * pages won't be too difficult if it ever becomes necessary. + */ + pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); + if (pcpur_size > PMD_SIZE) { + pr_warning("PERCPU: static data is larger than large page, " + "can't use large page\n"); + return -EINVAL; + } + + /* allocate pointer array and alloc large pages */ + ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); + pcpur_ptrs = alloc_bootmem(ptrs_size); + + for_each_possible_cpu(cpu) { + pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE); + if (!pcpur_ptrs[cpu]) + goto enomem; + + /* + * Only use pcpur_size bytes and give back the rest. + * + * Ingo: The 2MB up-rounding bootmem is needed to make + * sure the partial 2MB page is still fully RAM - it's + * not well-specified to have a PAT-incompatible area + * (unmapped RAM, device memory, etc.) in that hole. + */ + free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), + PMD_SIZE - pcpur_size); + + memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); + } + + /* allocate address and map */ + vm.flags = VM_ALLOC; + vm.size = num_possible_cpus() * PMD_SIZE; + vm_area_register_early(&vm, PMD_SIZE); + + for_each_possible_cpu(cpu) { + pmd_t *pmd; + + pmd = populate_extra_pmd((unsigned long)vm.addr + + cpu * PMD_SIZE); + set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])), + PAGE_KERNEL_LARGE)); + } + + /* we're ready, commit */ + pr_info("PERCPU: Remapped at %p with large pages, static data " + "%zu bytes\n", vm.addr, static_size); + + ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE, + pcpur_size - static_size, vm.addr, NULL); + goto out_free_ar; + +enomem: + for_each_possible_cpu(cpu) + if (pcpur_ptrs[cpu]) + free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE); + ret = -ENOMEM; +out_free_ar: + free_bootmem(__pa(pcpur_ptrs), ptrs_size); + return ret; +} +#else +static ssize_t __init setup_pcpu_remap(size_t static_size) +{ + return -EINVAL; +} +#endif + /* * Embedding allocator * @@ -259,8 +386,14 @@ void __init setup_per_cpu_areas(void) pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); - /* allocate percpu area */ - ret = setup_pcpu_embed(static_size); + /* + * Allocate percpu area. If PSE is supported, try to make use + * of large page mappings. Please read comments on top of + * each allocator for details. 
+ */ + ret = setup_pcpu_remap(static_size); + if (ret < 0) + ret = setup_pcpu_embed(static_size); if (ret < 0) ret = setup_pcpu_4k(static_size); if (ret < 0) -- cgit v1.2.3 From 30d697fa3a25fed809a873b17531a00282dc1234 Mon Sep 17 00:00:00 2001 From: Salman Qazi Date: Mon, 23 Feb 2009 18:03:04 -0800 Subject: x86: fix performance regression in write() syscall While the introduction of __copy_from_user_nocache (see commit: 0812a579c92fefa57506821fa08e90f47cb6dbdd) may have been an improvement for sufficiently large writes, there is evidence to show that it is deterimental for small writes. Unixbench's fstime test gives the following results for 256 byte writes with MAX_BLOCK of 2000: 2.6.29-rc6 ( 5 samples, each in KB/sec ): 283750, 295200, 294500, 293000, 293300 2.6.29-rc6 + this patch (5 samples, each in KB/sec): 313050, 3106750, 293350, 306300, 307900 2.6.18 395700, 342000, 399100, 366050, 359850 See w_test() in src/fstime.c in unixbench version 4.1.0. Basically, the above test consists of counting how much we can write in this manner: alarm(10); while (!sigalarm) { for (f_blocks = 0; f_blocks < 2000; ++f_blocks) { write(f, buf, 256); } lseek(f, 0L, 0); } Note, there are other components to the write syscall regression that are not addressed here. Signed-off-by: Salman Qazi Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess_64.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 84210c479fc..987a2c10fe2 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -192,14 +192,26 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) { might_sleep(); - return __copy_user_nocache(dst, src, size, 1); + /* + * In practice this limit means that large file write()s + * which get chunked to 4K copies get handled via + * non-temporal stores here. Smaller writes get handled + * via regular __copy_from_user(): + */ + if (likely(size >= PAGE_SIZE)) + return __copy_user_nocache(dst, src, size, 1); + else + return __copy_from_user(dst, src, size); } static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { - return __copy_user_nocache(dst, src, size, 0); + if (likely(size >= PAGE_SIZE)) + return __copy_user_nocache(dst, src, size, 0); + else + return __copy_from_user_inatomic(dst, src, size); } unsigned long -- cgit v1.2.3 From 2a0b1001116de51f13b2969a9a52bd2bc294644f Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:56:57 +0300 Subject: x86: head_64.S - remove useless balign Impact: cleanup NEXT_PAGE already has 'balign' so no need to keep this redundant one. 
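For reference, the macro in question (visible in the hunk below) already forces the alignment, so the standalone directive was doing nothing extra:

	#define NEXT_PAGE(name) \
		.balign	PAGE_SIZE; \
	ENTRY(name)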
Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 2e648e3a5ea..52c9af429dd 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -329,8 +329,6 @@ early_idt_ripmsg: #endif /* CONFIG_EARLY_PRINTK */ .previous -.balign PAGE_SIZE - #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ ENTRY(name) -- cgit v1.2.3 From 5e112ae23b404ccba0a61630b82ec44f0d084880 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:56:58 +0300 Subject: x86: head_64.S - use IDT_ENTRIES instead of hardcoded number Impact: cleanup Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Cc: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 52c9af429dd..54b29bb24e7 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -417,7 +417,7 @@ ENTRY(phys_base) .section .bss, "aw", @nobits .align L1_CACHE_BYTES ENTRY(idt_table) - .skip 256 * 16 + .skip IDT_ENTRIES * 16 .section .bss.page_aligned, "aw", @nobits .align PAGE_SIZE -- cgit v1.2.3 From 57e372932cec8eb141cde039aaeaa91b69fceba2 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:56:59 +0300 Subject: x86: invalid_vm86_irq -- use predefined macros Impact: cleanup Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Cc: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_vectors.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index b07278c55e9..8a285f356f8 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -128,7 +128,7 @@ #ifndef __ASSEMBLY__ static inline int invalid_vm86_irq(int irq) { - return irq < 3 || irq > 15; + return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; } #endif -- cgit v1.2.3 From b3baaa138cd4223bffb6ca64b873d25cfb1d7c70 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:57:00 +0300 Subject: x86: entry_64.S - add missing ENDPROC native_usergs_sysret64 is described as extern void native_usergs_sysret64(void) so lets add ENDPROC here Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Cc: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_64.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index fbcf96b295f..7cf8d798042 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -188,6 +188,7 @@ return_to_handler: ENTRY(native_usergs_sysret64) swapgs sysretq +ENDPROC(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ -- cgit v1.2.3 From bc8b2b9258488b932cd399112e01d5afffc4ee96 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:57:01 +0300 Subject: x86: head_64.S - use GLOBAL macro Impact: cleanup Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_64.S | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 7cf8d798042..8a6c7c63885 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -77,20 +77,17 @@ 
ENTRY(ftrace_caller) movq 8(%rbp), %rsi subq $MCOUNT_INSN_SIZE, %rdi -.globl ftrace_call -ftrace_call: +GLOBAL(ftrace_call) call ftrace_stub MCOUNT_RESTORE_FRAME #ifdef CONFIG_FUNCTION_GRAPH_TRACER -.globl ftrace_graph_call -ftrace_graph_call: +GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif -.globl ftrace_stub -ftrace_stub: +GLOBAL(ftrace_stub) retq END(ftrace_caller) @@ -110,8 +107,7 @@ ENTRY(mcount) jnz ftrace_graph_caller #endif -.globl ftrace_stub -ftrace_stub: +GLOBAL(ftrace_stub) retq trace: @@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller) retq END(ftrace_graph_caller) - -.globl return_to_handler -return_to_handler: +GLOBAL(return_to_handler) subq $80, %rsp movq %rax, (%rsp) @@ -634,16 +628,14 @@ tracesys: * Syscall return path ending with IRET. * Has correct top of stack, but partial stack frame. */ - .globl int_ret_from_sys_call - .globl int_with_check -int_ret_from_sys_call: +GLOBAL(int_ret_from_sys_call) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl $3,CS-ARGOFFSET(%rsp) je retint_restore_args movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ -int_with_check: +GLOBAL(int_with_check) LOCKDEP_SYS_EXIT_IRQ GET_THREAD_INFO(%rcx) movl TI_flags(%rcx),%edx -- cgit v1.2.3 From 9f331119a4f95a44d918fe6d5e85998fabf99b72 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 23 Feb 2009 22:57:02 +0300 Subject: x86: efi_stub_32,64 - add missing ENDPROCs Signed-off-by: Cyrill Gorcunov Cc: heukelum@fastmail.fm Signed-off-by: Ingo Molnar --- arch/x86/kernel/efi_stub_32.S | 1 + arch/x86/kernel/efi_stub_64.S | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S index ef00bb77d7e..a206b946929 100644 --- a/arch/x86/kernel/efi_stub_32.S +++ b/arch/x86/kernel/efi_stub_32.S @@ -113,6 +113,7 @@ ENTRY(efi_call_phys) movl (%edx), %ecx pushl %ecx ret +ENDPROC(efi_call_phys) .previous .data diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S index 99b47d48c9f..4c07ccab814 100644 --- a/arch/x86/kernel/efi_stub_64.S +++ b/arch/x86/kernel/efi_stub_64.S @@ -41,6 +41,7 @@ ENTRY(efi_call0) addq $32, %rsp RESTORE_XMM ret +ENDPROC(efi_call0) ENTRY(efi_call1) SAVE_XMM @@ -50,6 +51,7 @@ ENTRY(efi_call1) addq $32, %rsp RESTORE_XMM ret +ENDPROC(efi_call1) ENTRY(efi_call2) SAVE_XMM @@ -59,6 +61,7 @@ ENTRY(efi_call2) addq $32, %rsp RESTORE_XMM ret +ENDPROC(efi_call2) ENTRY(efi_call3) SAVE_XMM @@ -69,6 +72,7 @@ ENTRY(efi_call3) addq $32, %rsp RESTORE_XMM ret +ENDPROC(efi_call3) ENTRY(efi_call4) SAVE_XMM @@ -80,6 +84,7 @@ ENTRY(efi_call4) addq $32, %rsp RESTORE_XMM ret +ENDPROC(efi_call4) ENTRY(efi_call5) SAVE_XMM @@ -92,6 +97,7 @@ ENTRY(efi_call5) addq $48, %rsp RESTORE_XMM ret +ENDPROC(efi_call5) ENTRY(efi_call6) SAVE_XMM @@ -107,3 +113,4 @@ ENTRY(efi_call6) addq $48, %rsp RESTORE_XMM ret +ENDPROC(efi_call6) -- cgit v1.2.3 From 10b614eaa847447c2c8646030728309d14bd2d05 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 24 Feb 2009 21:41:32 +0100 Subject: x86_32: summit_32, use BAD_APICID Use BAD_APICID instead of 0xFF constants in summit_cpu_mask_to_apicid. Also remove bogus comments about what we actually return. 
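Strictly as an illustration (no such caller is touched by this patch), the point of the symbolic constant is that the "no valid target" check reads the same everywhere instead of repeating a magic 0xFF:

	/* hypothetical caller, sketch only */
	static int summit_pick_apicid(const cpumask_t *mask)
	{
		unsigned int apicid = summit_cpu_mask_to_apicid(mask);

		if (apicid == BAD_APICID)	/* previously: == 0xFF */
			return -EINVAL;
		return apicid;
	}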
Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/summit_32.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index cfe7b09015d..d02f4385707 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -303,12 +303,10 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) int cpu; num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ if (num_bits_set >= nr_cpu_ids) - return 0xFF; + return BAD_APICID; /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): + * The cpus in the mask must all be on the apic cluster. */ cpu = first_cpu(*cpumask); apicid = summit_cpu_to_logical_apicid(cpu); @@ -318,9 +316,9 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) int new_apicid = summit_cpu_to_logical_apicid(cpu); if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk ("%s: Not a valid mask!\n", __func__); + printk("%s: Not a valid mask!\n", __func__); - return 0xFF; + return BAD_APICID; } apicid = apicid | new_apicid; cpus_found++; -- cgit v1.2.3 From b5f26d05565d070b7b352dba56b1f96e10021980 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 24 Feb 2009 21:41:33 +0100 Subject: x86_32: summit_32, de-inline functions The ones which go only into struct genapic are de-inlined by compiler anyway, so remove the inline specifier from them. Afterwards, remove summit_setup_portio_remap completely as it is unused. Remove inline also from summit_cpu_mask_to_apicid, since it's not worth it (it is used in struct genapic too). Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/summit_32.c | 47 +++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index d02f4385707..32838b57a94 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -48,7 +48,7 @@ #include #include -static inline unsigned summit_get_apic_id(unsigned long x) +static unsigned summit_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } @@ -58,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) default_send_IPI_mask_sequence_logical(mask, vector); } -static inline void summit_send_IPI_allbutself(int vector) +static void summit_send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); @@ -67,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector) summit_send_IPI_mask(&mask, vector); } -static inline void summit_send_IPI_all(int vector) +static void summit_send_IPI_all(int vector) { summit_send_IPI_mask(&cpu_online_map, vector); } @@ -82,8 +82,8 @@ extern void setup_summit(void); #define setup_summit() {} #endif -static inline int -summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +static int summit_mps_oem_check(struct mpc_table *mpc, char *oem, + char *productid) { if (!strncmp(oem, "IBM ENSW", 8) && (!strncmp(productid, "VIGIL SMP", 9) @@ -98,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) } /* Hook from generic ACPI tables.c */ -static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int summit_acpi_madt_oem_check(char *oem_id, char 
*oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERVIGIL", 8) @@ -186,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){ #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline const cpumask_t *summit_target_cpus(void) +static const cpumask_t *summit_target_cpus(void) { /* CPU_MASK_ALL (0xff) has undefined behaviour with * dest_LowestPrio mode logical clustered apic interrupt routing @@ -195,19 +195,18 @@ static inline const cpumask_t *summit_target_cpus(void) return &cpumask_of_cpu(0); } -static inline unsigned long -summit_check_apicid_used(physid_mask_t bitmap, int apicid) +static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } /* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long summit_check_apicid_present(int bit) +static unsigned long summit_check_apicid_present(int bit) { return 1; } -static inline void summit_init_apic_ldr(void) +static void summit_init_apic_ldr(void) { unsigned long val, id; int count = 0; @@ -234,18 +233,18 @@ static inline void summit_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline int summit_apic_id_registered(void) +static int summit_apic_id_registered(void) { return 1; } -static inline void summit_setup_apic_routing(void) +static void summit_setup_apic_routing(void) { printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", nr_ioapics); } -static inline int summit_apicid_to_node(int logical_apicid) +static int summit_apicid_to_node(int logical_apicid) { #ifdef CONFIG_SMP return apicid_2_node[hard_smp_processor_id()]; @@ -266,7 +265,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu) #endif } -static inline int summit_cpu_present_to_apicid(int mps_cpu) +static int summit_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); @@ -274,28 +273,23 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline physid_mask_t -summit_ioapic_phys_id_map(physid_mask_t phys_id_map) +static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0x0F); } -static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) +static physid_mask_t summit_apicid_to_cpu_present(int apicid) { return physid_mask_of_physid(0); } -static inline void summit_setup_portio_remap(void) -{ -} - -static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) +static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) { return 1; } -static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpus_found = 0; int num_bits_set; @@ -328,8 +322,7 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int -summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, +static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { int apicid = summit_cpu_to_logical_apicid(0); @@ -354,7 +347,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, * * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 
*/ -static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) +static int summit_phys_pkg_id(int cpuid_apic, int index_msb) { return hard_smp_processor_id() >> index_msb; } -- cgit v1.2.3 From 46cb27f5169d37be38be8e5729b9a0100e989fa8 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 24 Feb 2009 13:12:43 -0800 Subject: x86: check range in reserve_early() Impact: cleanup one 32-bit system reports: BIOS-provided physical RAM map: BIOS-e820: 0000000000000000 - 000000000009fc00 (usable) BIOS-e820: 000000000009fc00 - 00000000000a0000 (reserved) BIOS-e820: 00000000000f0000 - 0000000000100000 (reserved) BIOS-e820: 0000000000100000 - 000000001c000000 (usable) BIOS-e820: 00000000ffff0000 - 0000000100000000 (reserved) DMI 2.0 present. last_pfn = 0x1c000 max_arch_pfn = 0x100000 kernel direct mapping tables up to 1c000000 @ 7000-c000 .. RAMDISK: 1bc69000 - 1bfef4fa .. 0MB HIGHMEM available. 448MB LOWMEM available. mapped low ram: 0 - 1c000000 low ram: 00000000 - 1c000000 bootmap 00002000 - 00005800 (9 early reservations) ==> bootmem [0000000000 - 001c000000] #0 [0000000000 - 0000001000] BIOS data page ==> [0000000000 - 0000001000] #1 [0000001000 - 0000002000] EX TRAMPOLINE ==> [0000001000 - 0000002000] #2 [0000006000 - 0000007000] TRAMPOLINE ==> [0000006000 - 0000007000] #3 [0000400000 - 00009ed14c] TEXT DATA BSS ==> [0000400000 - 00009ed14c] #4 [001bc69000 - 001bfef4fa] RAMDISK ==> [001bc69000 - 001bfef4fa] #5 [00009ee000 - 00009f2000] INIT_PG_TABLE ==> [00009ee000 - 00009f2000] #6 [000009f400 - 0000100000] BIOS reserved ==> [000009f400 - 0000100000] #7 [0000007000 - 0000007000] PGTABLE #8 [0000002000 - 0000006000] BOOTMAP ==> [0000002000 - 0000006000] Notice the strange blank PGTABLE entry. The reason is init_pg_table is big enough, and zero range is called with init_memory_mapping/reserve_early(). So try to check the range in reserve_early() v2: fix the reversed compare Signed-off-by: Yinghai Lu Cc: nickpiggin@yahoo.com.au Cc: ink@jurassic.park.msu.ru Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index e85826829cf..508bec1cee2 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name) */ void __init reserve_early(u64 start, u64 end, char *name) { + if (start >= end) + return; + drop_overlaps_that_are_ok(start, end); __reserve_early(start, end, name, 0); } -- cgit v1.2.3 From 1250fbed1471b681f3e75e4938f356a1c0a92d5e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 12 Feb 2009 13:43:20 +0100 Subject: x86, mce: enable machine checks in 64-bit defconfig Impact: defconfig change Enable MCE in the 64-bit defconfig. Signed-off-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/configs/x86_64_defconfig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 10728fd91c6..9fd7d156a12 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.29-rc4 -# Thu Feb 12 12:57:29 2009 +# Tue Feb 24 15:44:16 2009 # CONFIG_64BIT=y # CONFIG_X86_32 is not set @@ -268,7 +268,9 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -# CONFIG_X86_MCE is not set +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y # CONFIG_I8K is not set CONFIG_MICROCODE=y CONFIG_MICROCODE_INTEL=y -- cgit v1.2.3 From 15d4fcd615989ed83fe848e6a3c7e9f0361cf0d0 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 24 Feb 2009 15:52:58 -0800 Subject: x86, mce: enable machine checks in 32-bit defconfig Impact: defconfig change Enable MCE in the 32-bit defconfig. Signed-off-by: H. Peter Anvin --- arch/x86/configs/i386_defconfig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 8f40a80ccb5..c2cae417fab 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.29-rc4 -# Thu Feb 12 12:57:57 2009 +# Tue Feb 24 15:50:58 2009 # # CONFIG_64BIT is not set CONFIG_X86_32=y @@ -268,7 +268,9 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -# CONFIG_X86_MCE is not set +CONFIG_X86_MCE=y +CONFIG_X86_MCE_NONFATAL=y +CONFIG_X86_MCE_P4THERMAL=y CONFIG_VM86=y # CONFIG_TOSHIBA is not set # CONFIG_I8K is not set -- cgit v1.2.3 From 24ff954233ecfd45801383f831626f88937ebe6f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 25 Feb 2009 10:38:10 +0900 Subject: x86, percpu: fix minor bugs in setup_percpu.c Recent changes in setup_percpu.c made a now meaningless DBG() statement fail to compile and introduced a comparison-of-different-types warning. Fix them. Compile failure is reported by Ingo Molnar. 
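For context, the type warning comes from the kernel's max() macro, which refuses to silently mix argument types; spelling the type out with max_t() is the usual fix. A minimal sketch of the pattern (illustrative only, the wrapper function is made up):

	#include <linux/kernel.h>	/* max_t() */
	#include <linux/percpu.h>	/* PCPU_MIN_UNIT_SIZE */

	static size_t __init clamp_unit_size(size_t unit_size)
	{
		/* max(unit_size, PCPU_MIN_UNIT_SIZE) warns when the two
		 * arguments don't share a type; max_t() casts both sides
		 * to size_t first */
		return max_t(size_t, unit_size, PCPU_MIN_UNIT_SIZE);
	}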
Signed-off-by: Tejun Heo Reported-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 2d946a8f78b..c29f301d388 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -270,7 +270,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) /* allocate and copy */ pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); - pcpue_unit_size = max(pcpue_unit_size, PCPU_MIN_UNIT_SIZE); + pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE); pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, PAGE_SIZE); if (!pcpue_ptr) @@ -438,8 +438,6 @@ void __init setup_per_cpu_areas(void) */ if (cpu == boot_cpu_id) switch_to_new_gdt(cpu); - - DBG("PERCPU: cpu %4d %p\n", cpu, ptr); } /* indicate the early static arrays will soon be gone */ -- cgit v1.2.3 From d325100504f1d0c296a1fbfef558deaa655e2240 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 25 Feb 2009 11:01:40 +0900 Subject: x86: convert cacheflush macros inline functions Impact: cleanup Unused macro parameters cause spurious unused variable warnings. Convert all cacheflush macros to inline functions to avoid the warnings and achieve better type checking. Signed-off-by: Tejun Heo --- arch/x86/include/asm/cacheflush.h | 53 ++++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 2f8466540fb..5b301b7ff5f 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -5,24 +5,43 @@ #include /* Caches aren't brain-dead on the intel. 
*/ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_dcache_page(page) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma, pg) do { } while (0) -#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) do { } while (0) +static inline void flush_cache_all(void) { } +static inline void flush_cache_mm(struct mm_struct *mm) { } +static inline void flush_cache_dup_mm(struct mm_struct *mm) { } +static inline void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) { } +static inline void flush_cache_page(struct vm_area_struct *vma, + unsigned long vmaddr, unsigned long pfn) { } +static inline void flush_dcache_page(struct page *page) { } +static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } +static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } +static inline void flush_icache_range(unsigned long start, + unsigned long end) { } +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) { } +static inline void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, + unsigned long addr, + unsigned long len) { } +static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } +static inline void flush_cache_vunmap(unsigned long start, + unsigned long end) { } -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy((dst), (src), (len)) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy((dst), (src), (len)) +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); +} #define PG_non_WB PG_arch_1 PAGEFLAG(NonWB, non_WB) -- cgit v1.2.3 From fff78ad5cee1d6f695103ec590cbd2a9f3c39e8c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:28:42 -0500 Subject: [CPUFREQ] Stupidly trivial CodingStyle fix GNU indent complains about this being ambiguous, because it's dumb. One of my automated tests relies on the output of indent, so this shuts it up. 
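(For what it's worth, indent's complaint is not entirely baseless: very old C accepted "=-" as a compound assignment operator, so a token run like "speed=-1" could historically have meant "speed -= 1". The spaced form in the hunk below is unambiguous and generates the same code:)

	static unsigned int minimum_speed = -1;	/* plain assignment, wraps to UINT_MAX */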
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 1b446d79a8f..9cd73d15743 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -94,7 +94,7 @@ static struct cpufreq_frequency_table *powernow_table; static unsigned int can_scale_bus; static unsigned int can_scale_vid; -static unsigned int minimum_speed=-1; +static unsigned int minimum_speed = -1; static unsigned int maximum_speed; static unsigned int number_scales; static unsigned int fsb; -- cgit v1.2.3 From b5c916666240032b29f73a1ca52c3e0fac37335c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:39:47 -0500 Subject: [CPUFREQ] checkpatch cleanups for cpufreq-nforce2 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 38 +++++++++++++++------------ 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 965ea52767a..ad8284f0e86 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -32,7 +32,7 @@ * nforce2_chipset: * FSB is changed using the chipset */ -static struct pci_dev *nforce2_chipset_dev; +static struct pci_dev *nforce2_dev; /* fid: * multiplier * 10 @@ -56,7 +56,8 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "cpufreq-nforce2", msg) /** * nforce2_calc_fsb - calculate FSB @@ -118,11 +119,11 @@ static void nforce2_write_pll(int pll) int temp; /* Set the pll addr. to 0x00 */ - pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, 0); + pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0); /* Now write the value in all 64 registers */ for (temp = 0; temp <= 0x3f; temp++) - pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll); + pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll); return; } @@ -139,8 +140,8 @@ static unsigned int nforce2_fsb_read(int bootfsb) u32 fsb, temp = 0; /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */ - nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, - 0x01EF, PCI_ANY_ID, PCI_ANY_ID, NULL); + nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (!nforce2_sub5) return 0; @@ -148,13 +149,13 @@ static unsigned int nforce2_fsb_read(int bootfsb) fsb /= 1000000; /* Check if PLL register is already set */ - pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); if (bootfsb || !temp) return fsb; /* Use PLL register FSB value */ - pci_read_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, &temp); + pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp); fsb = nforce2_calc_fsb(temp); return fsb; @@ -185,7 +186,7 @@ static int nforce2_set_fsb(unsigned int fsb) } /* First write? 
Then set actual value */ - pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); if (!temp) { pll = nforce2_calc_pll(tfsb); @@ -197,7 +198,7 @@ static int nforce2_set_fsb(unsigned int fsb) /* Enable write access */ temp = 0x01; - pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8)temp); + pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp); diff = tfsb - fsb; @@ -222,7 +223,7 @@ static int nforce2_set_fsb(unsigned int fsb) } temp = 0x40; - pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLADR, (u8)temp); + pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp); return 0; } @@ -244,7 +245,8 @@ static unsigned int nforce2_get(unsigned int cpu) * nforce2_target - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. */ @@ -328,7 +330,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) if (!fid) { if (!cpu_khz) { printk(KERN_WARNING - "cpufreq: cpu_khz not set, can't calculate multiplier!\n"); + "cpufreq: cpu_khz not set, " + "can't calculate multiplier!\n"); return -ENODEV; } @@ -392,17 +395,18 @@ static struct cpufreq_driver nforce2_driver = { */ static unsigned int nforce2_detect_chipset(void) { - nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, + nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, PCI_ANY_ID, PCI_ANY_ID, NULL); - if (nforce2_chipset_dev == NULL) + if (nforce2_dev == NULL) return -ENODEV; printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n", - nforce2_chipset_dev->revision); + nforce2_dev->revision); printk(KERN_INFO - "cpufreq: FSB changing is maybe unstable and can lead to crashes and data loss.\n"); + "cpufreq: FSB changing is maybe unstable and can lead to " + "crashes and data loss.\n"); return 0; } -- cgit v1.2.3 From 20174b65d9fdc8dddef3d2ab9647e01fdab13064 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:42:19 -0500 Subject: [CPUFREQ] nforce2: Use driver prefix, not cpufreq prefix. Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index ad8284f0e86..99262906838 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -56,6 +56,7 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); +#define PFX "cpufreq-nforce2: " #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ "cpufreq-nforce2", msg) @@ -175,13 +176,13 @@ static int nforce2_set_fsb(unsigned int fsb) int pll = 0; if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { - printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); + printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb); return -EINVAL; } tfsb = nforce2_fsb_read(0); if (!tfsb) { - printk(KERN_ERR "cpufreq: Error while reading the FSB\n"); + printk(KERN_ERR PFX "Error while reading the FSB\n"); return -EINVAL; } @@ -278,7 +279,7 @@ static int nforce2_target(struct cpufreq_policy *policy, /* local_irq_save(flags); */ if (nforce2_set_fsb(target_fsb) < 0) - printk(KERN_ERR "cpufreq: Changing FSB to %d failed\n", + printk(KERN_ERR PFX "Changing FSB to %d failed\n", target_fsb); else dprintk("Changed FSB successfully to %d\n", @@ -329,9 +330,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) /* FIX: Get FID from CPU */ if (!fid) { if (!cpu_khz) { - printk(KERN_WARNING - "cpufreq: cpu_khz not set, " - "can't calculate multiplier!\n"); + printk(KERN_WARNING PFX + "cpu_khz not set, can't calculate multiplier!\n"); return -ENODEV; } @@ -346,7 +346,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) } } - printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb, + printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb, fid / 10, fid % 10); /* Set maximum FSB to FSB at boot time */ @@ -402,10 +402,10 @@ static unsigned int nforce2_detect_chipset(void) if (nforce2_dev == NULL) return -ENODEV; - printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n", + printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n", nforce2_dev->revision); - printk(KERN_INFO - "cpufreq: FSB changing is maybe unstable and can lead to " + printk(KERN_INFO PFX + "FSB changing is maybe unstable and can lead to " "crashes and data loss.\n"); return 0; @@ -424,7 +424,7 @@ static int __init nforce2_init(void) /* detect chipset */ if (nforce2_detect_chipset()) { - printk(KERN_ERR "cpufreq: No nForce2 chipset.\n"); + printk(KERN_ERR PFX "No nForce2 chipset.\n"); return -ENODEV; } -- cgit v1.2.3 From 04cd1a99dc22bcaa1eccbe8f8386df8c1295e979 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:50:33 -0500 Subject: [CPUFREQ] checkpatch cleanups for elanfreq The remaining warning about the simple_strtoul conversion to strict_strtoul seems kind of pointless to me. 
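If anyone does want to make checkpatch fully happy here, the conversion it has in mind would look roughly like this (sketch only, not applied; strict_strtoul() is declared in <linux/kernel.h> and, unlike simple_strtoul(), fails instead of silently stopping at the first non-digit):

	static int __init parse_max_freq(const char *str)
	{
		unsigned long val;

		if (strict_strtoul(str, 10, &val))
			return -EINVAL;		/* not a clean decimal number */
		max_freq = val;
		return 0;
	}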
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/elanfreq.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index fe613c93b36..006b278b0d5 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c @@ -184,7 +184,8 @@ static int elanfreq_target(struct cpufreq_policy *policy, { unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], + target_freq, relation, &newstate)) return -EINVAL; elanfreq_set_cpu_state(newstate); @@ -301,7 +302,8 @@ static void __exit elanfreq_exit(void) module_param(max_freq, int, 0444); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Robert Schwebel , Sven Geggus "); +MODULE_AUTHOR("Robert Schwebel , " + "Sven Geggus "); MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); module_init(elanfreq_init); -- cgit v1.2.3 From c9b8c871525aeda153494996d84a832270205d81 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:54:47 -0500 Subject: [CPUFREQ] checkpatch cleanups for e_powersaver Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index c2f930d8664..3f83ea12c47 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c @@ -12,12 +12,12 @@ #include #include #include +#include +#include +#include #include #include -#include -#include -#include #define EPS_BRAND_C7M 0 #define EPS_BRAND_C7 1 @@ -184,7 +184,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy) break; } - switch(brand) { + switch (brand) { case EPS_BRAND_C7M: printk(KERN_CONT "C7-M\n"); break; @@ -218,17 +218,20 @@ static int eps_cpu_init(struct cpufreq_policy *policy) /* Print voltage and multiplier */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); current_voltage = lo & 0xff; - printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700); + printk(KERN_INFO "eps: Current voltage = %dmV\n", + current_voltage * 16 + 700); current_multiplier = (lo >> 8) & 0xff; printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier); /* Print limits */ max_voltage = hi & 0xff; - printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700); + printk(KERN_INFO "eps: Highest voltage = %dmV\n", + max_voltage * 16 + 700); max_multiplier = (hi >> 8) & 0xff; printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier); min_voltage = (hi >> 16) & 0xff; - printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700); + printk(KERN_INFO "eps: Lowest voltage = %dmV\n", + min_voltage * 16 + 700); min_multiplier = (hi >> 24) & 0xff; printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier); @@ -318,7 +321,7 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* eps_attr[] = { +static struct freq_attr *eps_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -356,7 +359,7 @@ static void __exit eps_exit(void) cpufreq_unregister_driver(&eps_driver); } -MODULE_AUTHOR("Rafa³ Bilski "); +MODULE_AUTHOR("Rafal Bilski "); MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 
00f6a235bf241e62dfadf32b4322309e65c7b177 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:06:57 -0500 Subject: [CPUFREQ] checkpatch cleanups for gx-suspmod Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | 105 +++++++++++++++++++------------ 1 file changed, 65 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index 9d9eae82e60..ac27ec2264d 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c @@ -79,8 +79,9 @@ #include #include #include +#include + #include -#include /* PCI config registers, all at F0 */ #define PCI_PMER1 0x80 /* power management enable register 1 */ @@ -122,8 +123,8 @@ static struct gxfreq_params *gx_params; static int stock_freq; /* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ -static int pci_busclk = 0; -module_param (pci_busclk, int, 0444); +static int pci_busclk; +module_param(pci_busclk, int, 0444); /* maximum duration for which the cpu may be suspended * (32us * MAX_DURATION). If no parameter is given, this defaults @@ -132,7 +133,7 @@ module_param (pci_busclk, int, 0444); * is suspended -- processing power is just 0.39% of what it used to be, * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ static int max_duration = 255; -module_param (max_duration, int, 0444); +module_param(max_duration, int, 0444); /* For the default policy, we want at least some processing power * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) @@ -140,7 +141,8 @@ module_param (max_duration, int, 0444); #define POLICY_MIN_DIV 20 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "gx-suspmod", msg) /** * we can detect a core multipiler from dir0_lsb @@ -166,12 +168,20 @@ static int gx_freq_mult[16] = { * Low Level chipset interface * ****************************************************************/ static struct pci_device_id gx_chipset_tbl[] __initdata = { - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, + PCI_ANY_ID, PCI_ANY_ID }, { 0, }, }; +static void gx_write_byte(int reg, int value) +{ + pci_write_config_byte(gx_params->cs55x0, reg, value); +} + /** * gx_detect_chipset: * @@ -200,7 +210,8 @@ static __init struct pci_dev *gx_detect_chipset(void) /** * gx_get_cpuspeed: * - * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. + * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi + * Geode CPU runs. 
*/ static unsigned int gx_get_cpuspeed(unsigned int cpu) { @@ -217,17 +228,18 @@ static unsigned int gx_get_cpuspeed(unsigned int cpu) * **/ -static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) +static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, + u8 *off_duration) { unsigned int i; u8 tmp_on, tmp_off; int old_tmp_freq = stock_freq; int tmp_freq; - *off_duration=1; - *on_duration=0; + *off_duration = 1; + *on_duration = 0; - for (i=max_duration; i>0; i--) { + for (i = max_duration; i > 0; i--) { tmp_off = ((khz * i) / stock_freq) & 0xff; tmp_on = i - tmp_off; tmp_freq = (stock_freq * tmp_off) / i; @@ -259,26 +271,34 @@ static void gx_set_cpuspeed(unsigned int khz) freqs.cpu = 0; freqs.old = gx_get_cpuspeed(0); - new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); + new_khz = gx_validate_speed(khz, &gx_params->on_duration, + &gx_params->off_duration); freqs.new = new_khz; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); - if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ + + + if (new_khz != stock_freq) { + /* if new khz == 100% of CPU speed, it is special case */ switch (gx_params->cs55x0->device) { case PCI_DEVICE_ID_CYRIX_5530_LEGACY: pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; /* FIXME: need to test other values -- Zwane,Miura */ - pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ - pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ - pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); - - if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */ - suscfg = gx_params->pci_suscfg | SUSMOD; - } else { /* CS5530A,B.. */ - suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; + /* typical 2 to 4ms */ + gx_write_byte(PCI_IRQTC, 4); + /* typical 50 to 100ms */ + gx_write_byte(PCI_VIDTC, 100); + gx_write_byte(PCI_PMER1, pmer1); + + if (gx_params->cs55x0->revision < 0x10) { + /* CS5530(rev 1.2, 1.3) */ + suscfg = gx_params->pci_suscfg|SUSMOD; + } else { + /* CS5530A,B.. */ + suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE; } break; case PCI_DEVICE_ID_CYRIX_5520: @@ -294,13 +314,13 @@ static void gx_set_cpuspeed(unsigned int khz) suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; - dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); + dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); } - pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); - pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); + gx_write_byte(PCI_MODOFF, gx_params->off_duration); + gx_write_byte(PCI_MODON, gx_params->on_duration); - pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); + gx_write_byte(PCI_SUSCFG, suscfg); pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); local_irq_restore(flags); @@ -334,7 +354,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy) return -EINVAL; policy->cpu = 0; - cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), + stock_freq); /* it needs to be assured that at least one supported frequency is * within policy->min and policy->max. 
If it is not, policy->max @@ -354,7 +375,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy) policy->max = tmp_freq; if (policy->max < policy->min) policy->max = policy->min; - cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), + stock_freq); return 0; } @@ -398,18 +420,18 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) return -ENODEV; /* determine maximum frequency */ - if (pci_busclk) { + if (pci_busclk) maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; - } else if (cpu_khz) { + else if (cpu_khz) maxfreq = cpu_khz; - } else { + else maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; - } + stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); dprintk("cpu max frequency is %d.\n", maxfreq); - dprintk("cpu current frequency is %dkHz.\n",curfreq); + dprintk("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -447,7 +469,8 @@ static int __init cpufreq_gx_init(void) struct pci_dev *gx_pci; /* Test if we have the right hardware */ - if ((gx_pci = gx_detect_chipset()) == NULL) + gx_pci = gx_detect_chipset(); + if (gx_pci == NULL) return -ENODEV; /* check whether module parameters are sane */ @@ -468,9 +491,11 @@ static int __init cpufreq_gx_init(void) pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); - pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, + &(params->off_duration)); - if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { + ret = cpufreq_register_driver(&gx_suspmod_driver); + if (ret) { kfree(params); return ret; /* register error! 
*/ } @@ -485,9 +510,9 @@ static void __exit cpufreq_gx_exit(void) kfree(gx_params); } -MODULE_AUTHOR ("Hiroshi Miura "); -MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Hiroshi Miura "); +MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE("GPL"); module_init(cpufreq_gx_init); module_exit(cpufreq_gx_exit); -- cgit v1.2.3 From ac617bd0f7b959dc6708ad2f0d6b9dcf4382f1ed Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:29:53 -0500 Subject: [CPUFREQ] checkpatch cleanups for longhaul Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longhaul.c | 182 +++++++++++++++++---------------- arch/x86/kernel/cpu/cpufreq/longhaul.h | 12 +-- 2 files changed, 100 insertions(+), 94 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index a4cff5d6e38..dc03622a83a 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -30,12 +30,12 @@ #include #include #include +#include +#include +#include +#include #include -#include -#include -#include -#include #include #include "longhaul.h" @@ -58,7 +58,7 @@ #define USE_NORTHBRIDGE (1 << 2) static int cpu_model; -static unsigned int numscales=16; +static unsigned int numscales = 16; static unsigned int fsb; static const struct mV_pos *vrm_mV_table; @@ -67,8 +67,8 @@ static const unsigned char *mV_vrm_table; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; -static struct acpi_processor *pr = NULL; -static struct acpi_processor_cx *cx = NULL; +static struct acpi_processor *pr; +static struct acpi_processor_cx *cx; static u32 acpi_regs_addr; static u8 longhaul_flags; static unsigned int longhaul_index; @@ -78,12 +78,13 @@ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longhaul", msg) /* Clock ratios multiplied by 10 */ -static int clock_ratio[32]; -static int eblcr_table[32]; +static int mults[32]; +static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; @@ -93,7 +94,7 @@ static char speedbuffer[8]; static char *print_speed(int speed) { if (speed < 1000) { - snprintf(speedbuffer, sizeof(speedbuffer),"%dMHz", speed); + snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed); return speedbuffer; } @@ -122,27 +123,28 @@ static unsigned int calc_speed(int mult) static int longhaul_get_cpu_mult(void) { - unsigned long invalue=0,lo, hi; + unsigned long invalue = 0, lo, hi; - rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); - invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; - if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) { + rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22; + if (longhaul_version == TYPE_LONGHAUL_V2 || + longhaul_version == TYPE_POWERSAVER) { if (lo & (1<<27)) - invalue+=16; + invalue += 16; } - return eblcr_table[invalue]; + return eblcr[invalue]; } /* For processor with BCR2 MSR */ -static void do_longhaul1(unsigned int clock_ratio_index) +static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; - bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff; + bcr2.bits.CLOCKMUL = mults_index & 0xff; /* Sync to timer tick */ safe_halt(); @@ -161,7 +163,7 @@ static void do_longhaul1(unsigned int clock_ratio_index) /* For processor with Longhaul MSR */ -static void do_powersaver(int cx_address, unsigned int clock_ratio_index, +static void do_powersaver(int cx_address, unsigned int mults_index, unsigned int dir) { union msr_longhaul longhaul; @@ -173,11 +175,11 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index, longhaul.bits.RevisionKey = longhaul.bits.RevisionID; else longhaul.bits.RevisionKey = 0; - longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; - longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; + longhaul.bits.SoftBusRatio = mults_index & 0xf; + longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) - longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f; + longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ @@ -240,14 +242,14 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index, /** * longhaul_set_cpu_frequency() - * @clock_ratio_index : bitpattern of the new multiplier. + * @mults_index : bitpattern of the new multiplier. * * Sets a new clock ratio. 
*/ static void longhaul_setstate(unsigned int table_index) { - unsigned int clock_ratio_index; + unsigned int mults_index; int speed, mult; struct cpufreq_freqs freqs; unsigned long flags; @@ -256,9 +258,9 @@ static void longhaul_setstate(unsigned int table_index) u32 bm_timeout = 1000; unsigned int dir = 0; - clock_ratio_index = longhaul_table[table_index].index; + mults_index = longhaul_table[table_index].index; /* Safety precautions */ - mult = clock_ratio[clock_ratio_index & 0x1f]; + mult = mults[mults_index & 0x1f]; if (mult == -1) return; speed = calc_speed(mult); @@ -274,7 +276,7 @@ static void longhaul_setstate(unsigned int table_index) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); @@ -282,8 +284,8 @@ retry_loop: pic2_mask = inb(0xA1); pic1_mask = inb(0x21); /* works on C3. save mask. */ - outb(0xFF,0xA1); /* Overkill */ - outb(0xFE,0x21); /* TMR0 only */ + outb(0xFF, 0xA1); /* Overkill */ + outb(0xFE, 0x21); /* TMR0 only */ /* Wait while PCI bus is busy. */ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE @@ -312,7 +314,7 @@ retry_loop: * Software controlled multipliers only. */ case TYPE_LONGHAUL_V1: - do_longhaul1(clock_ratio_index); + do_longhaul1(mults_index); break; /* @@ -327,9 +329,9 @@ retry_loop: if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - do_powersaver(cx->address, clock_ratio_index, dir); + do_powersaver(cx->address, mults_index, dir); } else { - do_powersaver(0, clock_ratio_index, dir); + do_powersaver(0, mults_index, dir); } break; } @@ -341,8 +343,8 @@ retry_loop: /* Enable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); } - outb(pic2_mask,0xA1); /* restore mask */ - outb(pic1_mask,0x21); + outb(pic2_mask, 0xA1); /* restore mask */ + outb(pic1_mask, 0x21); local_irq_restore(flags); preempt_enable(); @@ -392,7 +394,8 @@ retry_loop: cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) - printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n"); + printk(KERN_INFO PFX "Warning: Timeout while waiting for " + "idle PCI bus.\n"); } /* @@ -458,31 +461,32 @@ static int __init longhaul_get_ranges(void) break; } - dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", + dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); - dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); if (lowest_speed == highest_speed) { - printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n"); + printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); return -EINVAL; } if (lowest_speed > highest_speed) { - printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", + printk(KERN_INFO PFX "nonsense! 
lowest (%d > %d) !\n", lowest_speed, highest_speed); return -EINVAL; } - longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); - if(!longhaul_table) + longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), + GFP_KERNEL); + if (!longhaul_table) return -ENOMEM; for (j = 0; j < numscales; j++) { - ratio = clock_ratio[j]; + ratio = mults[j]; if (ratio == -1) continue; if (ratio > maxmult || ratio < minmult) @@ -521,7 +525,7 @@ static int __init longhaul_get_ranges(void) /* Find index we are running on */ for (j = 0; j < k; j++) { - if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) { + if (mults[longhaul_table[j].index & 0x1f] == mult) { longhaul_index = j; break; } @@ -559,20 +563,22 @@ static void __init longhaul_setup_voltagescaling(void) maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { - printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " + printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " "Voltage scaling disabled.\n", - minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); + minvid.mV/1000, minvid.mV%1000, + maxvid.mV/1000, maxvid.mV%1000); return; } if (minvid.mV == maxvid.mV) { - printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are " - "both %d.%03d. Voltage scaling disabled\n", + printk(KERN_INFO PFX "Claims to support voltage scaling but " + "min & max are both %d.%03d. " + "Voltage scaling disabled\n", maxvid.mV/1000, maxvid.mV%1000); return; } - /* How many voltage steps */ + /* How many voltage steps*/ numvscales = maxvid.pos - minvid.pos + 1; printk(KERN_INFO PFX "Max VID=%d.%03d " @@ -586,7 +592,7 @@ static void __init longhaul_setup_voltagescaling(void) j = longhaul.bits.MinMHzBR; if (longhaul.bits.MinMHzBR4) j += 16; - min_vid_speed = eblcr_table[j]; + min_vid_speed = eblcr[j]; if (min_vid_speed == -1) return; switch (longhaul.bits.MinMHzFSB) { @@ -617,7 +623,8 @@ static void __init longhaul_setup_voltagescaling(void) pos = minvid.pos; longhaul_table[j].index |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; - printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); + printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", + speed, j, vid.mV); j++; } @@ -640,7 +647,8 @@ static int longhaul_target(struct cpufreq_policy *policy, unsigned int dir = 0; u8 vid, current_vid; - if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) + if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, + relation, &table_index)) return -EINVAL; /* Don't set same frequency again */ @@ -656,7 +664,8 @@ static int longhaul_target(struct cpufreq_policy *policy, * this in hardware, C3 is old and we need to do this * in software. 
*/ i = longhaul_index; - current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f; + current_vid = (longhaul_table[longhaul_index].index >> 8); + current_vid &= 0x1f; if (table_index > longhaul_index) dir = 1; while (i != table_index) { @@ -691,9 +700,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle, { struct acpi_device *d; - if ( acpi_bus_get_device(obj_handle, &d) ) { + if (acpi_bus_get_device(obj_handle, &d)) return 0; - } + *return_value = acpi_driver_data(d); return 1; } @@ -750,7 +759,7 @@ static int longhaul_setup_southbridge(void) /* Find VT8235 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); if (dev == NULL) - /* Find VT8237 southbridge */ + /* Find VT8237 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, NULL); if (dev != NULL) { @@ -769,7 +778,8 @@ static int longhaul_setup_southbridge(void) if (pci_cmd & 1 << 7) { pci_read_config_dword(dev, 0x88, &acpi_regs_addr); acpi_regs_addr &= 0xff00; - printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); + printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", + acpi_regs_addr); } pci_dev_put(dev); @@ -781,7 +791,7 @@ static int longhaul_setup_southbridge(void) static int __init longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); - char *cpuname=NULL; + char *cpuname = NULL; int ret; u32 lo, hi; @@ -791,8 +801,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_SAMUEL; cpuname = "C3 'Samuel' [C5A]"; longhaul_version = TYPE_LONGHAUL_V1; - memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); - memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr)); break; case 7: @@ -803,10 +813,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpuname = "C3 'Samuel 2' [C5B]"; /* Note, this is not a typo, early Samuel2's had * Samuel1 ratios. */ - memcpy(clock_ratio, samuel1_clock_ratio, - sizeof(samuel1_clock_ratio)); - memcpy(eblcr_table, samuel2_eblcr, - sizeof(samuel2_eblcr)); + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V1; @@ -817,10 +825,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_EZRA; cpuname = "C3 'Ezra' [C5C]"; } - memcpy(clock_ratio, ezra_clock_ratio, - sizeof(ezra_clock_ratio)); - memcpy(eblcr_table, ezra_eblcr, - sizeof(ezra_eblcr)); + memcpy(mults, ezra_mults, sizeof(ezra_mults)); + memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr)); break; } break; @@ -829,18 +835,16 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_EZRA_T; cpuname = "C3 'Ezra-T' [C5M]"; longhaul_version = TYPE_POWERSAVER; - numscales=32; - memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio)); - memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr)); + numscales = 32; + memcpy(mults, ezrat_mults, sizeof(ezrat_mults)); + memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr)); break; case 9: longhaul_version = TYPE_POWERSAVER; numscales = 32; - memcpy(clock_ratio, - nehemiah_clock_ratio, - sizeof(nehemiah_clock_ratio)); - memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr)); + memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); + memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); switch (c->x86_mask) { case 0 ... 
1: cpu_model = CPU_NEHEMIAH; @@ -869,14 +873,14 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) longhaul_version = TYPE_LONGHAUL_V1; } - printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname); + printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname); switch (longhaul_version) { case TYPE_LONGHAUL_V1: case TYPE_LONGHAUL_V2: - printk ("Longhaul v%d supported.\n", longhaul_version); + printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); break; case TYPE_POWERSAVER: - printk ("Powersaver supported.\n"); + printk(KERN_CONT "Powersaver supported.\n"); break; }; @@ -940,7 +944,7 @@ static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* longhaul_attr[] = { +static struct freq_attr *longhaul_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -966,13 +970,15 @@ static int __init longhaul_init(void) #ifdef CONFIG_SMP if (num_online_cpus() > 1) { - printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); + printk(KERN_ERR PFX "More than 1 CPU detected, " + "longhaul disabled.\n"); return -ENODEV; } #endif #ifdef CONFIG_X86_IO_APIC if (cpu_has_apic) { - printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); + printk(KERN_ERR PFX "APIC detected. Longhaul is currently " + "broken in this configuration.\n"); return -ENODEV; } #endif @@ -993,8 +999,8 @@ static void __exit longhaul_exit(void) { int i; - for (i=0; i < numscales; i++) { - if (clock_ratio[i] == maxmult) { + for (i = 0; i < numscales; i++) { + if (mults[i] == maxmult) { longhaul_setstate(i); break; } @@ -1007,11 +1013,11 @@ static void __exit longhaul_exit(void) /* Even if BIOS is exporting ACPI C3 state, and it is used * with success when CPU is idle, this state doesn't * trigger frequency transition in some cases. */ -module_param (disable_acpi_c3, int, 0644); +module_param(disable_acpi_c3, int, 0644); MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); /* Change CPU voltage with frequency. Very usefull to save * power, but most VIA C3 processors aren't supporting it. */ -module_param (scale_voltage, int, 0644); +module_param(scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); /* Force revision key to 0 for processors which doesn't * support voltage scaling, but are introducing itself as @@ -1019,9 +1025,9 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); module_param(revid_errata, int, 0644); MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); -MODULE_AUTHOR ("Dave Jones "); -MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones "); +MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors."); +MODULE_LICENSE("GPL"); late_initcall(longhaul_init); module_exit(longhaul_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/arch/x86/kernel/cpu/cpufreq/longhaul.h index 4fcc320997d..e2360a469f7 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.h +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.h @@ -49,14 +49,14 @@ union msr_longhaul { /* * Clock ratio tables. Div/Mod by 10 to get ratio. - * The eblcr ones specify the ratio read from the CPU. - * The clock_ratio ones specify what to write to the CPU. + * The eblcr values specify the ratio read from the CPU. + * The mults values specify what to write to the CPU. 
*/ /* * VIA C3 Samuel 1 & Samuel 2 (stepping 0) */ -static const int __initdata samuel1_clock_ratio[16] = { +static const int __initdata samuel1_mults[16] = { -1, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -119,7 +119,7 @@ static const int __initdata samuel2_eblcr[16] = { /* * VIA C3 Ezra */ -static const int __initdata ezra_clock_ratio[16] = { +static const int __initdata ezra_mults[16] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -160,7 +160,7 @@ static const int __initdata ezra_eblcr[16] = { /* * VIA C3 (Ezra-T) [C5M]. */ -static const int __initdata ezrat_clock_ratio[32] = { +static const int __initdata ezrat_mults[32] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -235,7 +235,7 @@ static const int __initdata ezrat_eblcr[32] = { /* * VIA C3 Nehemiah */ -static const int __initdata nehemiah_clock_ratio[32] = { +static const int __initdata nehemiah_mults[32] = { 100, /* 0000 -> 10.0x */ -1, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ -- cgit v1.2.3 From 48ee923a666d4cc54e48d55fc573c57492501122 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:32:50 -0500 Subject: [CPUFREQ] checkpatch cleanups for longrun Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longrun.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c index 777a7ff075d..da5f70fcb76 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c @@ -11,12 +11,13 @@ #include #include #include +#include #include #include -#include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longrun", msg) static struct cpufreq_driver longrun_driver; @@ -51,7 +52,7 @@ static void __init longrun_get_policy(struct cpufreq_policy *policy) msr_lo &= 0x0000007F; msr_hi &= 0x0000007F; - if ( longrun_high_freq <= longrun_low_freq ) { + if (longrun_high_freq <= longrun_low_freq) { /* Assume degenerate Longrun table */ policy->min = policy->max = longrun_high_freq; } else { @@ -79,7 +80,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy) if (!policy) return -EINVAL; - if ( longrun_high_freq <= longrun_low_freq ) { + if (longrun_high_freq <= longrun_low_freq) { /* Assume degenerate Longrun table */ pctg_lo = pctg_hi = 100; } else { @@ -152,7 +153,7 @@ static unsigned int longrun_get(unsigned int cpu) cpuid(0x80860007, &eax, &ebx, &ecx, &edx); dprintk("cpuid eax is %u\n", eax); - return (eax * 1000); + return eax * 1000; } /** @@ -196,7 +197,8 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); *high_freq = msr_lo * 1000; /* to kHz */ - dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); + dprintk("longrun table interface told %u - %u kHz\n", + *low_freq, *high_freq); if (*low_freq > *high_freq) *low_freq = *high_freq; @@ -219,7 +221,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, cpuid(0x80860007, &eax, &ebx, &ecx, &edx); /* try decreasing in 10% steps, some processors react only * on some barrier values */ - for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) { + for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) { /* set to 0 to try_hi perf_pctg */ msr_lo &= 0xFFFFFF80; msr_hi &= 0xFFFFFF80; @@ -236,7 +238,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) * eqals - * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) + * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) * * high_freq * perf_pctg is stored tempoarily into "ebx". */ @@ -317,9 +319,10 @@ static void __exit longrun_exit(void) } -MODULE_AUTHOR ("Dominik Brodowski "); -MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe and Efficeon processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dominik Brodowski "); +MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and " + "Efficeon processors."); +MODULE_LICENSE("GPL"); module_init(longrun_init); module_exit(longrun_exit); -- cgit v1.2.3 From 14a6650f13b958aabc30ddd575b0902384b22457 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 00:00:04 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k6 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 44 ++++++++++++++++++------------- 1 file changed, 26 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index c1ac5790c63..f10dea409f4 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c @@ -1,6 +1,7 @@ /* * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) - * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, + * Dominik Brodowski. * * Licensed under the terms of the GNU GPL License version 2. 
* @@ -13,14 +14,15 @@ #include #include #include - -#include #include #include +#include + #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long as it is unused */ +#define PFX "powernow-k6: " static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; @@ -47,8 +49,8 @@ static struct cpufreq_frequency_table clock_ratio[] = { */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; - u32 msrval; + u64 invalue = 0; + u32 msrval; msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ @@ -68,12 +70,12 @@ static int powernow_k6_get_cpu_multiplier(void) */ static void powernow_k6_set_state(unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; - unsigned long msrval; - struct cpufreq_freqs freqs; + unsigned long outvalue = 0, invalue = 0; + unsigned long msrval; + struct cpufreq_freqs freqs; if (clock_ratio[best_i].index > max_multiplier) { - printk(KERN_ERR "cpufreq: invalid target frequency\n"); + printk(KERN_ERR PFX "invalid target frequency\n"); return; } @@ -119,7 +121,8 @@ static int powernow_k6_verify(struct cpufreq_policy *policy) * powernow_k6_setpolicy - sets a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * sets a new CPUFreq policy */ @@ -127,9 +130,10 @@ static int powernow_k6_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - unsigned int newstate = 0; + unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &clock_ratio[0], + target_freq, relation, &newstate)) return -EINVAL; powernow_k6_set_state(newstate); @@ -140,7 +144,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy, static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { - unsigned int i; + unsigned int i, f; int result; if (policy->cpu != 0) @@ -152,10 +156,11 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) /* table init */ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { - if (clock_ratio[i].index > max_multiplier) + f = clock_ratio[i].index; + if (f > max_multiplier) clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; else - clock_ratio[i].frequency = busfreq * clock_ratio[i].index; + clock_ratio[i].frequency = busfreq * f; } /* cpuinfo and default policy values */ @@ -185,7 +190,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) static unsigned int powernow_k6_get(unsigned int cpu) { - return busfreq * powernow_k6_get_cpu_multiplier(); + unsigned int ret; + ret = (busfreq * powernow_k6_get_cpu_multiplier()); + return ret; } static struct freq_attr *powernow_k6_attr[] = { @@ -221,7 +228,7 @@ static int __init powernow_k6_init(void) return -ENODEV; if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { - printk("cpufreq: PowerNow IOPORT region already used.\n"); + printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n"); return -EIO; } @@ -246,7 +253,8 @@ static void __exit powernow_k6_exit(void) } -MODULE_AUTHOR("Arjan van de Ven, Dave Jones , Dominik Brodowski "); +MODULE_AUTHOR("Arjan van de Ven, Dave Jones , " + "Dominik Brodowski "); MODULE_DESCRIPTION("PowerNow! 
driver for AMD K6-2+ / K6-3+ processors."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 6072ace436800a011c3cb9a75ee49276bd90445a Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 01:27:35 -0500 Subject: [CPUFREQ] checkpatch cleanups for sc520 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index 42da9bd677d..435a996a613 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c @@ -19,17 +19,19 @@ #include #include +#include +#include #include -#include -#include #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_CPUCTL 0x2 /* CPU Control Register */ static __u8 __iomem *cpuctl; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "sc520_freq", msg) +#define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { {0x01, 100000}, @@ -43,7 +45,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) switch (clockspeed_reg & 0x03) { default: - printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg); + printk(KERN_ERR PFX "error: cpuctl register has unexpected " + "value %02x\n", clockspeed_reg); case 0x01: return 100000; case 0x02: @@ -51,7 +54,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) } } -static void sc520_freq_set_cpu_state (unsigned int state) +static void sc520_freq_set_cpu_state(unsigned int state) { struct cpufreq_freqs freqs; @@ -76,18 +79,19 @@ static void sc520_freq_set_cpu_state (unsigned int state) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); }; -static int sc520_freq_verify (struct cpufreq_policy *policy) +static int sc520_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); } -static int sc520_freq_target (struct cpufreq_policy *policy, +static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, sc520_freq_table, + target_freq, relation, &newstate)) return -EINVAL; sc520_freq_set_cpu_state(newstate); @@ -116,7 +120,7 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); if (result) - return (result); + return result; cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); @@ -131,7 +135,7 @@ static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) } -static struct freq_attr* sc520_freq_attr[] = { +static struct freq_attr *sc520_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -155,13 +159,13 @@ static int __init sc520_freq_init(void) int err; /* Test if we have the right hardware */ - if(c->x86_vendor != X86_VENDOR_AMD || - c->x86 != 4 || c->x86_model != 9) { + if (c->x86_vendor != X86_VENDOR_AMD || + c->x86 != 4 || c->x86_model != 9) { dprintk("no Elan SC520 processor found!\n"); return -ENODEV; } cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); - if(!cpuctl) { + if (!cpuctl) { printk(KERN_ERR "sc520_freq: error: failed to remap memory\n"); return 
-ENOMEM; } -- cgit v1.2.3 From bbfebd66554b934b270c4c49442f4fe5e62df0e5 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:55:22 -0500 Subject: [CPUFREQ] checkpatch cleanups for speedstep related drivers. Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 60 +++++----- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 70 ++++++------ arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 129 ++++++++++++--------- arch/x86/kernel/cpu/cpufreq/speedstep-lib.h | 18 +-- arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | 166 +++++++++++++++++----------- 5 files changed, 258 insertions(+), 185 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index b585e04cbc9..46a2a7a5314 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -27,15 +27,16 @@ #include #include #include +#include #include #include -#include #include "speedstep-lib.h" #define PFX "p4-clockmod: " -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "p4-clockmod", msg) /* * Duty Cycle (3bits), note DC_DISABLE is not specified in @@ -58,7 +59,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) { u32 l, h; - if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV)) + if (!cpu_online(cpu) || + (newstate > DC_DISABLE) || (newstate == DC_RESV)) return -EINVAL; rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); @@ -66,7 +68,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) if (l & 0x01) dprintk("CPU#%d currently thermal throttled\n", cpu); - if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) + if (has_N44_O17_errata[cpu] && + (newstate == DC_25PT || newstate == DC_DFLT)) newstate = DC_38PT; rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); @@ -112,7 +115,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int i; - if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = cpufreq_p4_get(policy->cpu); @@ -127,7 +131,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } - /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software + /* run on each logical CPU, + * see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ for_each_cpu(i, policy->cpus) @@ -153,28 +158,30 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) { if (c->x86 == 0x06) { if (cpu_has(c, X86_FEATURE_EST)) - printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. " - "The acpi-cpufreq module offers voltage scaling" - " in addition of frequency scaling. You should use " - "that instead of p4-clockmod, if possible.\n"); + printk(KERN_WARNING PFX "Warning: EST-capable CPU " + "detected. The acpi-cpufreq module offers " + "voltage scaling in addition of frequency " + "scaling. 
You should use that instead of " + "p4-clockmod, if possible.\n"); switch (c->x86_model) { case 0x0E: /* Core */ case 0x0F: /* Core Duo */ case 0x16: /* Celeron Core */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); + return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; /* fall through */ case 0x09: /* Pentium M (Banias) */ - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); + return speedstep_get_frequency(SPEEDSTEP_CPU_PM); } } if (c->x86 != 0xF) { if (!cpu_has(c, X86_FEATURE_EST)) - printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. " - "Please send an e-mail to \n"); + printk(KERN_WARNING PFX "Unknown CPU. " + "Please send an e-mail to " + "\n"); return 0; } @@ -182,16 +189,16 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) * throttling is active or not. */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) { + if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " "The speedstep-ich or acpi cpufreq modules offer " "voltage scaling in addition of frequency scaling. " "You should use either one instead of p4-clockmod, " "if possible.\n"); - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M); + return speedstep_get_frequency(SPEEDSTEP_CPU_P4M); } - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D); + return speedstep_get_frequency(SPEEDSTEP_CPU_P4D); } @@ -223,8 +230,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) return -EINVAL; /* table init */ - for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { - if ((i<2) && (has_N44_O17_errata[policy->cpu])) + for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if ((i < 2) && (has_N44_O17_errata[policy->cpu])) p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; else p4clockmod_table[i].frequency = (stock_freq * i)/8; @@ -258,12 +265,12 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) l = DC_DISABLE; if (l != DC_DISABLE) - return (stock_freq * l / 8); + return stock_freq * l / 8; return stock_freq; } -static struct freq_attr* p4clockmod_attr[] = { +static struct freq_attr *p4clockmod_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -299,9 +306,10 @@ static int __init cpufreq_p4_init(void) ret = cpufreq_register_driver(&p4clockmod_driver); if (!ret) - printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock " + "Modulation available\n"); - return (ret); + return ret; } @@ -311,9 +319,9 @@ static void __exit cpufreq_p4_exit(void) } -MODULE_AUTHOR ("Zwane Mwaikambo "); -MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Zwane Mwaikambo "); +MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE("GPL"); late_initcall(cpufreq_p4_init); module_exit(cpufreq_p4_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index dedc1e98f16..8bbb11adb31 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev; /* speedstep_processor */ -static unsigned int speedstep_processor = 0; +static unsigned int 
speedstep_processor; static u32 pmbase; @@ -54,7 +54,8 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { }; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-ich", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-ich", msg) /** @@ -62,7 +63,7 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * * Returns: -ENODEV if no register could be found */ -static int speedstep_find_register (void) +static int speedstep_find_register(void) { if (!speedstep_chipset_dev) return -ENODEV; @@ -90,7 +91,7 @@ static int speedstep_find_register (void) * * Tries to change the SpeedStep state. */ -static void speedstep_set_state (unsigned int state) +static void speedstep_set_state(unsigned int state) { u8 pm2_blk; u8 value; @@ -133,11 +134,11 @@ static void speedstep_set_state (unsigned int state) dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); - if (state == (value & 0x1)) { - dprintk("change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000)); - } else { - printk (KERN_ERR "cpufreq: change failed - I/O error\n"); - } + if (state == (value & 0x1)) + dprintk("change to %u MHz succeeded\n", + speedstep_get_frequency(speedstep_processor) / 1000); + else + printk(KERN_ERR "cpufreq: change failed - I/O error\n"); return; } @@ -149,7 +150,7 @@ static void speedstep_set_state (unsigned int state) * Tries to activate the SpeedStep status and control registers. * Returns -EINVAL on an unsupported chipset, and zero on success. */ -static int speedstep_activate (void) +static int speedstep_activate(void) { u16 value = 0; @@ -175,20 +176,18 @@ static int speedstep_activate (void) * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected * chipset, or zero on failure. 
*/ -static unsigned int speedstep_detect_chipset (void) +static unsigned int speedstep_detect_chipset(void) { speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 4; /* 4-M */ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 3; /* 3-M */ @@ -196,8 +195,7 @@ static unsigned int speedstep_detect_chipset (void) speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) { /* speedstep.c causes lockups on Dell Inspirons 8000 and @@ -208,8 +206,7 @@ static unsigned int speedstep_detect_chipset (void) hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_MC, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (!hostbridge) @@ -236,7 +233,7 @@ static unsigned int _speedstep_get(const struct cpumask *cpus) cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpus); - speed = speedstep_get_processor_frequency(speedstep_processor); + speed = speedstep_get_frequency(speedstep_processor); set_cpus_allowed_ptr(current, &cpus_allowed); dprintk("detected %u kHz as current frequency\n", speed); return speed; @@ -251,11 +248,12 @@ static unsigned int speedstep_get(unsigned int cpu) * speedstep_target - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. */ -static int speedstep_target (struct cpufreq_policy *policy, +static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { @@ -264,7 +262,8 @@ static int speedstep_target (struct cpufreq_policy *policy, cpumask_t cpus_allowed; int i; - if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = _speedstep_get(policy->cpus); @@ -308,7 +307,7 @@ static int speedstep_target (struct cpufreq_policy *policy, * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. */ -static int speedstep_verify (struct cpufreq_policy *policy) +static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } @@ -344,7 +343,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) return -EIO; dprintk("currently at %s speed setting - %i MHz\n", - (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ @@ -352,9 +352,9 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) - return (result); + return result; - cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); + cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); return 0; } @@ -366,7 +366,7 @@ static int speedstep_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* speedstep_attr[] = { +static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -396,13 +396,15 @@ static int __init speedstep_init(void) /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { - dprintk("Intel(R) SpeedStep(TM) capable processor not found\n"); + dprintk("Intel(R) SpeedStep(TM) capable processor " + "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { - dprintk("Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n"); + dprintk("Intel(R) SpeedStep(TM) for this chipset not " + "(yet) available.\n"); return -ENODEV; } @@ -431,9 +433,11 @@ static void __exit speedstep_exit(void) } -MODULE_AUTHOR ("Dave Jones , Dominik Brodowski "); -MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones , " + "Dominik Brodowski "); +MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets " + "with ICH-M southbridges."); +MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index cdac7d62369..55c696daa05 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -18,10 +18,13 @@ #include #include "speedstep-lib.h" -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-lib", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-lib", msg) + +#define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK -static int relaxed_check = 0; +static int relaxed_check; #else #define relaxed_check 0 #endif @@ -30,14 +33,14 @@ static int relaxed_check = 0; * GET PROCESSOR CORE SPEED IN KHZ * *********************************************************************/ -static unsigned int pentium3_get_frequency (unsigned int processor) +static unsigned int pentium3_get_frequency(unsigned int processor) { - /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ + /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ struct { unsigned int ratio; /* Frequency Multiplier (x10) */ u8 bitmap; /* power on configuration bits [27, 25:22] (in MSR 0x2a) */ - } msr_decode_mult [] = { + } msr_decode_mult[] = { { 30, 0x01 }, { 35, 0x05 }, { 40, 0x02 }, @@ -52,7 +55,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) { 85, 0x26 }, { 90, 0x20 }, { 100, 0x2b }, - { 0, 0xff } /* error or unknown value */ + { 0, 0xff } /* error or unknown value */ }; /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ @@ -60,7 +63,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) unsigned int value; /* Front Side Bus speed in MHz */ u8 bitmap; /* power on configuration bits [18: 19] (in MSR 0x2a) */ - } msr_decode_fsb [] = { + } msr_decode_fsb[] = { { 66, 0x0 }, { 100, 0x2 }, { 133, 0x1 }, @@ -85,7 +88,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) } /* decode the multiplier */ - if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) { + if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { dprintk("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else @@ -97,9 +100,10 @@ static unsigned int pentium3_get_frequency (unsigned int processor) j++; } - dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); + dprintk("speed is %u\n", + (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); - return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); + return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; } @@ -112,20 +116,23 @@ static unsigned int pentiumM_get_frequency(void) /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { - printk(KERN_DEBUG "speedstep-lib: PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp); + printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n", + msr_lo, msr_tmp); return 0; } msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); + dprintk("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * 100 * 1000)); - return (msr_tmp * 100 * 1000); + return msr_tmp * 100 * 1000; } static unsigned int pentium_core_get_frequency(void) { u32 fsb = 0; u32 msr_lo, msr_tmp; + int ret; rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp); /* see table B-2 of 25366920.pdf */ @@ -153,12 +160,15 @@ static unsigned int pentium_core_get_frequency(void) } rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); + dprintk("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * fsb)); - return (msr_tmp * fsb); + ret = (msr_tmp * fsb); + return ret; } @@ -167,6 +177,7 @@ static unsigned int pentium4_get_frequency(void) struct cpuinfo_x86 *c = 
&boot_cpu_data; u32 msr_lo, msr_hi, mult; unsigned int fsb = 0; + unsigned int ret; rdmsr(0x2c, msr_lo, msr_hi); @@ -195,44 +206,47 @@ static unsigned int pentium4_get_frequency(void) } if (!fsb) - printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to \n"); + printk(KERN_DEBUG PFX "couldn't detect FSB speed. " + "Please send an e-mail to \n"); /* Multiplier. */ mult = msr_lo >> 24; - dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); + dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + fsb, mult, (fsb * mult)); - return (fsb * mult); + ret = (fsb * mult); + return ret; } -unsigned int speedstep_get_processor_frequency(unsigned int processor) +unsigned int speedstep_get_frequency(unsigned int processor) { switch (processor) { - case SPEEDSTEP_PROCESSOR_PCORE: + case SPEEDSTEP_CPU_PCORE: return pentium_core_get_frequency(); - case SPEEDSTEP_PROCESSOR_PM: + case SPEEDSTEP_CPU_PM: return pentiumM_get_frequency(); - case SPEEDSTEP_PROCESSOR_P4D: - case SPEEDSTEP_PROCESSOR_P4M: + case SPEEDSTEP_CPU_P4D: + case SPEEDSTEP_CPU_P4M: return pentium4_get_frequency(); - case SPEEDSTEP_PROCESSOR_PIII_T: - case SPEEDSTEP_PROCESSOR_PIII_C: - case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: return pentium3_get_frequency(processor); default: return 0; }; return 0; } -EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); +EXPORT_SYMBOL_GPL(speedstep_get_frequency); /********************************************************************* * DETECT SPEEDSTEP-CAPABLE PROCESSOR * *********************************************************************/ -unsigned int speedstep_detect_processor (void) +unsigned int speedstep_detect_processor(void) { struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; @@ -261,7 +275,7 @@ unsigned int speedstep_detect_processor (void) * sample has ebx = 0x0f, production has 0x0e. */ if ((ebx == 0x0e) || (ebx == 0x0f)) - return SPEEDSTEP_PROCESSOR_P4M; + return SPEEDSTEP_CPU_P4M; break; case 7: /* @@ -272,7 +286,7 @@ unsigned int speedstep_detect_processor (void) * samples are only of B-stepping... */ if (ebx == 0x0e) - return SPEEDSTEP_PROCESSOR_P4M; + return SPEEDSTEP_CPU_P4M; break; case 9: /* @@ -288,10 +302,13 @@ unsigned int speedstep_detect_processor (void) * M-P4-Ms may have either ebx=0xe or 0xf [see above] * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] * also, M-P4M HTs have ebx=0x8, too - * For now, they are distinguished by the model_id string + * For now, they are distinguished by the model_id + * string */ - if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL)) - return SPEEDSTEP_PROCESSOR_P4M; + if ((ebx == 0x0e) || + (strstr(c->x86_model_id, + "Mobile Intel(R) Pentium(R) 4") != NULL)) + return SPEEDSTEP_CPU_P4M; break; default: break; @@ -301,7 +318,8 @@ unsigned int speedstep_detect_processor (void) switch (c->x86_model) { case 0x0B: /* Intel PIII [Tualatin] */ - /* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); dprintk("ebx is %x\n", ebx); @@ -313,14 +331,15 @@ unsigned int speedstep_detect_processor (void) /* So far all PIII-M processors support SpeedStep. 
See * Intel's 24540640.pdf of June 2003 */ - return SPEEDSTEP_PROCESSOR_PIII_T; + return SPEEDSTEP_CPU_PIII_T; case 0x08: /* Intel PIII [Coppermine] */ /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); + dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) return 0; @@ -332,13 +351,15 @@ unsigned int speedstep_detect_processor (void) * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); - if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { + dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + msr_lo, msr_hi); + if ((msr_hi & (1<<18)) && + (relaxed_check ? 1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { dprintk("early PIII version\n"); - return SPEEDSTEP_PROCESSOR_PIII_C_EARLY; + return SPEEDSTEP_CPU_PIII_C_EARLY; } else - return SPEEDSTEP_PROCESSOR_PIII_C; + return SPEEDSTEP_CPU_PIII_C; } default: @@ -369,7 +390,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, dprintk("trying to determine both speeds\n"); /* get current speed */ - prev_speed = speedstep_get_processor_frequency(processor); + prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; @@ -379,7 +400,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, /* switch to low state */ set_state(SPEEDSTEP_LOW); - *low_speed = speedstep_get_processor_frequency(processor); + *low_speed = speedstep_get_frequency(processor); if (!*low_speed) { ret = -EIO; goto out; @@ -398,7 +419,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, if (transition_latency) do_gettimeofday(&tv2); - *high_speed = speedstep_get_processor_frequency(processor); + *high_speed = speedstep_get_frequency(processor); if (!*high_speed) { ret = -EIO; goto out; @@ -426,9 +447,12 @@ unsigned int speedstep_get_freqs(unsigned int processor, /* check if the latency measurement is too high or too low * and set it to a safe value (500uSec) in that case */ - if (*transition_latency > 10000000 || *transition_latency < 50000) { - printk (KERN_WARNING "speedstep: frequency transition measured seems out of " - "range (%u nSec), falling back to a safe one of %u nSec.\n", + if (*transition_latency > 10000000 || + *transition_latency < 50000) { + printk(KERN_WARNING PFX "frequency transition " + "measured seems out of range (%u " + "nSec), falling back to a safe one of" + "%u nSec.\n", *transition_latency, 500000); *transition_latency = 500000; } @@ -436,15 +460,16 @@ unsigned int speedstep_get_freqs(unsigned int processor, out: local_irq_restore(flags); - return (ret); + return ret; } EXPORT_SYMBOL_GPL(speedstep_get_freqs); #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK module_param(relaxed_check, int, 0444); -MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability."); +MODULE_PARM_DESC(relaxed_check, + "Don't do all checks for speedstep capability."); #endif -MODULE_AUTHOR ("Dominik Brodowski "); -MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dominik Brodowski "); +MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); +MODULE_LICENSE("GPL"); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h index 
b11bcc608ca..2b6c04e5a30 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h @@ -12,17 +12,17 @@ /* processors */ -#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ -#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ -#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ -#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */ +#define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */ +#define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */ +#define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */ +#define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */ /* the following processors are not speedstep-capable and are not auto-detected * in speedstep_detect_processor(). However, their speed can be detected using - * the speedstep_get_processor_frequency() call. */ -#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ -#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ -#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */ + * the speedstep_get_frequency() call. */ +#define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */ +#define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */ +#define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */ /* speedstep states -- only two of them */ @@ -34,7 +34,7 @@ extern unsigned int speedstep_detect_processor (void); /* detect the current speed (in khz) of the processor */ -extern unsigned int speedstep_get_processor_frequency(unsigned int processor); +extern unsigned int speedstep_get_frequency(unsigned int processor); /* detect the low and high speeds of the processor. The callback diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index 8a85c93bd62..befea088e4f 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c @@ -19,8 +19,8 @@ #include #include #include +#include #include -#include #include "speedstep-lib.h" @@ -30,12 +30,12 @@ * If user gives it, these are used. * */ -static int smi_port = 0; -static int smi_cmd = 0; -static unsigned int smi_sig = 0; +static int smi_port; +static int smi_cmd; +static unsigned int smi_sig; /* info about the processor */ -static unsigned int speedstep_processor = 0; +static unsigned int speedstep_processor; /* * There are only two frequency states for each processor. Values @@ -56,12 +56,13 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * of DMA activity going on? */ #define SMI_TRIES 5 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-smi", msg) /** * speedstep_smi_ownership */ -static int speedstep_smi_ownership (void) +static int speedstep_smi_ownership(void) { u32 command, result, magic, dummy; u32 function = GET_SPEEDSTEP_OWNER; @@ -70,16 +71,18 @@ static int speedstep_smi_ownership (void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); - dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port); + dprintk("trying to obtain ownership with command %x at port %x\n", + command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" - : "=D" (result), "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), - "=S" (dummy) + : "=D" (result), + "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), + "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), - "D" (0), "S" (magic) + "D" (0), "S" (magic) : "memory" ); @@ -97,10 +100,10 @@ static int speedstep_smi_ownership (void) * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing * shows that the latter occurs if !(ist_info.event & 0xFFFF). */ -static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) +static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) { u32 command, result = 0, edi, high_mhz, low_mhz, dummy; - u32 state=0; + u32 state = 0; u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { @@ -110,17 +113,25 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port); + dprintk("trying to determine frequencies with command %x at port %x\n", + command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" - : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi), "=S" (dummy) - : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) + : "=a" (result), + "=b" (high_mhz), + "=c" (low_mhz), + "=d" (state), "=D" (edi), "=S" (dummy) + : "a" (command), + "b" (function), + "c" (state), + "d" (smi_port), "S" (0), "D" (0) ); - dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); + dprintk("result %x, low_freq %u, high_freq %u\n", + result, low_mhz, high_mhz); /* abort if results are obviously incorrect... 
*/ if ((high_mhz + low_mhz) < 600) @@ -137,26 +148,30 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ -static int speedstep_get_state (void) +static int speedstep_get_state(void) { - u32 function=GET_SPEEDSTEP_STATE; + u32 function = GET_SPEEDSTEP_STATE; u32 result, state, edi, command, dummy; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port); + dprintk("trying to determine current setting with command %x " + "at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" - : "=a" (result), "=b" (state), "=D" (edi), "=c" (dummy), "=d" (dummy), "=S" (dummy) - : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0), "D" (0) + : "=a" (result), + "=b" (state), "=D" (edi), + "=c" (dummy), "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (0), + "d" (smi_port), "S" (0), "D" (0) ); dprintk("state is %x, result is %x\n", state, result); - return (state & 1); + return state & 1; } @@ -165,11 +180,11 @@ static int speedstep_get_state (void) * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ -static void speedstep_set_state (unsigned int state) +static void speedstep_set_state(unsigned int state) { unsigned int result = 0, command, new_state, dummy; unsigned long flags; - unsigned int function=SET_SPEEDSTEP_STATE; + unsigned int function = SET_SPEEDSTEP_STATE; unsigned int retry = 0; if (state > 0x1) @@ -180,11 +195,14 @@ static void speedstep_set_state (unsigned int state) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port); + dprintk("trying to set frequency to state %u " + "with command %x at port %x\n", + state, command, smi_port); do { if (retry) { - dprintk("retry %u, previous result %u, waiting...\n", retry, result); + dprintk("retry %u, previous result %u, waiting...\n", + retry, result); mdelay(retry * 50); } retry++; @@ -192,20 +210,26 @@ static void speedstep_set_state (unsigned int state) "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" - : "=b" (new_state), "=D" (result), "=c" (dummy), "=a" (dummy), - "=d" (dummy), "=S" (dummy) - : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) + : "=b" (new_state), "=D" (result), + "=c" (dummy), "=a" (dummy), + "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (state), + "d" (smi_port), "S" (0), "D" (0) ); } while ((new_state != state) && (retry <= SMI_TRIES)); /* enable IRQs */ local_irq_restore(flags); - if (new_state == state) { - dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); - } else { - printk(KERN_ERR "cpufreq: change to state %u failed with new_state %u and result %u\n", state, new_state, result); - } + if (new_state == state) + dprintk("change to %u MHz succeeded after %u tries " + "with result %u\n", + (speedstep_freqs[new_state].frequency / 1000), + retry, result); + else + printk(KERN_ERR "cpufreq: change to state %u " + "failed with new_state %u and result %u\n", + state, new_state, result); return; } @@ -219,13 +243,14 @@ static void speedstep_set_state (unsigned int state) * * Sets a new CPUFreq policy/freq. 
*/ -static int speedstep_target (struct cpufreq_policy *policy, +static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; struct cpufreq_freqs freqs; - if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = speedstep_freqs[speedstep_get_state()].frequency; @@ -250,7 +275,7 @@ static int speedstep_target (struct cpufreq_policy *policy, * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. */ -static int speedstep_verify (struct cpufreq_policy *policy) +static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } @@ -259,7 +284,8 @@ static int speedstep_verify (struct cpufreq_policy *policy) static int speedstep_cpu_init(struct cpufreq_policy *policy) { int result; - unsigned int speed,state; + unsigned int speed, state; + unsigned int *low, *high; /* capability check */ if (policy->cpu != 0) @@ -272,19 +298,23 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) } /* detect low and high frequency */ - result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency); + low = &speedstep_freqs[SPEEDSTEP_LOW].frequency; + high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency; + + result = speedstep_smi_get_freqs(low, high); if (result) { - /* fall back to speedstep_lib.c dection mechanism: try both states out */ - dprintk("could not detect low and high frequencies by SMI call.\n"); + /* fall back to speedstep_lib.c dection mechanism: + * try both states out */ + dprintk("could not detect low and high frequencies " + "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, - &speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + low, high, NULL, &speedstep_set_state); if (result) { - dprintk("could not detect two different speeds -- aborting.\n"); + dprintk("could not detect two different speeds" + " -- aborting.\n"); return result; } else dprintk("workaround worked.\n"); @@ -295,7 +325,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) speed = speedstep_freqs[state].frequency; dprintk("currently at %s speed setting - %i MHz\n", - (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ @@ -304,7 +335,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) - return (result); + return result; cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); @@ -321,7 +352,7 @@ static unsigned int speedstep_get(unsigned int cpu) { if (cpu) return -ENODEV; - return speedstep_get_processor_frequency(speedstep_processor); + return speedstep_get_frequency(speedstep_processor); } @@ -335,7 +366,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) return result; } -static struct freq_attr* speedstep_attr[] = { +static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -364,21 +395,23 @@ static int __init speedstep_init(void) speedstep_processor = speedstep_detect_processor(); switch (speedstep_processor) { - case SPEEDSTEP_PROCESSOR_PIII_T: - case SPEEDSTEP_PROCESSOR_PIII_C: - case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: break; default: speedstep_processor = 0; } if (!speedstep_processor) { - dprintk ("No supported Intel CPU detected.\n"); + dprintk("No supported Intel CPU detected.\n"); return -ENODEV; } - dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n", - ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); + dprintk("signature:0x%.8lx, command:0x%.8lx, " + "event:0x%.8lx, perf_level:0x%.8lx.\n", + ist_info.signature, ist_info.command, + ist_info.event, ist_info.perf_level); /* Error if no IST-SMI BIOS or no PARM sig= 'ISGE' aka 'Intel Speedstep Gate E' */ @@ -416,17 +449,20 @@ static void __exit speedstep_exit(void) cpufreq_unregister_driver(&speedstep_driver); } -module_param(smi_port, int, 0444); -module_param(smi_cmd, int, 0444); -module_param(smi_sig, uint, 0444); +module_param(smi_port, int, 0444); +module_param(smi_cmd, int, 0444); +module_param(smi_sig, uint, 0444); -MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2"); -MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82"); -MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface."); +MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value " + "-- Intel's default setting is 0xb2"); +MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value " + "-- Intel's default setting is 0x82"); +MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the " + "SMI interface."); -MODULE_AUTHOR ("Hiroshi Miura"); -MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Hiroshi Miura"); +MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface."); +MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit); -- cgit v1.2.3 From b9e7638a301b1245d4675087a05fa90fb4fa1845 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 00:32:26 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k7 The asm/timer.h warning can be ignored, it's needed for recalibrate_cpu_khz() Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 237 +++++++++++++++++------------- 1 file changed, 137 insertions(+), 100 deletions(-) (limited to 'arch') diff --git 
a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 9cd73d15743..3c28ccd4974 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -6,10 +6,12 @@ * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by AMD. * - * Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt. - * - We cli/sti on stepping A0 CPUs around the FID/VID transition. - * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect. - * - We disable half multipliers if ACPI is used on A0 stepping CPUs. + * Errata 5: + * CPU may fail to execute a FID/VID change in presence of interrupt. + * - We cli/sti on stepping A0 CPUs around the FID/VID transition. + * Errata 15: + * CPU with half frequency multipliers may hang upon wakeup from disconnect. + * - We disable half multipliers if ACPI is used on A0 stepping CPUs. */ #include @@ -20,11 +22,11 @@ #include #include #include +#include +#include +#include /* Needed for recalibrate_cpu_khz() */ #include -#include -#include -#include #include #ifdef CONFIG_X86_POWERNOW_K7_ACPI @@ -58,9 +60,9 @@ struct pst_s { union powernow_acpi_control_t { struct { unsigned long fid:5, - vid:5, - sgtc:20, - res1:2; + vid:5, + sgtc:20, + res1:2; } bits; unsigned long val; }; @@ -101,7 +103,8 @@ static unsigned int fsb; static unsigned int latency; static char have_a0; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k7", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "powernow-k7", msg) static int check_fsb(unsigned int fsbspeed) { @@ -109,7 +112,7 @@ static int check_fsb(unsigned int fsbspeed) unsigned int f = fsb / 1000; delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; - return (delta < 5); + return delta < 5; } static int check_powernow(void) @@ -117,24 +120,26 @@ static int check_powernow(void) struct cpuinfo_x86 *c = &cpu_data(0); unsigned int maxei, eax, ebx, ecx, edx; - if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) { + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) { #ifdef MODULE - printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n"); + printk(KERN_INFO PFX "This module only works with " + "AMD K7 CPUs\n"); #endif return 0; } /* Get maximum capabilities */ - maxei = cpuid_eax (0x80000000); + maxei = cpuid_eax(0x80000000); if (maxei < 0x80000007) { /* Any powernow info ? */ #ifdef MODULE - printk (KERN_INFO PFX "No powernow capabilities detected\n"); + printk(KERN_INFO PFX "No powernow capabilities detected\n"); #endif return 0; } if ((c->x86_model == 6) && (c->x86_mask == 0)) { - printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n"); + printk(KERN_INFO PFX "K7 660[A0] core detected, " + "enabling errata workarounds\n"); have_a0 = 1; } @@ -144,37 +149,42 @@ static int check_powernow(void) if (!(edx & (1 << 1 | 1 << 2))) return 0; - printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: "); + printk(KERN_INFO PFX "PowerNOW! Technology present. 
Can scale: "); if (edx & 1 << 1) { - printk ("frequency"); - can_scale_bus=1; + printk("frequency"); + can_scale_bus = 1; } if ((edx & (1 << 1 | 1 << 2)) == 0x6) - printk (" and "); + printk(" and "); if (edx & 1 << 2) { - printk ("voltage"); - can_scale_vid=1; + printk("voltage"); + can_scale_vid = 1; } - printk (".\n"); + printk(".\n"); return 1; } +static void invalidate_entry(unsigned int entry) +{ + powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} -static int get_ranges (unsigned char *pst) +static int get_ranges(unsigned char *pst) { unsigned int j; unsigned int speed; u8 fid, vid; - powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); + powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); if (!powernow_table) return -ENOMEM; - for (j=0 ; j < number_scales; j++) { + for (j = 0 ; j < number_scales; j++) { fid = *pst++; powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; @@ -182,10 +192,10 @@ static int get_ranges (unsigned char *pst) speed = powernow_table[j].frequency; - if ((fid_codes[fid] % 10)==5) { + if ((fid_codes[fid] % 10) == 5) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (have_a0 == 1) - powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(j); #endif } @@ -197,7 +207,7 @@ static int get_ranges (unsigned char *pst) vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ - dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, @@ -214,13 +224,13 @@ static void change_FID(int fid) { union msr_fidvidctl fidvidctl; - rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; - wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -229,18 +239,18 @@ static void change_VID(int vid) { union msr_fidvidctl fidvidctl; - rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; - wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } -static void change_speed (unsigned int index) +static void change_speed(unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; @@ -257,7 +267,7 @@ static void change_speed (unsigned int index) freqs.cpu = 0; - rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; @@ -321,12 +331,14 @@ static int powernow_acpi_init(void) goto err1; } - if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (acpi_processor_perf->control_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } - if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (acpi_processor_perf->status_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } @@ -338,7 +350,8 @@ static int powernow_acpi_init(void) goto err2; } - powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL); + powernow_table = 
kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); if (!powernow_table) { retval = -ENOMEM; goto err2; @@ -352,7 +365,7 @@ static int powernow_acpi_init(void) unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; - dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, @@ -381,12 +394,12 @@ static int powernow_acpi_init(void) if (speed % 1000 > 0) speed_mhz++; - if ((fid_codes[fid] % 10)==5) { + if ((fid_codes[fid] % 10) == 5) { if (have_a0 == 1) - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(i); } - dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, @@ -422,7 +435,8 @@ err1: err05: kfree(acpi_processor_perf); err0: - printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); + printk(KERN_WARNING PFX "ACPI perflib can not be used on " + "this platform\n"); acpi_processor_perf = NULL; return retval; } @@ -435,7 +449,14 @@ static int powernow_acpi_init(void) } #endif -static int powernow_decode_bios (int maxfid, int startvid) +static void print_pst_entry(struct pst_s *pst, unsigned int j) +{ + dprintk("PST:%d (@%p)\n", j, pst); + dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); +} + +static int powernow_decode_bios(int maxfid, int startvid) { struct psb_s *psb; struct pst_s *pst; @@ -446,61 +467,67 @@ static int powernow_decode_bios (int maxfid, int startvid) etuple = cpuid_eax(0x80000001); - for (i=0xC0000; i < 0xffff0 ; i+=16) { + for (i = 0xC0000; i < 0xffff0 ; i += 16) { p = phys_to_virt(i); - if (memcmp(p, "AMDK7PNOW!", 10) == 0){ - dprintk ("Found PSB header at %p\n", p); + if (memcmp(p, "AMDK7PNOW!", 10) == 0) { + dprintk("Found PSB header at %p\n", p); psb = (struct psb_s *) p; - dprintk ("Table version: 0x%x\n", psb->tableversion); + dprintk("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { - printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n"); + printk(KERN_INFO PFX "Sorry, only v1.2 tables" + " supported right now\n"); return -ENODEV; } - dprintk ("Flags: 0x%x\n", psb->flags); - if ((psb->flags & 1)==0) { - dprintk ("Mobile voltage regulator\n"); - } else { - dprintk ("Desktop voltage regulator\n"); - } + dprintk("Flags: 0x%x\n", psb->flags); + if ((psb->flags & 1) == 0) + dprintk("Mobile voltage regulator\n"); + else + dprintk("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { - printk(KERN_INFO PFX "BIOS set settling time to %d microseconds. " - "Should be at least 100. Correcting.\n", latency); + printk(KERN_INFO PFX "BIOS set settling time " + "to %d microseconds. " + "Should be at least 100. " + "Correcting.\n", latency); latency = 100; } - dprintk ("Settling Time: %d microseconds.\n", psb->settlingtime); - dprintk ("Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst); + dprintk("Settling Time: %d microseconds.\n", + psb->settlingtime); + dprintk("Has %d PST tables. 
(Only dumping ones " + "relevant to this CPU).\n", + psb->numpst); - p += sizeof (struct psb_s); + p += sizeof(struct psb_s); pst = (struct pst_s *) p; - for (j=0; jnumpst; j++) { + for (j = 0; j < psb->numpst; j++) { pst = (struct pst_s *) p; number_scales = pst->numpstates; - if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) && - (maxfid==pst->maxfid) && (startvid==pst->startvid)) - { - dprintk ("PST:%d (@%p)\n", j, pst); - dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", - pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); - - ret = get_ranges ((char *) pst + sizeof (struct pst_s)); + if ((etuple == pst->cpuid) && + check_fsb(pst->fsbspeed) && + (maxfid == pst->maxfid) && + (startvid == pst->startvid)) { + print_pst_entry(pst, j); + p = (char *)pst + sizeof(struct pst_s); + ret = get_ranges(p); return ret; } else { unsigned int k; - p = (char *) pst + sizeof (struct pst_s); - for (k=0; kident); - printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); - printk(KERN_WARNING "cpufreq scaling has been disabled as a result of this.\n"); + printk(KERN_WARNING PFX + "%s laptop with broken PST tables in BIOS detected.\n", + d->ident); + printk(KERN_WARNING PFX + "You need to downgrade to 3A21 (09/09/2002), or try a newer " + "BIOS than 3A71 (01/20/2003)\n"); + printk(KERN_WARNING PFX + "cpufreq scaling has been disabled as a result of this.\n"); return 0; } @@ -598,7 +631,7 @@ static struct dmi_system_id __initdata powernow_dmi_table[] = { { } }; -static int __init powernow_cpu_init (struct cpufreq_policy *policy) +static int __init powernow_cpu_init(struct cpufreq_policy *policy) { union msr_fidvidstatus fidvidstatus; int result; @@ -606,7 +639,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); @@ -618,19 +651,21 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) dprintk("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { - printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n"); + printk(KERN_INFO PFX "PSB/PST known to be broken. " + "Trying ACPI instead\n"); result = powernow_acpi_init(); } else { - result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID); + result = powernow_decode_bios(fidvidstatus.bits.MFID, + fidvidstatus.bits.SVID); if (result) { - printk (KERN_INFO PFX "Trying ACPI perflib\n"); + printk(KERN_INFO PFX "Trying ACPI perflib\n"); maximum_speed = 0; minimum_speed = -1; latency = 0; result = powernow_acpi_init(); if (result) { - printk (KERN_INFO PFX "ACPI and legacy methods failed\n"); - printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n"); + printk(KERN_INFO PFX + "ACPI and legacy methods failed\n"); } } else { /* SGTC use the bus clock as timer */ @@ -642,10 +677,11 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) if (result) return result; - printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", + printk(KERN_INFO PFX "Minimum speed %d MHz. 
Maximum speed %d MHz.\n", minimum_speed/1000, maximum_speed/1000); - policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); + policy->cpuinfo.transition_latency = + cpufreq_scale(2000000UL, fsb, latency); policy->cur = powernow_get(0); @@ -654,7 +690,8 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) return cpufreq_frequency_table_cpuinfo(policy, powernow_table); } -static int powernow_cpu_exit (struct cpufreq_policy *policy) { +static int powernow_cpu_exit(struct cpufreq_policy *policy) +{ cpufreq_frequency_table_put_attr(policy->cpu); #ifdef CONFIG_X86_POWERNOW_K7_ACPI @@ -669,7 +706,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) { return 0; } -static struct freq_attr* powernow_table_attr[] = { +static struct freq_attr *powernow_table_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -685,15 +722,15 @@ static struct cpufreq_driver powernow_driver = { .attr = powernow_table_attr, }; -static int __init powernow_init (void) +static int __init powernow_init(void) { - if (check_powernow()==0) + if (check_powernow() == 0) return -ENODEV; return cpufreq_register_driver(&powernow_driver); } -static void __exit powernow_exit (void) +static void __exit powernow_exit(void) { cpufreq_unregister_driver(&powernow_driver); } @@ -701,9 +738,9 @@ static void __exit powernow_exit (void) module_param(acpi_force, int, 0444); MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); -MODULE_AUTHOR ("Dave Jones "); -MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones "); +MODULE_DESCRIPTION("Powernow driver for AMD K7 processors."); +MODULE_LICENSE("GPL"); late_initcall(powernow_init); module_exit(powernow_exit); -- cgit v1.2.3 From 0e64a0c982c06a6b8f5e2a7f29eb108fdf257b2f Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 4 Feb 2009 14:37:50 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k8 This driver has so many long function names, and deep nested if's. The remaining warnings will need some code restructuring to clean up.
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 351 +++++++++++++++++++----------- 1 file changed, 226 insertions(+), 125 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6428aa17b40..4dd7e3bdee2 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -33,10 +33,10 @@ #include #include #include /* for current / set_cpus_allowed() */ +#include +#include #include -#include -#include #ifdef CONFIG_X86_POWERNOW_K8_ACPI #include @@ -71,7 +71,8 @@ static u32 find_khz_freq_from_fid(u32 fid) return 1000 * find_freq_from_fid(fid); } -static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate) +static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, + u32 pstate) { return data[pstate].frequency; } @@ -186,7 +187,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) return 1; } - lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; + lo = fid; + lo |= (data->currvid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); @@ -194,7 +197,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) do { wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); if (i++ > 100) { - printk(KERN_ERR PFX "Hardware error - pending bit very stuck - no further pstate changes possible\n"); + printk(KERN_ERR PFX + "Hardware error - pending bit very stuck - " + "no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); @@ -202,14 +207,16 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) count_off_irt(data); if (savevid != data->currvid) { - printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n", - savevid, data->currvid); + printk(KERN_ERR PFX + "vid change on fid trans, old 0x%x, new 0x%x\n", + savevid, data->currvid); return 1; } if (fid != data->currfid) { - printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid, - data->currfid); + printk(KERN_ERR PFX + "fid trans failed, fid 0x%x, curr 0x%x\n", fid, + data->currfid); return 1; } @@ -228,7 +235,9 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) return 1; } - lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; + lo = data->currfid; + lo |= (vid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); @@ -236,20 +245,24 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) do { wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); if (i++ > 100) { - printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); + printk(KERN_ERR PFX "internal error - pending bit " + "very stuck - no further pstate " + "changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); if (savefid != data->currfid) { - printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n", + printk(KERN_ERR PFX "fid changed on vid trans, old " + "0x%x new 0x%x\n", savefid, data->currfid); return 1; } if (vid != data->currvid) { - printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid, - data->currvid); + printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " + "curr 0x%x\n", + vid, data->currvid); return 1; } @@ -261,7 +274,8 @@ static int 
write_new_vid(struct powernow_k8_data *data, u32 vid) * Decreasing vid codes represent increasing voltages: * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. */ -static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step) +static int decrease_vid_code_by_step(struct powernow_k8_data *data, + u32 reqvid, u32 step) { if ((data->currvid - reqvid) > step) reqvid = data->currvid - step; @@ -283,7 +297,8 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate) } /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ -static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid) +static int transition_fid_vid(struct powernow_k8_data *data, + u32 reqfid, u32 reqvid) { if (core_voltage_pre_transition(data, reqvid)) return 1; @@ -298,7 +313,8 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req return 1; if ((reqfid != data->currfid) || (reqvid != data->currvid)) { - printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", + printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " + "curr 0x%x 0x%x\n", smp_processor_id(), reqfid, reqvid, data->currfid, data->currvid); return 1; @@ -311,13 +327,15 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req } /* Phase 1 - core voltage transition ... setup voltage */ -static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid) +static int core_voltage_pre_transition(struct powernow_k8_data *data, + u32 reqvid) { u32 rvosteps = data->rvo; u32 savefid = data->currfid; u32 maxvid, lo; - dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", + dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + "reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -340,7 +358,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid } else { dprintk("ph1: changing vid for rvo, req 0x%x\n", data->currvid - 1); - if (decrease_vid_code_by_step(data, data->currvid - 1, 1)) + if (decrease_vid_code_by_step(data, data->currvid-1, 1)) return 1; rvosteps--; } @@ -350,7 +368,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid); + printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", + data->currfid); return 1; } @@ -363,20 +382,24 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid /* Phase 2 - core frequency transition */ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) { - u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid; + u32 vcoreqfid, vcocurrfid, vcofiddiff; + u32 fid_interval, savevid = data->currvid; - if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { - printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", - reqfid, data->currfid); + if ((reqfid < HI_FID_TABLE_BOTTOM) && + (data->currfid < HI_FID_TABLE_BOTTOM)) { + printk(KERN_ERR PFX "ph2: illegal lo-lo transition " + "0x%x 0x%x\n", reqfid, data->currfid); return 1; } if (data->currfid == reqfid) { - printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid); + printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", + data->currfid); return 0; } - dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", + dprintk("ph2 (cpu%d): starting, 
currfid 0x%x, currvid 0x%x, " + "reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -390,14 +413,14 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) if (reqfid > data->currfid) { if (data->currfid > LO_FID_TABLE_TOP) { - if (write_new_fid(data, data->currfid + fid_interval)) { + if (write_new_fid(data, + data->currfid + fid_interval)) return 1; - } } else { if (write_new_fid - (data, 2 + convert_fid_to_vco_fid(data->currfid))) { + (data, + 2 + convert_fid_to_vco_fid(data->currfid))) return 1; - } } } else { if (write_new_fid(data, data->currfid - fid_interval)) @@ -417,7 +440,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) if (data->currfid != reqfid) { printk(KERN_ERR PFX - "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", + "ph2: mismatch, failed fid transition, " + "curr 0x%x, req 0x%x\n", data->currfid, reqfid); return 1; } @@ -435,7 +459,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) } /* Phase 3 - core voltage transition flow ... jump to the final vid. */ -static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid) +static int core_voltage_post_transition(struct powernow_k8_data *data, + u32 reqvid) { u32 savefid = data->currfid; u32 savereqvid = reqvid; @@ -457,7 +482,8 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi if (data->currvid != reqvid) { printk(KERN_ERR PFX - "ph3: failed vid transition\n, req 0x%x, curr 0x%x", + "ph3: failed vid transition\n, " + "req 0x%x, curr 0x%x", reqvid, data->currvid); return 1; } @@ -508,7 +534,8 @@ static int check_supported_cpu(unsigned int cpu) if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { - printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); + printk(KERN_INFO PFX + "Processor cpuid %x not supported\n", eax); goto out; } @@ -520,8 +547,10 @@ static int check_supported_cpu(unsigned int cpu) } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); - if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - printk(KERN_INFO PFX "Power state transitions not supported\n"); + if ((edx & P_STATE_TRANSITION_CAPABLE) + != P_STATE_TRANSITION_CAPABLE) { + printk(KERN_INFO PFX + "Power state transitions not supported\n"); goto out; } } else { /* must be a HW Pstate capable processor */ @@ -539,7 +568,8 @@ out: return rc; } -static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) +static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, + u8 maxvid) { unsigned int j; u8 lastfid = 0xff; @@ -550,12 +580,14 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 j, pst[j].vid); return -EINVAL; } - if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ + if (pst[j].vid < data->rvo) { + /* vid + rvo >= 0 */ printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" " %d\n", j); return -ENODEV; } - if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ + if (pst[j].vid < maxvid + data->rvo) { + /* vid + rvo >= maxvid */ printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" " %d\n", j); return -ENODEV; @@ -579,23 +611,31 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO FW_BUG PFX "first fid not from lo freq table\n"); + printk(KERN_INFO 
FW_BUG PFX + "first fid not from lo freq table\n"); return 0; } +static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry) +{ + data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} + static void print_basics(struct powernow_k8_data *data) { int j; for (j = 0; j < data->numps; j++) { - if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { + if (data->powernow_table[j].frequency != + CPUFREQ_ENTRY_INVALID) { if (cpu_family == CPU_HW_PSTATE) { - printk(KERN_INFO PFX " %d : pstate %d (%d MHz)\n", - j, + printk(KERN_INFO PFX + " %d : pstate %d (%d MHz)\n", j, data->powernow_table[j].index, data->powernow_table[j].frequency/1000); } else { - printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", + printk(KERN_INFO PFX + " %d : fid 0x%x (%d MHz), vid 0x%x\n", j, data->powernow_table[j].index & 0xff, data->powernow_table[j].frequency/1000, @@ -604,20 +644,25 @@ static void print_basics(struct powernow_k8_data *data) } } if (data->batps) - printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps); + printk(KERN_INFO PFX "Only %d pstates on battery\n", + data->batps); } -static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) +static int fill_powernow_table(struct powernow_k8_data *data, + struct pst_s *pst, u8 maxvid) { struct cpufreq_frequency_table *powernow_table; unsigned int j; - if (data->batps) { /* use ACPI support to get full speed on mains power */ - printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range\n", data->batps); + if (data->batps) { + /* use ACPI support to get full speed on mains power */ + printk(KERN_WARNING PFX + "Only %d pstates usable (use ACPI driver for full " + "range\n", data->batps); data->numps = data->batps; } - for ( j=1; jnumps; j++ ) { + for (j = 1; j < data->numps; j++) { if (pst[j-1].fid >= pst[j].fid) { printk(KERN_ERR PFX "PST out of sequence\n"); return -EINVAL; @@ -640,9 +685,11 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, } for (j = 0; j < data->numps; j++) { + int freq; powernow_table[j].index = pst[j].fid; /* lower 8 bits */ powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ - powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid); + freq = find_khz_freq_from_fid(pst[j].fid); + powernow_table[j].frequency = freq; } powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; powernow_table[data->numps].index = 0; @@ -658,7 +705,8 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, print_basics(data); for (j = 0; j < data->numps; j++) - if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid)) + if ((pst[j].fid == data->currfid) && + (pst[j].vid == data->currvid)) return 0; dprintk("currfid/vid do not match PST, ignoring\n"); @@ -698,7 +746,8 @@ static int find_psb_table(struct powernow_k8_data *data) } data->vstable = psb->vstable; - dprintk("voltage stabilization time: %d(*20us)\n", data->vstable); + dprintk("voltage stabilization time: %d(*20us)\n", + data->vstable); dprintk("flags2: 0x%x\n", psb->flags2); data->rvo = psb->flags2 & 3; @@ -713,11 +762,12 @@ static int find_psb_table(struct powernow_k8_data *data) dprintk("numpst: 0x%x\n", psb->num_tables); cpst = psb->num_tables; - if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){ + if ((psb->cpuid == 0x00000fc0) || + (psb->cpuid == 0x00000fe0)) { thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); - if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0) ) { + if 
((thiscpuid == 0x00000fc0) || + (thiscpuid == 0x00000fe0)) cpst = 1; - } } if (cpst != 1) { printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); @@ -732,7 +782,8 @@ static int find_psb_table(struct powernow_k8_data *data) data->numps = psb->numps; dprintk("numpstates: 0x%x\n", data->numps); - return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); + return fill_powernow_table(data, + (struct pst_s *)(psb+1), maxvid); } /* * If you see this message, complain to BIOS manufacturer. If @@ -750,23 +801,27 @@ static int find_psb_table(struct powernow_k8_data *data) } #ifdef CONFIG_X86_POWERNOW_K8_ACPI -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, + unsigned int index) { + acpi_integer control; + if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) return; - data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; - data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; - data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; - data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; - data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); - data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; -} + control = data->acpi_data.states[index].control; data->irt = (control + >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >> + RVO_SHIFT) & RVO_MASK; data->exttype = (control + >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1 + << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable = + (control >> VST_SHIFT) & VST_MASK; } static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; int ret_val = -ENODEV; + acpi_integer space_id; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); @@ -779,11 +834,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) goto err_out; } - if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + space_id = data->acpi_data.control_register.space_id; + if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { dprintk("Invalid control/status registers (%x - %x)\n", data->acpi_data.control_register.space_id, - data->acpi_data.status_register.space_id); + space_id); goto err_out; } @@ -802,7 +858,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if (ret_val) goto err_out_mem; - powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; + powernow_table[data->acpi_data.state_count].frequency = + CPUFREQ_TABLE_END; powernow_table[data->acpi_data.state_count].index = 0; data->powernow_table = powernow_table; @@ -830,13 +887,15 @@ err_out_mem: err_out: acpi_processor_unregister_performance(&data->acpi_data, data->cpu); - /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ + /* data->acpi_data.state_count informs us at ->exit() + * whether ACPI was used */ data->acpi_data.state_count = 0; return ret_val; } -static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) +static int 
fill_powernow_table_pstate(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) { int i; u32 hi = 0, lo = 0; @@ -848,84 +907,101 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf index = data->acpi_data.states[i].control & HW_PSTATE_MASK; if (index > data->max_hw_pstate) { - printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); - printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + printk(KERN_ERR PFX "invalid pstate %d - " + "bad value %d.\n", i, index); + printk(KERN_ERR PFX "Please report to BIOS " + "manufacturer\n"); + invalidate_entry(data, i); continue; } rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); if (!(hi & HW_PSTATE_VALID_MASK)) { dprintk("invalid pstate %d, ignoring\n", index); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(data, i); continue; } powernow_table[i].index = index; - powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + powernow_table[i].frequency = + data->acpi_data.states[i].core_frequency * 1000; } return 0; } -static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) +static int fill_powernow_table_fidvid(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) { int i; int cntlofreq = 0; + for (i = 0; i < data->acpi_data.state_count; i++) { u32 fid; u32 vid; + u32 freq, index; + acpi_integer status, control; if (data->exttype) { - fid = data->acpi_data.states[i].status & EXT_FID_MASK; - vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; + status = data->acpi_data.states[i].status; + fid = status & EXT_FID_MASK; + vid = (status >> VID_SHIFT) & EXT_VID_MASK; } else { - fid = data->acpi_data.states[i].control & FID_MASK; - vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; + control = data->acpi_data.states[i].control; + fid = control & FID_MASK; + vid = (control >> VID_SHIFT) & VID_MASK; } dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); - powernow_table[i].index = fid; /* lower 8 bits */ - powernow_table[i].index |= (vid << 8); /* upper 8 bits */ - powernow_table[i].frequency = find_khz_freq_from_fid(fid); + index = fid | (vid<<8); + powernow_table[i].index = index; + + freq = find_khz_freq_from_fid(fid); + powernow_table[i].frequency = freq; /* verify frequency is OK */ - if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) || - (powernow_table[i].frequency < (MIN_FREQ * 1000))) { - dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { + dprintk("invalid freq %u kHz, ignoring\n", freq); + invalidate_entry(data, i); continue; } - /* verify voltage is OK - BIOSs are using "off" to indicate invalid */ + /* verify voltage is OK - + * BIOSs are using "off" to indicate invalid */ if (vid == VID_OFF) { dprintk("invalid vid %u, ignoring\n", vid); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(data, i); continue; } /* verify only 1 entry from the lo frequency table */ if (fid < HI_FID_TABLE_BOTTOM) { if (cntlofreq) { - /* if both entries are the same, ignore this one ... 
*/ - if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) || - (powernow_table[i].index != powernow_table[cntlofreq].index)) { - printk(KERN_ERR PFX "Too many lo freq table entries\n"); + /* if both entries are the same, + * ignore this one ... */ + if ((freq != powernow_table[cntlofreq].frequency) || + (index != powernow_table[cntlofreq].index)) { + printk(KERN_ERR PFX + "Too many lo freq table " + "entries\n"); return 1; } - dprintk("double low frequency table entry, ignoring it.\n"); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + dprintk("double low frequency table entry, " + "ignoring it.\n"); + invalidate_entry(data, i); continue; } else cntlofreq = i; } - if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { - printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", - powernow_table[i].frequency, - (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { + printk(KERN_INFO PFX "invalid freq entries " + "%u kHz vs. %u kHz\n", freq, + (unsigned int) + (data->acpi_data.states[i].core_frequency + * 1000)); + invalidate_entry(data, i); continue; } } @@ -935,7 +1011,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { if (data->acpi_data.state_count) - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + acpi_processor_unregister_performance(&data->acpi_data, + data->cpu); free_cpumask_var(data->acpi_data.shared_cpu_map); } @@ -954,14 +1031,25 @@ static int get_transition_latency(struct powernow_k8_data *data) } #else -static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } -static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } +static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) +{ + return -ENODEV; +} +static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) +{ + return; +} +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, + unsigned int index) +{ + return; +} static int get_transition_latency(struct powernow_k8_data *data) { return 0; } #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ /* Take a frequency, and issue the fid/vid transition command */ -static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) +static int transition_frequency_fidvid(struct powernow_k8_data *data, + unsigned int index) { u32 fid = 0; u32 vid = 0; @@ -989,7 +1077,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i return 0; } - if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { + if ((fid < HI_FID_TABLE_BOTTOM) && + (data->currfid < HI_FID_TABLE_BOTTOM)) { printk(KERN_ERR PFX "ignoring illegal change in lo freq table-%x to 0x%x\n", data->currfid, fid); @@ -1017,7 +1106,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i } /* Take a frequency, and issue the hardware pstate transition command */ -static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index) +static int transition_frequency_pstate(struct powernow_k8_data *data, + unsigned int index) { u32 pstate = 0; int res, i; @@ -1029,7 +1119,8 @@ static int 
transition_frequency_pstate(struct powernow_k8_data *data, unsigned i pstate = index & HW_PSTATE_MASK; if (pstate > data->max_hw_pstate) return 0; - freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); + freqs.old = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); for_each_cpu_mask_nr(i, *(data->available_cores)) { @@ -1048,7 +1139,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i } /* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) { cpumask_t oldmask; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); @@ -1087,14 +1179,18 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi dprintk("targ: curr fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); - if ((checkvid != data->currvid) || (checkfid != data->currfid)) { + if ((checkvid != data->currvid) || + (checkfid != data->currfid)) { printk(KERN_INFO PFX - "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", - checkfid, data->currfid, checkvid, data->currvid); + "error - out of sync, fix 0x%x 0x%x, " + "vid 0x%x 0x%x\n", + checkfid, data->currfid, + checkvid, data->currvid); } } - if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) + if (cpufreq_frequency_table_target(pol, data->powernow_table, + targfreq, relation, &newstate)) goto err_out; mutex_lock(&fidvid_mutex); @@ -1114,7 +1210,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi mutex_unlock(&fidvid_mutex); if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate); + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + newstate); else pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; @@ -1164,10 +1261,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) */ if (num_online_cpus() != 1) { #ifndef CONFIG_ACPI_PROCESSOR - printk(KERN_ERR PFX "ACPI Processor support is required " - "for SMP systems but is absent. Please load the " - "ACPI Processor module before starting this " - "driver.\n"); + printk(KERN_ERR PFX + "ACPI Processor support is required for " + "SMP systems but is absent. 
Please load the " + "ACPI Processor module before starting this " + "driver.\n"); #else printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" " ACPI _PSS objects in a way that Linux " @@ -1228,7 +1326,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->available_cores = pol->cpus; if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); else pol->cur = find_khz_freq_from_fid(data->currfid); dprintk("policy current frequency %d kHz\n", pol->cur); @@ -1245,7 +1344,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); if (cpu_family == CPU_HW_PSTATE) - dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate); + dprintk("cpu_init done, current pstate 0x%x\n", + data->currpstate); else dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); @@ -1262,7 +1362,7 @@ err_out: return -ENODEV; } -static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) +static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); @@ -1279,7 +1379,7 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) return 0; } -static unsigned int powernowk8_get (unsigned int cpu) +static unsigned int powernowk8_get(unsigned int cpu) { struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; @@ -1315,7 +1415,7 @@ out: return khz; } -static struct freq_attr* powernow_k8_attr[] = { +static struct freq_attr *powernow_k8_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -1360,7 +1460,8 @@ static void __exit powernowk8_exit(void) cpufreq_unregister_driver(&cpufreq_amd64_driver); } -MODULE_AUTHOR("Paul Devriendt and Mark Langsdorf "); +MODULE_AUTHOR("Paul Devriendt and " + "Mark Langsdorf "); MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 57f4fa699195b761cbea90db5e38b4bc15610c7c Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Wed, 4 Feb 2009 01:17:45 +0100 Subject: [CPUFREQ] powernow-k8: Always compile powernow-k8 driver with ACPI support powernow-k8 driver should always try to get cpufreq info from ACPI. Otherwise it will not be able to detect the transition latency correctly which results in ondemand governor taking a wrong sampling rate which will then result in sever performance loss. Let the user not shoot himself in the foot and always compile in ACPI support for powernow-k8. This also fixes a wrong message if ACPI_PROCESSOR is compiled as a module and #ifndef CONFIG_ACPI_PROCESSOR path is chosen. Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/Kconfig | 19 ++----------------- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 28 ---------------------------- arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 5 +---- 3 files changed, 3 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index 65792c2cc46..52c83987547 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig @@ -87,30 +87,15 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" 
select CPU_FREQ_TABLE + depends on ACPI && ACPI_PROCESSOR help - This adds the CPUFreq driver for mobile AMD Opteron/Athlon64 processors. + This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors. To compile this driver as a module, choose M here: the module will be called powernow-k8. For details, take a look at . - If in doubt, say N. - -config X86_POWERNOW_K8_ACPI - bool - prompt "ACPI Support" if X86_32 - depends on ACPI && X86_POWERNOW_K8 && ACPI_PROCESSOR - depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m) - default y - help - This provides access to the K8s Processor Performance States via ACPI. - This driver is probably required for CPUFreq to work with multi-socket and - SMP systems. It is not required on at least some single-socket yet - multi-core systems, even if SMP is enabled. - - It is safe to say Y here. - config X86_GX_SUSPMOD tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" depends on X86_32 && PCI diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 4dd7e3bdee2..acc06b03194 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -38,11 +38,9 @@ #include -#ifdef CONFIG_X86_POWERNOW_K8_ACPI #include #include #include -#endif #define PFX "powernow-k8: " #define VERSION "version 2.20.00" @@ -800,7 +798,6 @@ static int find_psb_table(struct powernow_k8_data *data) return -ENODEV; } -#ifdef CONFIG_X86_POWERNOW_K8_ACPI static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { @@ -1030,23 +1027,6 @@ static int get_transition_latency(struct powernow_k8_data *data) return 1000 * max_latency; } -#else -static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) -{ - return -ENODEV; -} -static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) -{ - return; -} -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, - unsigned int index) -{ - return; -} -static int get_transition_latency(struct powernow_k8_data *data) { return 0; } -#endif /* CONFIG_X86_POWERNOW_K8_ACPI */ - /* Take a frequency, and issue the fid/vid transition command */ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) @@ -1260,19 +1240,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { -#ifndef CONFIG_ACPI_PROCESSOR - printk(KERN_ERR PFX - "ACPI Processor support is required for " - "SMP systems but is absent. Please load the " - "ACPI Processor module before starting this " - "driver.\n"); -#else printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" " ACPI _PSS objects in a way that Linux " "understands. Please report this to the Linux " "ACPI maintainers and complain to your BIOS " "vendor.\n"); -#endif kfree(data); return -ENODEV; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 8ecc75b6c7c..6c6698feade 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -45,11 +45,10 @@ struct powernow_k8_data { * frequency is in kHz */ struct cpufreq_frequency_table *powernow_table; -#ifdef CONFIG_X86_POWERNOW_K8_ACPI /* the acpi table needs to be kept. 
it's only available if ACPI was * used to determine valid frequency/vid/fid states */ struct acpi_processor_performance acpi_data; -#endif + /* we need to keep track of associated cores, but let cpufreq * handle hotplug events - so just point at cpufreq pol->cpus * structure */ @@ -222,10 +221,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); -#ifdef CONFIG_X86_POWERNOW_K8_ACPI static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); -#endif #ifdef CONFIG_SMP static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) -- cgit v1.2.3 From 79cc56af9fdbeaa91f50289b932d0959b41f9467 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Wed, 4 Feb 2009 11:56:11 +0100 Subject: [CPUFREQ] powernow-k8: Only print error message once, not per core. This is the typical message you get if you plug in a CPU which is newer than your BIOS. It's annoying seeing this message for each core. Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index acc06b03194..c44853fc827 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -794,7 +794,7 @@ static int find_psb_table(struct powernow_k8_data *data) * BIOS and Kernel Developer's Guide, which is available on * www.amd.com */ - printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n"); + printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); return -ENODEV; } @@ -1218,6 +1218,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) struct powernow_k8_data *data; cpumask_t oldmask; int rc; + static int print_once; if (!cpu_online(pol->cpu)) return -ENODEV; @@ -1240,11 +1241,19 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { - printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" - " ACPI _PSS objects in a way that Linux " - "understands. Please report this to the Linux " - "ACPI maintainers and complain to your BIOS " - "vendor.\n"); + /* + * Replace this one with print_once as soon as such a + * thing gets introduced + */ + if (!print_once) { + WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS " + "does not provide ACPI _PSS objects " + "in a way that Linux understands. 
" + "Please report this to the Linux ACPI" + " maintainers and complain to your " + "BIOS vendor.\n"); + print_once++; + } kfree(data); return -ENODEV; } -- cgit v1.2.3 From 3a58df35a64a1e0ac32c30ea629a513dec2fe711 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:36:14 -0500 Subject: [CPUFREQ] checkpatch cleanups for acpi-cpufreq Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 36 +++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4b1c319d30c..3babe1f1e91 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -1,5 +1,5 @@ /* - * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $) + * acpi-cpufreq.c - ACPI Processor P-States Driver * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh @@ -36,16 +36,18 @@ #include #include +#include +#include +#include + #include -#include #include #include #include -#include -#include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "acpi-cpufreq", msg) MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); @@ -95,7 +97,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) perf = data->acpi_data; - for (i=0; istate_count; i++) { + for (i = 0; i < perf->state_count; i++) { if (value == perf->states[i].status) return data->freq_table[i].frequency; } @@ -110,7 +112,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) msr &= INTEL_MSR_RANGE; perf = data->acpi_data; - for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { if (msr == perf->states[data->freq_table[i].index].status) return data->freq_table[i].frequency; } @@ -138,15 +140,13 @@ struct io_addr { u8 bit_width; }; -typedef union { - struct msr_addr msr; - struct io_addr io; -} drv_addr_union; - struct drv_cmd { unsigned int type; const struct cpumask *mask; - drv_addr_union addr; + union { + struct msr_addr msr; + struct io_addr io; + } addr; u32 val; }; @@ -369,7 +369,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, unsigned int cur_freq; unsigned int i; - for (i=0; i<100; i++) { + for (i = 0; i < 100; i++) { cur_freq = extract_freq(get_cur_val(mask), data); if (cur_freq == freq) return 1; @@ -494,7 +494,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) unsigned long freq; unsigned long freqn = perf->states[0].core_frequency * 1000; - for (i=0; i<(perf->state_count-1); i++) { + for (i = 0; i < (perf->state_count-1); i++) { freq = freqn; freqn = perf->states[i+1].core_frequency * 1000; if ((2 * cpu_khz) > (freqn + freq)) { @@ -673,7 +673,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* detect transition latency */ policy->cpuinfo.transition_latency = 0; - for (i=0; istate_count; i++) { + for (i = 0; i < perf->state_count; i++) { if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) policy->cpuinfo.transition_latency = @@ -682,8 +682,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->max_freq = perf->states[0].core_frequency * 1000; /* table init */ - for (i=0; istate_count; i++) 
{ - if (i>0 && perf->states[i].core_frequency >= + for (i = 0; i < perf->state_count; i++) { + if (i > 0 && perf->states[i].core_frequency >= data->freq_table[valid_states-1].frequency / 1000) continue; -- cgit v1.2.3 From 91420220d278584693d11a800b78fdc20e8fe10e Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 4 Feb 2009 15:28:54 -0500 Subject: [CPUFREQ] Use swap() in longhaul.c Remove hand-coded implementation of swap() Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longhaul.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index dc03622a83a..f1c51aea064 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -511,13 +511,10 @@ static int __init longhaul_get_ranges(void) } } if (min_i != j) { - unsigned int temp; - temp = longhaul_table[j].frequency; - longhaul_table[j].frequency = longhaul_table[min_i].frequency; - longhaul_table[min_i].frequency = temp; - temp = longhaul_table[j].index; - longhaul_table[j].index = longhaul_table[min_i].index; - longhaul_table[min_i].index = temp; + swap(longhaul_table[j].frequency, + longhaul_table[min_i].frequency); + swap(longhaul_table[j].index, + longhaul_table[min_i].index); } } -- cgit v1.2.3 From de3ed81d746d7cfc22a0c645244ed9fa71e4a833 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Wed, 18 Feb 2009 18:28:22 +0000 Subject: [CPUFREQ] Change link order of x86 cpufreq modules Change the link order of the cpufreq modules to ensure that they're probed in the preferred order when statically linked in. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile index 560f7760dae..509296df294 100644 --- a/arch/x86/kernel/cpu/cpufreq/Makefile +++ b/arch/x86/kernel/cpu/cpufreq/Makefile @@ -1,6 +1,11 @@ +# Link order matters. K8 is preferred to ACPI because of firmware bugs in early +# K8 systems. ACPI is preferred to all other hardware-specific drivers. +# speedstep-* is preferred over p4-clockmod. + +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o -obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o obj-$(CONFIG_X86_LONGHAUL) += longhaul.o obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o @@ -10,7 +15,6 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -- cgit v1.2.3 From 0cb8bc256093e716d2a0a4a721f36c625a3f7634 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 18 Feb 2009 14:11:00 -0500 Subject: [CPUFREQ] powernow-k8: Use a common exit path. a0abd520fd69295f4a3735e29a9448a32e101d47 introduced a slew of extra kfree/return -ENODEV pairs. This replaces them all with gotos. 
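As an aside, the idiom this converts to can be shown with a minimal standalone sketch (plain userspace C with made-up names, not the driver code itself): each failure site becomes a one-line goto to a single label that undoes the allocation, instead of repeating the kfree()/return pair.

    #include <errno.h>
    #include <stdlib.h>

    struct example_data {
        int cpu;
    };

    /* stand-in for a probe step that may fail */
    static int example_check(struct example_data *data)
    {
        return data->cpu != 0;
    }

    static int example_init(int cpu)
    {
        struct example_data *data;

        data = malloc(sizeof(*data));
        if (!data)
            return -ENOMEM;
        data->cpu = cpu;

        if (example_check(data))
            goto err_out;       /* was: free(data); return -ENODEV; */

        /* further setup steps can also just "goto err_out" on failure */
        return 0;

    err_out:                    /* one exit path does the cleanup */
        free(data);
        return -ENODEV;
    }

In the patch below the same split is done twice: err_out frees the data, while the later failure sites use err_out_unmask, which first restores the saved cpumask and tears down the ACPI state before falling through to the kfree().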
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c44853fc827..a15ac94e0b9 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1254,21 +1254,18 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) "BIOS vendor.\n"); print_once++; } - kfree(data); - return -ENODEV; + goto err_out; } if (pol->cpu != 0) { printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " "CPU other than CPU0. Complain to your BIOS " "vendor.\n"); - kfree(data); - return -ENODEV; + goto err_out; } rc = find_psb_table(data); - if (rc) { - kfree(data); - return -ENODEV; - } + if (rc) + goto err_out; + /* Take a crude guess here. * That guess was in microseconds, so multiply with 1000 */ pol->cpuinfo.transition_latency = ( @@ -1283,16 +1280,16 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; + goto err_out_unmask; } if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing init, change pending bit set\n"); - goto err_out; + goto err_out_unmask; } if (query_current_values_with_pending_wait(data)) - goto err_out; + goto err_out_unmask; if (cpu_family == CPU_OPTERON) fidvid_msr_init(); @@ -1335,10 +1332,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) return 0; -err_out: +err_out_unmask: set_cpus_allowed_ptr(current, &oldmask); powernow_k8_cpu_exit_acpi(data); +err_out: kfree(data); return -ENODEV; } -- cgit v1.2.3 From 199785eac892a1fa1b71cc22bec58e8b156d9311 Mon Sep 17 00:00:00 2001 From: Matthias-Christian Ott Date: Fri, 20 Feb 2009 20:52:17 -0500 Subject: [CPUFREQ] p4-clockmod reports wrong frequency. 
http://bugzilla.kernel.org/show_bug.cgi?id=10968 [ Updated for current tree, and fixed compile failure when p4-clockmod was built modular -- davej] From: Matthias-Christian Ott Signed-off-by: Dominik Brodowski Signed-off-by: Dave Jones --- arch/x86/include/asm/timer.h | 2 +- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 7 ++++++ arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 34 +++++++++++++++++------------ arch/x86/kernel/tsc.c | 3 --- 4 files changed, 28 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 2bb6a835c45..4f5c2472485 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -11,8 +11,8 @@ unsigned long native_calibrate_tsc(void); #ifdef CONFIG_X86_32 extern int timer_ack; +#endif extern int recalibrate_cpu_khz(void); -#endif /* CONFIG_X86_32 */ extern int no_timer_check; diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 46a2a7a5314..1778402305e 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -31,6 +31,7 @@ #include #include +#include #include "speedstep-lib.h" @@ -224,6 +225,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) dprintk("has errata -- disabling low frequencies\n"); } + if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && + c->x86_model < 2) { + /* switch to maximum frequency and measure result */ + cpufreq_p4_setdc(policy->cpu, DC_DISABLE); + recalibrate_cpu_khz(); + } /* get max frequency */ stock_freq = cpufreq_p4_get_frequency(c); if (!stock_freq) diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 55c696daa05..2e3c6862657 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -16,6 +16,7 @@ #include #include +#include #include "speedstep-lib.h" #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ @@ -178,6 +179,15 @@ static unsigned int pentium4_get_frequency(void) u32 msr_lo, msr_hi, mult; unsigned int fsb = 0; unsigned int ret; + u8 fsb_code; + + /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency + * to System Bus Frequency Ratio Field in the Processor Frequency + * Configuration Register of the MSR. Therefore the current + * frequency cannot be calculated and has to be measured. + */ + if (c->x86_model < 2) + return cpu_khz; rdmsr(0x2c, msr_lo, msr_hi); @@ -188,21 +198,17 @@ static unsigned int pentium4_get_frequency(void) * revision #12 in Table B-1: MSRs in the Pentium 4 and * Intel Xeon Processors, on page B-4 and B-5. 
*/ - if (c->x86_model < 2) + fsb_code = (msr_lo >> 16) & 0x7; + switch (fsb_code) { + case 0: fsb = 100 * 1000; - else { - u8 fsb_code = (msr_lo >> 16) & 0x7; - switch (fsb_code) { - case 0: - fsb = 100 * 1000; - break; - case 1: - fsb = 13333 * 10; - break; - case 2: - fsb = 200 * 1000; - break; - } + break; + case 1: + fsb = 13333 * 10; + break; + case 2: + fsb = 200 * 1000; + break; } if (!fsb) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e5816863..5ad22f8f5f3 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -523,8 +523,6 @@ unsigned long native_calibrate_tsc(void) return tsc_pit_min; } -#ifdef CONFIG_X86_32 -/* Only called from the Powernow K7 cpu freq driver */ int recalibrate_cpu_khz(void) { #ifndef CONFIG_SMP @@ -546,7 +544,6 @@ int recalibrate_cpu_khz(void) EXPORT_SYMBOL(recalibrate_cpu_khz); -#endif /* CONFIG_X86_32 */ /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) -- cgit v1.2.3 From eb3092cee79e4efa5d3e9c81c7e6ca90318cebb8 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Sat, 21 Feb 2009 01:58:47 +0000 Subject: [CPUFREQ] Make cpufreq-nforce2 less obnoxious Not owning an nforce2 is a sign of good taste, not an error. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 99262906838..733093d6043 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -424,7 +424,7 @@ static int __init nforce2_init(void) /* detect chipset */ if (nforce2_detect_chipset()) { - printk(KERN_ERR PFX "No nForce2 chipset.\n"); + printk(KERN_INFO PFX "No nForce2 chipset.\n"); return -ENODEV; } -- cgit v1.2.3 From 3255aa2eb636a508fc82a73fabbb8aaf2ff23c0f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 25 Feb 2009 08:21:52 +0100 Subject: x86, mm: pass in 'total' to __copy_from_user_*nocache() Impact: cleanup, enable future change Add a 'total bytes copied' parameter to __copy_from_user_*nocache(), and update all the callsites. The parameter is not used yet - architecture code can use it to more intelligently decide whether the copy should be cached or non-temporal. 
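As a rough userspace sketch of the heuristic this parameter is meant to enable (the helpers here are stand-ins; the real cutoff is only wired up in the follow-up commit further down), the idea is to key the cached vs. non-temporal decision off the size of the whole transfer rather than the current chunk, so that the sub-page tail of a large write is handled the same way as its page-sized chunks:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* stand-ins for the cached and the non-temporal copy primitives */
    static void copy_cached(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
    }

    static void copy_nontemporal(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);    /* the real primitive uses movnti stores */
    }

    /* 'size' is the current chunk, 'total' the whole user copy */
    static void copy_chunk(void *dst, const void *src,
                           size_t size, size_t total)
    {
        if (total >= PAGE_SIZE)
            copy_nontemporal(dst, src, size);
        else
            copy_cached(dst, src, size);
    }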
Cc: Salman Qazi Cc: Nick Piggin Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess_32.h | 4 ++-- arch/x86/include/asm/uaccess_64.h | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 5e06259e90e..a0ba6138697 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) } static __always_inline unsigned long __copy_from_user_nocache(void *to, - const void __user *from, unsigned long n) + const void __user *from, unsigned long n, unsigned long total) { might_fault(); if (__builtin_constant_p(n)) { @@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, - unsigned long n) + unsigned long n, unsigned long total) { return __copy_from_user_ll_nocache_nozero(to, from, n); } diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 987a2c10fe2..a748253db0c 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -189,7 +189,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); static inline int __copy_from_user_nocache(void *dst, const void __user *src, - unsigned size) + unsigned size, unsigned long total) { might_sleep(); /* @@ -205,8 +205,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src, } static inline int __copy_from_user_inatomic_nocache(void *dst, - const void __user *src, - unsigned size) + const void __user *src, unsigned size, unsigned total) { if (likely(size >= PAGE_SIZE)) return __copy_user_nocache(dst, src, size, 0); -- cgit v1.2.3 From 95108fa34a83ffd97e0af959e4b28d7c62008781 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 25 Feb 2009 08:22:20 +0100 Subject: x86: usercopy: check for total size when deciding non-temporal cutoff Impact: make more types of copies non-temporal This change makes the following simple fix: 30d697f: x86: fix performance regression in write() syscall A bit more sophisticated: we check the 'total' number of bytes written to decide whether to copy in a cached or a non-temporal way. This will for example cause the tail (modulo 4096 bytes) chunk of a large write() to be non-temporal too - not just the page-sized chunks. Cc: Salman Qazi Cc: Nick Piggin Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index a748253db0c..dcaa0404cf7 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -198,7 +198,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src, * non-temporal stores here. 
Smaller writes get handled * via regular __copy_from_user(): */ - if (likely(size >= PAGE_SIZE)) + if (likely(total >= PAGE_SIZE)) return __copy_user_nocache(dst, src, size, 1); else return __copy_from_user(dst, src, size); @@ -207,7 +207,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src, static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size, unsigned total) { - if (likely(size >= PAGE_SIZE)) + if (likely(total >= PAGE_SIZE)) return __copy_user_nocache(dst, src, size, 0); else return __copy_from_user_inatomic(dst, src, size); -- cgit v1.2.3 From 40823f737e5bd186a1156fb1c28f360260e1e084 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:26:18 +0100 Subject: x86: memtest: reuse test patterns when memtest parameter exceeds number of available patterns Impact: fix unexpected behaviour when pattern number is out of range Current implementation provides 4 patterns for memtest. The code doesn't check whether the memtest parameter value exceeds the maximum pattern number. Instead the memtest code pretends to test with non-existing patterns, e.g. when booting with memtest=10 I've observed the following ... early_memtest: pattern num 10 0000001000 - 0000006000 pattern 0 ... 0000001000 - 0000006000 pattern 1 ... 0000001000 - 0000006000 pattern 2 ... 0000001000 - 0000006000 pattern 3 ... 0000001000 - 0000006000 pattern 4 ... 0000001000 - 0000006000 pattern 5 ... 0000001000 - 0000006000 pattern 6 ... 0000001000 - 0000006000 pattern 7 ... 0000001000 - 0000006000 pattern 8 ... 0000001000 - 0000006000 pattern 9 ... But in fact Linux didn't test anything for patterns > 4 as the default case in memtest() is to leave the function. I suggest to use the memtest parameter as the number of tests to be performed and to re-iterate over all existing patterns. 
Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 9cab18b0b85..00b8bdc64c3 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -9,6 +9,8 @@ #include +#define _MAX_MEM_PATTERNS 4 + static void __init memtest(unsigned long start_phys, unsigned long size, unsigned pattern) { @@ -21,6 +23,8 @@ static void __init memtest(unsigned long start_phys, unsigned long size, unsigned long count; unsigned long incr; + pattern = pattern % _MAX_MEM_PATTERNS; + switch (pattern) { case 0: val = 0UL; @@ -110,8 +114,9 @@ void __init early_memtest(unsigned long start, unsigned long end) t_size = end - t_start; printk(KERN_CONT "\n %010llx - %010llx pattern %d", - (unsigned long long)t_start, - (unsigned long long)t_start + t_size, pattern); + (unsigned long long)t_start, + (unsigned long long)t_start + t_size, + pattern % _MAX_MEM_PATTERNS); memtest(t_start, t_size, pattern); -- cgit v1.2.3 From 6d74171bf7315257d276aa35400c5a8d6a993f19 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:27:27 +0100 Subject: x86: memtest: introduce array to select memtest patterns Impact: code cleanup Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 65 ++++++++++++++++++--------------------------------- 1 file changed, 23 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 00b8bdc64c3..827f94044cf 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -9,48 +9,25 @@ #include -#define _MAX_MEM_PATTERNS 4 +static u64 patterns[] __initdata = { + 0, + 0xffffffffffffffffULL, + 0x5555555555555555ULL, + 0xaaaaaaaaaaaaaaaaULL, +}; static void __init memtest(unsigned long start_phys, unsigned long size, - unsigned pattern) + u64 pattern) { unsigned long i; - unsigned long *start; + u64 *start; unsigned long start_bad; unsigned long last_bad; - unsigned long val; unsigned long start_phys_aligned; unsigned long count; unsigned long incr; - pattern = pattern % _MAX_MEM_PATTERNS; - - switch (pattern) { - case 0: - val = 0UL; - break; - case 1: - val = -1UL; - break; - case 2: -#ifdef CONFIG_X86_64 - val = 0x5555555555555555UL; -#else - val = 0x55555555UL; -#endif - break; - case 3: -#ifdef CONFIG_X86_64 - val = 0xaaaaaaaaaaaaaaaaUL; -#else - val = 0xaaaaaaaaUL; -#endif - break; - default: - return; - } - - incr = sizeof(unsigned long); + incr = sizeof(pattern); start_phys_aligned = ALIGN(start_phys, incr); count = (size - (start_phys_aligned - start_phys))/incr; start = __va(start_phys_aligned); @@ -58,15 +35,16 @@ static void __init memtest(unsigned long start_phys, unsigned long size, last_bad = 0; for (i = 0; i < count; i++) - start[i] = val; + start[i] = pattern; for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { - if (*start != val) { + if (*start != pattern) { if (start_phys_aligned == last_bad + incr) { last_bad += incr; } else { if (start_bad) { - printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", - val, start_bad, last_bad + incr); + printk(KERN_CONT "\n %016llx bad mem addr %010lx - %010lx reserved", + (unsigned long long) pattern, + start_bad, last_bad + incr); reserve_early(start_bad, last_bad + incr, "BAD RAM"); } start_bad = last_bad = start_phys_aligned; @@ -74,8 +52,9 @@ static void __init memtest(unsigned long start_phys, unsigned long size, } } if (start_bad) { - 
printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", - val, start_bad, last_bad + incr); + printk(KERN_CONT "\n %016llx bad mem addr %010lx - %010lx reserved", + (unsigned long long) pattern, start_bad, + last_bad + incr); reserve_early(start_bad, last_bad + incr, "BAD RAM"); } } @@ -95,13 +74,16 @@ early_param("memtest", parse_memtest); void __init early_memtest(unsigned long start, unsigned long end) { u64 t_start, t_size; - unsigned pattern; + unsigned int i; + u64 pattern; if (!memtest_pattern) return; printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); - for (pattern = 0; pattern < memtest_pattern; pattern++) { + for (i = 0; i < memtest_pattern; i++) { + unsigned int idx = i % ARRAY_SIZE(patterns); + pattern = patterns[idx]; t_start = start; t_size = 0; while (t_start < end) { @@ -115,8 +97,7 @@ void __init early_memtest(unsigned long start, unsigned long end) printk(KERN_CONT "\n %010llx - %010llx pattern %d", (unsigned long long)t_start, - (unsigned long long)t_start + t_size, - pattern % _MAX_MEM_PATTERNS); + (unsigned long long)t_start + t_size, idx); memtest(t_start, t_size, pattern); -- cgit v1.2.3 From 7dad169e57eda1f0aa6dc5ac43a898b4b0ced2c7 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:28:07 +0100 Subject: x86: memtest: cleanup memtest function Impact: code cleanup Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 827f94044cf..01a72d6825b 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -16,6 +16,15 @@ static u64 patterns[] __initdata = { 0xaaaaaaaaaaaaaaaaULL, }; +static void __init reserve_bad_mem(u64 pattern, unsigned long start_bad, + unsigned long end_bad) +{ + printk(KERN_CONT "\n %016llx bad mem addr " + "%010lx - %010lx reserved", + (unsigned long long) pattern, start_bad, end_bad); + reserve_early(start_bad, end_bad, "BAD RAM"); +} + static void __init memtest(unsigned long start_phys, unsigned long size, u64 pattern) { @@ -37,26 +46,18 @@ static void __init memtest(unsigned long start_phys, unsigned long size, for (i = 0; i < count; i++) start[i] = pattern; for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { - if (*start != pattern) { - if (start_phys_aligned == last_bad + incr) { - last_bad += incr; - } else { - if (start_bad) { - printk(KERN_CONT "\n %016llx bad mem addr %010lx - %010lx reserved", - (unsigned long long) pattern, - start_bad, last_bad + incr); - reserve_early(start_bad, last_bad + incr, "BAD RAM"); - } - start_bad = last_bad = start_phys_aligned; - } + if (*start == pattern) + continue; + if (start_phys_aligned == last_bad + incr) { + last_bad += incr; + continue; } + if (start_bad) + reserve_bad_mem(pattern, start_bad, last_bad + incr); + start_bad = last_bad = start_phys_aligned; } - if (start_bad) { - printk(KERN_CONT "\n %016llx bad mem addr %010lx - %010lx reserved", - (unsigned long long) pattern, start_bad, - last_bad + incr); - reserve_early(start_bad, last_bad + incr, "BAD RAM"); - } + if (start_bad) + reserve_bad_mem(pattern, start_bad, last_bad + incr); } /* default is disabled */ -- cgit v1.2.3 From 570c9e69aaa84689fb8ed2a3a4af39ca54ba7a47 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:28:58 +0100 Subject: x86: memtest: adapt log messages - print test pattern instead of pattern number, - show pattern as stored 
in memory, - use proper priority flags, - consistent use of u64 throughout the code Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 01a72d6825b..3232397783a 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -16,25 +16,22 @@ static u64 patterns[] __initdata = { 0xaaaaaaaaaaaaaaaaULL, }; -static void __init reserve_bad_mem(u64 pattern, unsigned long start_bad, - unsigned long end_bad) +static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) { - printk(KERN_CONT "\n %016llx bad mem addr " - "%010lx - %010lx reserved", - (unsigned long long) pattern, start_bad, end_bad); + printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n", + (unsigned long long) pattern, + (unsigned long long) start_bad, + (unsigned long long) end_bad); reserve_early(start_bad, end_bad, "BAD RAM"); } -static void __init memtest(unsigned long start_phys, unsigned long size, - u64 pattern) +static void __init memtest(u64 pattern, u64 start_phys, u64 size) { - unsigned long i; + u64 i, count; u64 *start; - unsigned long start_bad; - unsigned long last_bad; - unsigned long start_phys_aligned; - unsigned long count; - unsigned long incr; + u64 start_bad, last_bad; + u64 start_phys_aligned; + size_t incr; incr = sizeof(pattern); start_phys_aligned = ALIGN(start_phys, incr); @@ -81,7 +78,7 @@ void __init early_memtest(unsigned long start, unsigned long end) if (!memtest_pattern) return; - printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); + printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); for (i = 0; i < memtest_pattern; i++) { unsigned int idx = i % ARRAY_SIZE(patterns); pattern = patterns[idx]; @@ -96,14 +93,13 @@ void __init early_memtest(unsigned long start, unsigned long end) if (t_start + t_size > end) t_size = end - t_start; - printk(KERN_CONT "\n %010llx - %010llx pattern %d", - (unsigned long long)t_start, - (unsigned long long)t_start + t_size, idx); - - memtest(t_start, t_size, pattern); + printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", + (unsigned long long) t_start, + (unsigned long long) t_start + t_size, + (unsigned long long) cpu_to_be64(pattern)); + memtest(pattern, t_start, t_size); t_start += t_size; } } - printk(KERN_CONT "\n"); } -- cgit v1.2.3 From bfb4dc0da45f8fddc76eba7e62919420c7d6dae2 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:30:04 +0100 Subject: x86: memtest: wipe out test pattern from memory Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 56 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 3232397783a..9c52ef19e6f 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -57,6 +57,29 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size) reserve_bad_mem(pattern, start_bad, last_bad + incr); } +static void __init do_one_pass(u64 pattern, u64 start, u64 end) +{ + u64 size = 0; + + while (start < end) { + start = find_e820_area_size(start, &size, 1); + + /* done ? 
*/ + if (start >= end) + break; + if (start + size > end) + size = end - start; + + printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", + (unsigned long long) start, + (unsigned long long) start + size, + (unsigned long long) cpu_to_be64(pattern)); + memtest(pattern, start, size); + + start += size; + } +} + /* default is disabled */ static int memtest_pattern __initdata; @@ -71,35 +94,22 @@ early_param("memtest", parse_memtest); void __init early_memtest(unsigned long start, unsigned long end) { - u64 t_start, t_size; unsigned int i; - u64 pattern; + unsigned int idx = 0; if (!memtest_pattern) return; printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); for (i = 0; i < memtest_pattern; i++) { - unsigned int idx = i % ARRAY_SIZE(patterns); - pattern = patterns[idx]; - t_start = start; - t_size = 0; - while (t_start < end) { - t_start = find_e820_area_size(t_start, &t_size, 1); - - /* done ? */ - if (t_start >= end) - break; - if (t_start + t_size > end) - t_size = end - t_start; - - printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", - (unsigned long long) t_start, - (unsigned long long) t_start + t_size, - (unsigned long long) cpu_to_be64(pattern)); - memtest(pattern, t_start, t_size); - - t_start += t_size; - } + idx = i % ARRAY_SIZE(patterns); + do_one_pass(patterns[idx], start, end); + } + + if (idx > 0) { + printk(KERN_INFO "early_memtest: wipe out " + "test pattern from memory\n"); + /* additional test with pattern 0 will do this */ + do_one_pass(0, start, end); } } -- cgit v1.2.3 From 63823126c221dd721ce7351b596b3b73aa943613 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 25 Feb 2009 11:31:49 +0100 Subject: x86: memtest: add additional (regular) test patterns Signed-off-by: Andreas Herrmann Signed-off-by: Ingo Molnar --- arch/x86/mm/memtest.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 9c52ef19e6f..0bcd7883d03 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -14,6 +14,19 @@ static u64 patterns[] __initdata = { 0xffffffffffffffffULL, 0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL, + 0x1111111111111111ULL, + 0x2222222222222222ULL, + 0x4444444444444444ULL, + 0x8888888888888888ULL, + 0x3333333333333333ULL, + 0x6666666666666666ULL, + 0x9999999999999999ULL, + 0xccccccccccccccccULL, + 0x7777777777777777ULL, + 0xbbbbbbbbbbbbbbbbULL, + 0xddddddddddddddddULL, + 0xeeeeeeeeeeeeeeeeULL, + 0x7a6c7258554e494cULL, /* yeah ;-) */ }; static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) -- cgit v1.2.3 From 7880f7464546842ee14179bef16a6e14381ea638 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Tue, 24 Feb 2009 17:35:13 -0800 Subject: gpu/drm, x86, PAT: routine to keep identity map in sync Add a function to check and keep identity maps in sync, when changing any memory type. One of the follow on patches will also use this routine. 
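For reference, the clamping done inside the new routine can be sketched in isolation (plain C, with 'high' standing in for __pa(high_memory)): only the part of the range that lies below the top of the kernel identity map gets its attributes changed, and a range entirely above it needs no work at all.

    #include <stdio.h>

    typedef unsigned long long u64;

    /* 'high' stands in for __pa(high_memory), the top of the identity map */
    static u64 id_map_sync_size(u64 base, u64 size, u64 high)
    {
        if (base >= high)
            return 0;       /* not identity-mapped, nothing to fix up */
        return (high < base + size) ? high - base : size;
    }

    int main(void)
    {
        u64 high = 512ULL << 20;    /* hypothetical 512 MB identity map */

        /* a 32 MB range straddling the top: only 16 MB needs syncing */
        printf("sync %llu bytes\n",
               id_map_sync_size(496ULL << 20, 32ULL << 20, high));
        return 0;
    }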
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Cc: Dave Airlie Cc: Jesse Barnes Cc: Eric Anholt Cc: Keith Packard Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pat.h | 3 +++ arch/x86/mm/pat.c | 46 +++++++++++++++++++++++++++++----------------- 2 files changed, 32 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index b8493b3b989..abb3c29fc9d 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -19,4 +19,7 @@ extern int free_memtype(u64 start, u64 end); extern void pat_disable(char *reason); +extern int kernel_map_sync_memtype(u64 base, unsigned long size, + unsigned long flag); + #endif /* _ASM_X86_PAT_H */ diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index aebbf67a79d..399383c6f61 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -624,6 +624,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) free_memtype(addr, addr + size); } +/* + * Change the memory type for the physial address range in kernel identity + * mapping space if that range is a part of identity map. + */ +int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) +{ + unsigned long id_sz; + + if (!pat_enabled || base >= __pa(high_memory)) + return 0; + + id_sz = (__pa(high_memory) < base + size) ? + __pa(high_memory) - base : + size; + + if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { + printk(KERN_INFO + "%s:%d ioremap_change_attr failed %s " + "for %Lx-%Lx\n", + current->comm, current->pid, + cattr_name(flags), + base, (unsigned long long)(base + size)); + return -EINVAL; + } + return 0; +} + /* * Internal interface to reserve a range of physical memory with prot. * Reserved non RAM regions only and after successful reserve_memtype, @@ -633,7 +660,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; - int id_sz, ret; + int ret; unsigned long flags; unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); @@ -670,23 +697,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, flags); } - /* Need to keep identity mapping in sync */ - if (paddr >= __pa(high_memory)) - return 0; - - id_sz = (__pa(high_memory) < paddr + size) ? - __pa(high_memory) - paddr : - size; - - if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { + if (kernel_map_sync_memtype(paddr, size, flags) < 0) { free_memtype(paddr, paddr + size); - printk(KERN_ERR - "%s:%d reserve_pfn_range ioremap_change_attr failed %s " - "for %Lx-%Lx\n", - current->comm, current->pid, - cattr_name(flags), - (unsigned long long)paddr, - (unsigned long long)(paddr + size)); return -EINVAL; } return 0; -- cgit v1.2.3 From 17581ad812a9abb0182260374ef2e52d4a808a64 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Tue, 24 Feb 2009 17:35:14 -0800 Subject: gpu/drm, x86, PAT: PAT support for io_mapping_* Make io_mapping_create_wc and io_mapping_free go through PAT to make sure that there are no memory type aliases. 
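A rough sketch of how the two new helpers are meant to pair up on the io-mapping side (the real io_mapping_create_wc()/io_mapping_free() callers are not shown in this patch; the wrapper type and function names below are hypothetical, and the snippet would need the usual kernel headers to build):

    /* hypothetical wrapper, for illustration only */
    struct wc_region {
        u64 base;
        unsigned long size;
        pgprot_t prot;      /* filled in by reserve_io_memtype_wc() */
    };

    static int wc_region_init(struct wc_region *r, u64 base, unsigned long size)
    {
        r->base = base;
        r->size = size;
        /* fails when PAT cannot grant WC for this range without aliasing */
        return reserve_io_memtype_wc(base, size, &r->prot);
    }

    static void wc_region_release(struct wc_region *r)
    {
        /* must mirror the reservation, or the range stays claimed */
        free_io_memtype(r->base, r->size);
    }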
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Cc: Dave Airlie Cc: Jesse Barnes Cc: Eric Anholt Cc: Keith Packard Signed-off-by: Ingo Molnar --- arch/x86/include/asm/iomap.h | 5 ++++- arch/x86/mm/iomap_32.c | 42 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index 86af26091d6..bd46495ff7d 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h @@ -24,7 +24,10 @@ #include int -is_io_mapping_possible(resource_size_t base, unsigned long size); +reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot); + +void +free_io_memtype(u64 base, unsigned long size); void * iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 6c2b1af1692..94596f794b1 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -21,13 +21,13 @@ #include #ifdef CONFIG_X86_PAE -int +static int is_io_mapping_possible(resource_size_t base, unsigned long size) { return 1; } #else -int +static int is_io_mapping_possible(resource_size_t base, unsigned long size) { /* There is no way to map greater than 1 << 32 address without PAE */ @@ -38,6 +38,44 @@ is_io_mapping_possible(resource_size_t base, unsigned long size) } #endif +int +reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot) +{ + unsigned long ret_flag; + + if (!is_io_mapping_possible(base, size)) + goto out_err; + + if (!pat_enabled) { + *prot = pgprot_noncached(PAGE_KERNEL); + return 0; + } + + if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag)) + goto out_err; + + if (ret_flag == _PAGE_CACHE_WB) + goto out_free; + + if (kernel_map_sync_memtype(base, size, ret_flag)) + goto out_free; + + *prot = __pgprot(__PAGE_KERNEL | ret_flag); + return 0; + +out_free: + free_memtype(base, base + size); +out_err: + return -EINVAL; +} + +void +free_io_memtype(u64 base, unsigned long size) +{ + if (pat_enabled) + free_memtype(base, base + size); +} + /* Map 'pfn' using fixed map 'type' and protections 'prot' */ void * -- cgit v1.2.3 From 34754b69a6f87aa6aa2860525a82f12532f83afd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Feb 2009 16:04:03 +0100 Subject: x86: make vmap yell louder when it is used under irqs_disabled() Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a84ac7b570e..6907b8e85d5 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len) */ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) { - unsigned long flags; char *vaddr; int nr_pages = 2; struct page *pages[2]; int i; + might_sleep(); if (!core_kernel_text((unsigned long)addr)) { pages[0] = vmalloc_to_page(addr); pages[1] = vmalloc_to_page(addr + PAGE_SIZE); @@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) nr_pages = 1; vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); BUG_ON(!vaddr); - local_irq_save(flags); + local_irq_disable(); memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); - local_irq_restore(flags); + local_irq_enable(); vunmap(vaddr); sync_core(); /* Could also do a CLFLUSH here to speed up CPU recovery; but -- cgit v1.2.3 From 
13093cb0e59053bf97910de3a24f07cdff71c62c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 03:16:47 +0100 Subject: gpu/drm, x86, PAT: PAT support for io_mapping_*, export symbols for modules Impact: build fix ERROR: "reserve_io_memtype_wc" [drivers/gpu/drm/i915/i915.ko] undefined! ERROR: "free_io_memtype" [drivers/gpu/drm/i915/i915.ko] undefined! Signed-off-by: Ingo Molnar --- arch/x86/mm/iomap_32.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 94596f794b1..d5e28424622 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -68,6 +68,7 @@ out_free: out_err: return -EINVAL; } +EXPORT_SYMBOL_GPL(reserve_io_memtype_wc); void free_io_memtype(u64 base, unsigned long size) @@ -75,6 +76,7 @@ free_io_memtype(u64 base, unsigned long size) if (pat_enabled) free_memtype(base, base + size); } +EXPORT_SYMBOL_GPL(free_io_memtype); /* Map 'pfn' using fixed map 'type' and protections 'prot' */ -- cgit v1.2.3 From 2b6163bf5772644068694583816fa41e8474239f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Feb 2009 20:50:49 -0800 Subject: x86: remove update_apic from x86_quirks Impact: cleanup x86_quirks->update_apic() calling looks crazy. so try to remove it: 1. every apic take wakeup_cpu member directly 2. separate es7000_apic to es7000_apic_cluster 3. use uv_wakeup_cpu directly Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 7 +- arch/x86/include/asm/setup.h | 3 - arch/x86/include/asm/uv/uv.h | 3 - arch/x86/kernel/apic/apic_flat_64.c | 4 +- arch/x86/kernel/apic/bigsmp_32.c | 2 +- arch/x86/kernel/apic/es7000_32.c | 127 ++++++++++++++++++++++++++-------- arch/x86/kernel/apic/numaq_32.c | 10 +-- arch/x86/kernel/apic/probe_32.c | 16 +---- arch/x86/kernel/apic/probe_64.c | 3 - arch/x86/kernel/apic/summit_32.c | 2 +- arch/x86/kernel/apic/x2apic_cluster.c | 2 +- arch/x86/kernel/apic/x2apic_phys.c | 2 +- arch/x86/kernel/apic/x2apic_uv_x.c | 11 +-- arch/x86/kernel/setup.c | 14 +--- arch/x86/kernel/smpboot.c | 8 +-- 15 files changed, 121 insertions(+), 93 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index a6208dc7463..860504178e9 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -325,6 +325,9 @@ struct apic { }; extern struct apic *apic; +extern atomic_t init_deasserted; +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); static inline u32 apic_read(u32 reg) { @@ -384,9 +387,7 @@ static inline unsigned default_get_apic_id(unsigned long x) #define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 #define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 -#ifdef CONFIG_X86_32 -extern void es7000_update_apic_to_cluster(void); -#else +#ifdef CONFIG_X86_64 extern struct apic apic_flat; extern struct apic apic_physflat; extern struct apic apic_x2apic_cluster; diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 66801cb72f6..126877e502e 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -31,7 +31,6 @@ struct x86_quirks { void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, unsigned short oemsize); int (*setup_ioapic_ids)(void); - int (*update_apic)(void); }; extern void x86_quirk_pre_intr_init(void); @@ -77,8 +76,6 @@ static inline void visws_early_detect(void) { } static inline int is_visws_box(void) { return 0; } #endif -extern int 
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); -extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); extern struct x86_quirks *x86_quirks; extern unsigned long saved_video_mode; diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 8242bf96581..c0a01b5d985 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); extern void uv_cpu_init(void); extern void uv_system_init(void); -extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, @@ -24,8 +23,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } static inline int is_uv_system(void) { return 0; } static inline void uv_cpu_init(void) { } static inline void uv_system_init(void) { } -static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) -{ return 1; } static inline const struct cpumask * uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 3b002995e14..00595bc2da8 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -222,7 +222,7 @@ struct apic apic_flat = { .send_IPI_all = flat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, @@ -373,7 +373,7 @@ struct apic apic_physflat = { .send_IPI_all = physflat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 0b1093394fd..8c25917b51a 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -256,7 +256,7 @@ struct apic apic_bigsmp = { .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 320f2d2e4e5..9f6102fc87a 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -163,17 +163,12 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) return 0; } -static int __init es7000_update_apic(void) +static int __init es7000_apic_is_cluster(void) { - apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; - /* MPENTIUMIII */ if (boot_cpu_data.x86 == 6 && - (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { - es7000_update_apic_to_cluster(); - apic->wait_for_init_deassert = NULL; - apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; - } + (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) + return 1; return 0; } @@ -192,8 +187,6 @@ static void __init setup_unisys(void) else es7000_plat = ES7000_CLASSIC; ioapic_renumber_irq = es7000_rename_gsi; - - x86_quirks->update_apic = 
es7000_update_apic; } /* @@ -310,6 +303,8 @@ static int es7000_check_dsdt(void) return 0; } +static int __initdata es7000_acpi_ret; + /* Hook from generic ACPI tables.c */ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { @@ -332,8 +327,19 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) */ unmap_unisys_acpi_oem_table(oem_addr); } - return ret; + + es7000_acpi_ret = ret; + + return ret && !es7000_apic_is_cluster(); } +static int __init es7000_acpi_madt_oem_check_cluster(char *oem_id, + char *oem_table_id) +{ + int ret = es7000_acpi_ret; + + return ret && es7000_apic_is_cluster(); +} + #else /* !CONFIG_ACPI: */ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { @@ -416,11 +422,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) static void es7000_wait_for_init_deassert(atomic_t *deassert) { -#ifndef CONFIG_ES7000_CLUSTERED_APIC while (!atomic_read(deassert)) cpu_relax(); -#endif - return; } static unsigned int es7000_get_apic_id(unsigned long x) @@ -659,37 +662,103 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -void __init es7000_update_apic_to_cluster(void) -{ - apic->target_cpus = target_cpus_cluster; - apic->irq_delivery_mode = dest_LowestPrio; - /* logical delivery broadcast to all procs: */ - apic->irq_dest_mode = 1; - - apic->init_apic_ldr = es7000_init_apic_ldr_cluster; - - apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; -} - static int probe_es7000(void) { /* probed later in mptable/ACPI hooks */ return 0; } +static int __initdata es7000_mps_ret; static __init int es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { + int ret = 0; + if (mpc->oemptr) { struct mpc_oemtable *oem_table = (struct mpc_oemtable *)mpc->oemptr; if (!strncmp(oem, "UNISYS", 6)) - return parse_unisys_oem((char *)oem_table); + ret = parse_unisys_oem((char *)oem_table); } - return 0; + + es7000_mps_ret = ret; + + return ret && !es7000_apic_is_cluster(); +} + +static __init int +es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, char *productid) +{ + int ret = es7000_mps_ret; + + return ret && es7000_apic_is_cluster(); } +struct apic apic_es7000_cluster = { + + .name = "es7000", + .probe = probe_es7000, + .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster, + .apic_id_registered = es7000_apic_id_registered, + + .irq_delivery_mode = dest_LowestPrio, + /* logical delivery broadcast to all procs: */ + .irq_dest_mode = 1, + + .target_cpus = target_cpus_cluster, + .disable_esr = 1, + .dest_logical = 0, + .check_apicid_used = es7000_check_apicid_used, + .check_apicid_present = es7000_check_apicid_present, + + .vector_allocation_domain = es7000_vector_allocation_domain, + .init_apic_ldr = es7000_init_apic_ldr_cluster, + + .ioapic_phys_id_map = es7000_ioapic_phys_id_map, + .setup_apic_routing = es7000_setup_apic_routing, + .multi_timer_check = NULL, + .apicid_to_node = es7000_apicid_to_node, + .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, + .cpu_present_to_apicid = es7000_cpu_present_to_apicid, + .apicid_to_cpu_present = es7000_apicid_to_cpu_present, + .setup_portio_remap = NULL, + .check_phys_apicid_present = es7000_check_phys_apicid_present, + .enable_apic_mode = es7000_enable_apic_mode, + .phys_pkg_id = es7000_phys_pkg_id, + .mps_oem_check = es7000_mps_oem_check_cluster, + + .get_apic_id = es7000_get_apic_id, + .set_apic_id = NULL, + .apic_id_mask = 0xFF << 24, + + .cpu_mask_to_apicid = 
es7000_cpu_mask_to_apicid_cluster, + .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, + + .send_IPI_mask = es7000_send_IPI_mask, + .send_IPI_mask_allbutself = NULL, + .send_IPI_allbutself = es7000_send_IPI_allbutself, + .send_IPI_all = es7000_send_IPI_all, + .send_IPI_self = default_send_IPI_self, + + .wakeup_cpu = wakeup_secondary_cpu_via_mip, + + .trampoline_phys_low = 0x467, + .trampoline_phys_high = 0x469, + + .wait_for_init_deassert = NULL, + + /* Nothing to do for most platforms, since cleared by the INIT cycle: */ + .smp_callin_clear_local_apic = NULL, + .inquire_remote_apic = default_inquire_remote_apic, + + .read = native_apic_mem_read, + .write = native_apic_mem_write, + .icr_read = native_apic_icr_read, + .icr_write = native_apic_icr_write, + .wait_icr_idle = native_apic_wait_icr_idle, + .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, +}; struct apic apic_es7000 = { @@ -737,7 +806,7 @@ struct apic apic_es7000 = { .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = 0x467, .trampoline_phys_high = 0x469, diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index d9d6d61eed8..c503c1799d6 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -256,13 +256,6 @@ static int __init numaq_setup_ioapic_ids(void) return 1; } -static int __init numaq_update_apic(void) -{ - apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; - - return 0; -} - static struct x86_quirks numaq_x86_quirks __initdata = { .arch_pre_time_init = numaq_pre_time_init, .arch_time_init = NULL, @@ -278,7 +271,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = { .mpc_oem_pci_bus = mpc_oem_pci_bus, .smp_read_mpc_oem = smp_read_mpc_oem, .setup_ioapic_ids = numaq_setup_ioapic_ids, - .update_apic = numaq_update_apic, }; static __init void early_check_numaq(void) @@ -546,7 +538,7 @@ struct apic apic_numaq = { .send_IPI_all = numaq_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_nmi, .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 3a730fa574b..13c6fc7dff9 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -138,7 +138,7 @@ struct apic apic_default = { .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, @@ -159,6 +159,7 @@ extern struct apic apic_numaq; extern struct apic apic_summit; extern struct apic apic_bigsmp; extern struct apic apic_es7000; +extern struct apic apic_es7000_cluster; extern struct apic apic_default; struct apic *apic = &apic_default; @@ -176,6 +177,7 @@ static struct apic *apic_probe[] __initdata = { #endif #ifdef CONFIG_X86_ES7000 &apic_es7000, + &apic_es7000_cluster, #endif &apic_default, /* must be last */ NULL, @@ -197,9 +199,6 @@ static int __init parse_apic(char *arg) } } - if (x86_quirks->update_apic) - x86_quirks->update_apic(); - /* Parsed again by __setup for debug/verbose */ return 0; } @@ -218,8 +217,6 @@ void __init generic_bigsmp_probe(void) if (!cmdline_apic && apic == &apic_default) { if (apic_bigsmp.probe()) { apic = &apic_bigsmp; - if (x86_quirks->update_apic) - 
x86_quirks->update_apic(); printk(KERN_INFO "Overriding APIC driver with %s\n", apic->name); } @@ -240,9 +237,6 @@ void __init generic_apic_probe(void) /* Not visible without early console */ if (!apic_probe[i]) panic("Didn't find an APIC driver"); - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); } printk(KERN_INFO "Using APIC driver %s\n", apic->name); } @@ -262,8 +256,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) if (!cmdline_apic) { apic = apic_probe[i]; - if (x86_quirks->update_apic) - x86_quirks->update_apic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", apic->name); } @@ -284,8 +276,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) if (!cmdline_apic) { apic = apic_probe[i]; - if (x86_quirks->update_apic) - x86_quirks->update_apic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", apic->name); } diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index e7c163661c7..8d7748efe6a 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -68,9 +68,6 @@ void __init default_setup_apic_routing(void) apic = &apic_physflat; printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); } - - if (x86_quirks->update_apic) - x86_quirks->update_apic(); } /* Same for both flat and physical. */ diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 32838b57a94..5a75d563f67 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -574,7 +574,7 @@ struct apic apic_summit = { .send_IPI_all = summit_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 354b9c45601..561a6b1042a 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -224,7 +224,7 @@ struct apic apic_x2apic_cluster = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 5bcb174409b..785f8ee4b1d 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -213,7 +213,7 @@ struct apic apic_x2apic_phys = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 20b4ad07c3a..6d7b9d960dd 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -91,7 +91,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) cpumask_set_cpu(cpu, retmask); } -int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) +static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) { unsigned long val; int pnode; @@ -99,16 +99,19 @@ int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) pnode = 
uv_apicid_to_pnode(phys_apicid); val = (1UL << UVH_IPI_INT_SEND_SHFT) | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | - (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | + ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | APIC_DM_INIT; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); mdelay(10); val = (1UL << UVH_IPI_INT_SEND_SHFT) | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | - (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | + ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | APIC_DM_STARTUP; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); + + atomic_set(&init_deasserted, 1); + return 0; } @@ -285,7 +288,7 @@ struct apic apic_x2apic_uv_x = { .send_IPI_all = uv_send_IPI_all, .send_IPI_self = uv_send_IPI_self, - .wakeup_cpu = NULL, + .wakeup_cpu = uv_wakeup_secondary, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5b85759e797..2280d93c5b9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg) early_param("elfcorehdr", setup_elfcorehdr); #endif -static int __init default_update_apic(void) -{ -#ifdef CONFIG_SMP - if (!apic->wakeup_cpu) - apic->wakeup_cpu = wakeup_secondary_cpu_via_init; -#endif - - return 0; -} - -static struct x86_quirks default_x86_quirks __initdata = { - .update_apic = default_update_apic, -}; +static struct x86_quirks default_x86_quirks __initdata; struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9ce666387f3..9b338aa03b4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -112,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -static atomic_t init_deasserted; +atomic_t init_deasserted; /* Set if we find a B stepping CPU */ @@ -614,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) unsigned long send_status, accept_status = 0; int maxlvt, num_starts, j; - if (get_uv_system_type() == UV_NON_UNIQUE_APIC) { - send_status = uv_wakeup_secondary(phys_apicid, start_eip); - atomic_set(&init_deasserted, 1); - return send_status; - } - maxlvt = lapic_get_maxlvt(); /* -- cgit v1.2.3 From 129d8bc828e011bda0b7110a097bf3a0167f966e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Feb 2009 21:20:50 -0800 Subject: x86: don't compile vsmp_64 for 32bit Impact: cleanup that is only needed when CONFIG_X86_VSMP is defined with 64bit also remove dead code about PCI, because CONFIG_X86_VSMP depends on PCI Signed-off-by: Yinghai Lu Cc: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 7 +++++++ arch/x86/include/asm/setup.h | 4 ++++ arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/setup.c | 2 -- arch/x86/kernel/vsmp_64.c | 12 +----------- 5 files changed, 13 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 860504178e9..24e21273e30 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -75,7 +75,14 @@ static inline void default_inquire_remote_apic(int apicid) #define setup_secondary_clock setup_secondary_APIC_clock #endif +#ifdef CONFIG_X86_VSMP extern int is_vsmp_box(void); +#else +static inline int is_vsmp_box(void) +{ + return 0; +} +#endif extern void xapic_wait_icr_idle(void); extern 
u32 safe_xapic_wait_icr_idle(void); extern void xapic_icr_write(u32, u32); diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 126877e502e..05c6f6b11fd 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -64,7 +64,11 @@ extern void x86_quirk_time_init(void); #include /* Interrupt control for vSMPowered x86_64 systems */ +#ifdef CONFIG_X86_VSMP void vsmp_init(void); +#else +static inline void vsmp_init(void) { } +#endif void setup_bios_corruption_check(void); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index de5657c039e..95f216bbfaf 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -70,7 +70,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o -obj-y += vsmp_64.o +obj-$(CONFIG_X86_VSMP) += vsmp_64.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module_$(BITS).o obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 2280d93c5b9..4c54bc0d8ff 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -863,9 +863,7 @@ void __init setup_arch(char **cmdline_p) reserve_initrd(); -#ifdef CONFIG_X86_64 vsmp_init(); -#endif io_delay_init(); diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index c609205df59..74de562812c 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -22,7 +22,7 @@ #include #include -#if defined CONFIG_PCI && defined CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT /* * Interrupt control on vSMPowered systems: * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' @@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void) } #endif -#ifdef CONFIG_PCI static int is_vsmp = -1; static void __init detect_vsmp_box(void) @@ -139,15 +138,6 @@ int is_vsmp_box(void) return 0; } } -#else -static void __init detect_vsmp_box(void) -{ -} -int is_vsmp_box(void) -{ - return 0; -} -#endif void __init vsmp_init(void) { -- cgit v1.2.3 From 0917c01f8e793f57a53cf886533d4c75c67f6e89 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 12:47:40 +0100 Subject: x86: remove update_apic from x86_quirks, fix Impact: build fix wakeup_secondary_cpu_via_init(), the default platform method for booting a secondary CPU, is always used on UP due to probe_32.c, if CONFIG_X86_LOCAL_APIC is enabled but SMP is off. So provide a UP wrapper inline as well. 
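As a rough illustration of the UP-wrapper pattern this fix introduces (a sketch only, condensed from the header change in the diff below; the identifiers are the ones the patch itself uses), the idea is to keep the real prototype for SMP builds and fall back to a static inline no-op when CONFIG_SMP is off:

#ifdef CONFIG_SMP
extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
#else
/* UP build: there is no secondary CPU to kick, so succeed trivially */
static inline int
wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip)
{
	return 0;
}
#endif

This keeps probe_32.c compiling on CONFIG_X86_LOCAL_APIC=y, CONFIG_SMP=n configurations without dragging SMP-only wakeup code into UP kernels.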
Cc: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 24e21273e30..0fbf6f1520f 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -331,10 +331,27 @@ struct apic { u32 (*safe_wait_icr_idle)(void); }; +/* + * Pointer to the local APIC driver in use on this system (there's + * always just one such driver in use - the kernel decides via an + * early probing process which one it picks - and then sticks to it): + */ extern struct apic *apic; + +/* + * APIC functionality to boot other CPUs - only used on SMP: + */ +#ifdef CONFIG_SMP extern atomic_t init_deasserted; extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); +#else +static inline int +wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip) +{ + return 0; +} +#endif static inline u32 apic_read(u32 reg) { -- cgit v1.2.3 From 1f5bcabf1b997d6b76a09114b5a79423495a1263 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 13:51:40 +0100 Subject: x86: apic: simplify secondary CPU wakeup methods Impact: cleanup - rename apic->wakeup_cpu to apic->wakeup_secondary_cpu, to make it apparent that this is an SMP-only method - handle NULL ->wakeup_secondary_cpus to mean the default INIT wakeup sequence - this allows simplification of the APIC driver templates. Cc: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 9 +-------- arch/x86/kernel/apic/apic_flat_64.c | 2 -- arch/x86/kernel/apic/bigsmp_32.c | 1 - arch/x86/kernel/apic/es7000_32.c | 4 +--- arch/x86/kernel/apic/numaq_32.c | 2 +- arch/x86/kernel/apic/probe_32.c | 1 - arch/x86/kernel/apic/summit_32.c | 1 - arch/x86/kernel/apic/x2apic_cluster.c | 1 - arch/x86/kernel/apic/x2apic_phys.c | 1 - arch/x86/kernel/apic/x2apic_uv_x.c | 2 +- arch/x86/kernel/smpboot.c | 11 ++++++++--- 11 files changed, 12 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 0fbf6f1520f..4ef949c1972 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -313,7 +313,7 @@ struct apic { void (*send_IPI_self)(int vector); /* wakeup_secondary_cpu */ - int (*wakeup_cpu)(int apicid, unsigned long start_eip); + int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); int trampoline_phys_low; int trampoline_phys_high; @@ -344,13 +344,6 @@ extern struct apic *apic; #ifdef CONFIG_SMP extern atomic_t init_deasserted; extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); -extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); -#else -static inline int -wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip) -{ - return 0; -} #endif static inline u32 apic_read(u32 reg) diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 00595bc2da8..f933822dba1 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -222,7 +222,6 @@ struct apic apic_flat = { .send_IPI_all = flat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, @@ -373,7 +372,6 @@ struct apic apic_physflat = { .send_IPI_all = 
physflat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 8c25917b51a..69c512e23a9 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -256,7 +256,6 @@ struct apic apic_bigsmp = { .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 9f6102fc87a..b4838ed3f26 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -741,7 +741,7 @@ struct apic apic_es7000_cluster = { .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_mip, + .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip, .trampoline_phys_low = 0x467, .trampoline_phys_high = 0x469, @@ -806,8 +806,6 @@ struct apic apic_es7000 = { .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, - .trampoline_phys_low = 0x467, .trampoline_phys_high = 0x469, diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index c503c1799d6..a7f711f5110 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -538,7 +538,7 @@ struct apic apic_numaq = { .send_IPI_all = numaq_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_nmi, + .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi, .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 13c6fc7dff9..141c99a1c26 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -138,7 +138,6 @@ struct apic apic_default = { .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 5a75d563f67..0a1135c5a6d 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -574,7 +574,6 @@ struct apic apic_summit = { .send_IPI_all = summit_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 561a6b1042a..8fb87b6dd63 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -224,7 +224,6 @@ struct apic apic_x2apic_cluster = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 
785f8ee4b1d..23625b9f98b 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -213,7 +213,6 @@ struct apic apic_x2apic_phys = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wakeup_cpu = wakeup_secondary_cpu_via_init, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 6d7b9d960dd..7151de74a39 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -288,7 +288,7 @@ struct apic apic_x2apic_uv_x = { .send_IPI_all = uv_send_IPI_all, .send_IPI_self = uv_send_IPI_self, - .wakeup_cpu = uv_wakeup_secondary, + .wakeup_secondary_cpu = uv_wakeup_secondary, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9b338aa03b4..249334f5080 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -742,7 +742,8 @@ static void __cpuinit do_fork_idle(struct work_struct *work) /* * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad * (ie clustered apic addressing mode), this is a LOGICAL apic ID. - * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. + * Returns zero if CPU booted OK, else error code from + * ->wakeup_secondary_cpu. */ static int __cpuinit do_boot_cpu(int apicid, int cpu) { @@ -829,9 +830,13 @@ do_rest: } /* - * Starting actual IPI sequence... + * Kick the secondary CPU. Use the method in the APIC driver + * if it's defined - or use an INIT boot APIC message otherwise: */ - boot_error = apic->wakeup_cpu(apicid, start_ip); + if (apic->wakeup_secondary_cpu) + boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); + else + boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip); if (!boot_error) { /* -- cgit v1.2.3 From 0b1da1c8fc1a0cb71f17701efad06855a059f752 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 14:10:10 +0100 Subject: x86: apic: simplify secondary CPU wakeup methods, fix Impact: build fix init_deasserted is only available on SMP. Make the secondary-wakeup function conditional on SMP. Also clean up the file some. Cc: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_uv_x.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 7151de74a39..1bd6da1f8fa 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -7,28 +7,28 @@ * * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 
*/ - -#include -#include -#include #include +#include +#include +#include +#include +#include #include #include -#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include + #include #include +#include +#include #include +#include +#include +#include +#include DEFINE_PER_CPU(int, x2apic_extra_bits); @@ -93,6 +93,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) { +#ifdef CONFIG_SMP unsigned long val; int pnode; @@ -111,7 +112,7 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) uv_write_global_mmr64(pnode, UVH_IPI_INT, val); atomic_set(&init_deasserted, 1); - +#endif return 0; } @@ -368,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift, paddr = base << shift; bytes = (1UL << shift) * (max_pnode + 1); printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, - paddr + bytes); + paddr + bytes); if (map_type == map_uc) init_extra_mapping_uc(paddr, bytes); else @@ -531,7 +532,7 @@ late_initcall(uv_init_heartbeat); /* * Called on each cpu to initialize the per_cpu UV data area. - * ZZZ hotplug not supported yet + * FIXME: hotplug not supported yet */ void __cpuinit uv_cpu_init(void) { -- cgit v1.2.3 From 3b900d44190c7da8681101c57a5be6b354dab2c7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 14:34:08 +0100 Subject: x86: fix !ACPI build for es7000_32.c arch/x86/kernel/apic/es7000_32.c:702: error: 'es7000_acpi_madt_oem_check_cluster' undeclared here (not in a function) Provide a es7000_acpi_madt_oem_check_cluster() definition in the !ACPI case too. Cc: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/es7000_32.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index b4838ed3f26..da37e2c59fe 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -332,8 +332,9 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return ret && !es7000_apic_is_cluster(); } -static int __init es7000_acpi_madt_oem_check_cluster(char *oem_id, - char *oem_table_id) + +static int __init +es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) { int ret = es7000_acpi_ret; @@ -345,6 +346,12 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } + +static int __init +es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) +{ + return 0; +} #endif /* !CONFIG_ACPI */ static void es7000_spin(int n) -- cgit v1.2.3 From 83ce400928680a6c8123d492684b27857f5a2d95 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 20:16:58 +0100 Subject: x86: set X86_FEATURE_TSC_RELIABLE If the TSC is constant and non-stop, also set it reliable. (We will turn this off in DMI quirks for multi-chassis systems) The performance number on a 16-way Nehalem system running 32 tasks that context-switch between each other is significant: sched_clock_stable=0 sched_clock_stable=1 .................... .................... 22.456925 million/sec 24.306972 million/sec [+8.2%] lmbench's "lat_ctx -s 0 2" goes from 0.63 microseconds to 0.59 microseconds - a 6.7% increase in context-switching performance. Perfstat of 1 million pipe context switches between two tasks: Performance counter stats for './pipe-test-1m': [before] [after] ............ 
............ 37621.421089 36436.848378 task clock ticks (msecs) 0 0 CPU migrations (events) 2000274 2000189 context switches (events) 194 193 pagefaults (events) 8433799643 8171016416 CPU cycles (events) -3.21% 8370133368 8180999694 instructions (events) -2.31% 4158565 3895941 cache references (events) -6.74% 44312 46264 cache misses (events) 2349.287976 2279.362465 wall-time (msecs) -3.06% The speedup comes straight from the reduction in the instruction count. sched_clock_cpu() got simpler and the whole workload thus executes faster. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 24ff26a38ad..5fff00c70de 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate - * with P/T states and does not stop in deep C-states + * with P/T states and does not stop in deep C-states. + * + * It is also reliable across cores and sockets. (but not across + * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); + sched_clock_stable = 1; } } -- cgit v1.2.3 From ffba3f48bcd2d8af0570e7d4ce9b86fc4de9b10d Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Thu, 26 Feb 2009 22:40:38 -0800 Subject: fec: add FEC platform support to ColdFire CPU's setup code m68knommu: add FEC platform support to ColdFire CPU's setup code Move the per-CPU FEC driver setup code into the actual platform setup code for each ColdFire CPU varient. Signed-off-by: Greg Ungerer Signed-off-by: David S. 
Miller --- arch/m68knommu/platform/520x/config.c | 56 +++++++++++++++++ arch/m68knommu/platform/523x/config.c | 51 +++++++++++++++ arch/m68knommu/platform/5272/config.c | 48 +++++++++++++++ arch/m68knommu/platform/527x/config.c | 113 +++++++++++++++++++++++++++++++++- arch/m68knommu/platform/528x/config.c | 58 +++++++++++++++++ arch/m68knommu/platform/532x/config.c | 49 +++++++++++++++ 6 files changed, 374 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c index 06d887cdcbf..855fc6a79d7 100644 --- a/arch/m68knommu/platform/520x/config.c +++ b/arch/m68knommu/platform/520x/config.c @@ -49,8 +49,39 @@ static struct platform_device m520x_uart = { .dev.platform_data = m520x_uart_platform, }; +static struct resource m520x_fec_resources[] = { + { + .start = MCF_MBAR + 0x30000, + .end = MCF_MBAR + 0x30000 + 0x7ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 64 + 36, + .end = 64 + 36, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 40, + .end = 64 + 40, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 42, + .end = 64 + 42, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m520x_fec = { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m520x_fec_resources), + .resource = m520x_fec_resources, +}; + static struct platform_device *m520x_devices[] __initdata = { &m520x_uart, + &m520x_fec, }; /***************************************************************************/ @@ -103,6 +134,30 @@ static void __init m520x_uarts_init(void) /***************************************************************************/ +static void __init m520x_fec_init(void) +{ + u32 imr; + u8 v; + + /* Unmask FEC interrupts at ColdFire interrupt controller */ + writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 36); + writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 40); + writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 42); + + imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + imr &= ~0x0001FFF0; + writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + + /* Set multi-function pins to ethernet mode */ + v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FEC); + writeb(v | 0xf0, MCF_IPSBAR + MCF_GPIO_PAR_FEC); + + v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); + writeb(v | 0x0f, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); +} + +/***************************************************************************/ + /* * Program the vector to be an auto-vectored. 
*/ @@ -118,6 +173,7 @@ void __init config_BSP(char *commandp, int size) { mach_reset = coldfire_reset; m520x_uarts_init(); + m520x_fec_init(); } /***************************************************************************/ diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c index 13f02611ea2..74133f27b30 100644 --- a/arch/m68knommu/platform/523x/config.c +++ b/arch/m68knommu/platform/523x/config.c @@ -50,8 +50,39 @@ static struct platform_device m523x_uart = { .dev.platform_data = m523x_uart_platform, }; +static struct resource m523x_fec_resources[] = { + { + .start = MCF_MBAR + 0x1000, + .end = MCF_MBAR + 0x1000 + 0x7ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 64 + 23, + .end = 64 + 23, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 27, + .end = 64 + 27, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 29, + .end = 64 + 29, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m523x_fec = { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m523x_fec_resources), + .resource = m523x_fec_resources, +}; + static struct platform_device *m523x_devices[] __initdata = { &m523x_uart, + &m523x_fec, }; /***************************************************************************/ @@ -83,6 +114,25 @@ static void __init m523x_uarts_init(void) /***************************************************************************/ +static void __init m523x_fec_init(void) +{ + u32 imr; + + /* Unmask FEC interrupts at ColdFire interrupt controller */ + writeb(0x28, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 23); + writeb(0x27, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 27); + writeb(0x26, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 29); + + imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + imr &= ~0xf; + writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); + imr &= ~0xff800001; + writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); +} + +/***************************************************************************/ + void mcf_disableall(void) { *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff; @@ -103,6 +153,7 @@ void __init config_BSP(char *commandp, int size) mcf_disableall(); mach_reset = coldfire_reset; m523x_uarts_init(); + m523x_fec_init(); } /***************************************************************************/ diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c index 230bae691a7..e049245f409 100644 --- a/arch/m68knommu/platform/5272/config.c +++ b/arch/m68knommu/platform/5272/config.c @@ -55,8 +55,39 @@ static struct platform_device m5272_uart = { .dev.platform_data = m5272_uart_platform, }; +static struct resource m5272_fec_resources[] = { + { + .start = MCF_MBAR + 0x840, + .end = MCF_MBAR + 0x840 + 0x1cf, + .flags = IORESOURCE_MEM, + }, + { + .start = 86, + .end = 86, + .flags = IORESOURCE_IRQ, + }, + { + .start = 87, + .end = 87, + .flags = IORESOURCE_IRQ, + }, + { + .start = 88, + .end = 88, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m5272_fec = { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m5272_fec_resources), + .resource = m5272_fec_resources, +}; + static struct platform_device *m5272_devices[] __initdata = { &m5272_uart, + &m5272_fec, }; /***************************************************************************/ @@ -91,6 +122,22 @@ static void __init m5272_uarts_init(void) 
/***************************************************************************/ +static void __init m5272_fec_init(void) +{ + u32 imr; + + /* Unmask FEC interrupts at ColdFire interrupt controller */ + imr = readl(MCF_MBAR + MCFSIM_ICR3); + imr = (imr & ~0x00000fff) | 0x00000ddd; + writel(imr, MCF_MBAR + MCFSIM_ICR3); + + imr = readl(MCF_MBAR + MCFSIM_ICR1); + imr = (imr & ~0x0f000000) | 0x0d000000; + writel(imr, MCF_MBAR + MCFSIM_ICR1); +} + +/***************************************************************************/ + void mcf_disableall(void) { volatile unsigned long *icrp; @@ -155,6 +202,7 @@ void __init config_BSP(char *commandp, int size) static int __init init_BSP(void) { m5272_uarts_init(); + m5272_fec_init(); platform_add_devices(m5272_devices, ARRAY_SIZE(m5272_devices)); return 0; } diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c index 73cd1aef4a9..49343fb157b 100644 --- a/arch/m68knommu/platform/527x/config.c +++ b/arch/m68knommu/platform/527x/config.c @@ -50,8 +50,73 @@ static struct platform_device m527x_uart = { .dev.platform_data = m527x_uart_platform, }; +static struct resource m527x_fec0_resources[] = { + { + .start = MCF_MBAR + 0x1000, + .end = MCF_MBAR + 0x1000 + 0x7ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 64 + 23, + .end = 64 + 23, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 27, + .end = 64 + 27, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 29, + .end = 64 + 29, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource m527x_fec1_resources[] = { + { + .start = MCF_MBAR + 0x1800, + .end = MCF_MBAR + 0x1800 + 0x7ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 128 + 23, + .end = 128 + 23, + .flags = IORESOURCE_IRQ, + }, + { + .start = 128 + 27, + .end = 128 + 27, + .flags = IORESOURCE_IRQ, + }, + { + .start = 128 + 29, + .end = 128 + 29, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m527x_fec[] = { + { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m527x_fec0_resources), + .resource = m527x_fec0_resources, + }, + { + .name = "fec", + .id = 1, + .num_resources = ARRAY_SIZE(m527x_fec1_resources), + .resource = m527x_fec1_resources, + }, +}; + static struct platform_device *m527x_devices[] __initdata = { &m527x_uart, + &m527x_fec[0], +#ifdef CONFIG_FEC2 + &m527x_fec[1], +#endif }; /***************************************************************************/ @@ -97,6 +162,51 @@ static void __init m527x_uarts_init(void) /***************************************************************************/ +static void __init m527x_fec_irq_init(int nr) +{ + unsigned long base; + u32 imr; + + base = MCF_IPSBAR + (nr ? 
MCFICM_INTC1 : MCFICM_INTC0); + + writeb(0x28, base + MCFINTC_ICR0 + 23); + writeb(0x27, base + MCFINTC_ICR0 + 27); + writeb(0x26, base + MCFINTC_ICR0 + 29); + + imr = readl(base + MCFINTC_IMRH); + imr &= ~0xf; + writel(imr, base + MCFINTC_IMRH); + imr = readl(base + MCFINTC_IMRL); + imr &= ~0xff800001; + writel(imr, base + MCFINTC_IMRL); +} + +static void __init m527x_fec_init(void) +{ + u16 par; + u8 v; + + m527x_fec_irq_init(0); + + /* Set multi-function pins to ethernet mode for fec0 */ + par = readw(MCF_IPSBAR + 0x100082); + writew(par | 0xf00, MCF_IPSBAR + 0x100082); + v = readb(MCF_IPSBAR + 0x100078); + writeb(v | 0xc0, MCF_IPSBAR + 0x100078); + +#ifdef CONFIG_FEC2 + m527x_fec_irq_init(1); + + /* Set multi-function pins to ethernet mode for fec1 */ + par = readw(MCF_IPSBAR + 0x100082); + writew(par | 0xa0, MCF_IPSBAR + 0x100082); + v = readb(MCF_IPSBAR + 0x100079); + writeb(v | 0xc0, MCF_IPSBAR + 0x100079); +#endif +} + +/***************************************************************************/ + void mcf_disableall(void) { *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff; @@ -116,13 +226,14 @@ void __init config_BSP(char *commandp, int size) { mcf_disableall(); mach_reset = coldfire_reset; + m527x_uarts_init(); + m527x_fec_init(); } /***************************************************************************/ static int __init init_BSP(void) { - m527x_uarts_init(); platform_add_devices(m527x_devices, ARRAY_SIZE(m527x_devices)); return 0; } diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c index dfdb5c2ed8e..2ffb549876f 100644 --- a/arch/m68knommu/platform/528x/config.c +++ b/arch/m68knommu/platform/528x/config.c @@ -285,8 +285,40 @@ static struct platform_device m528x_uart = { .dev.platform_data = m528x_uart_platform, }; +static struct resource m528x_fec_resources[] = { + { + .start = MCF_MBAR + 0x1000, + .end = MCF_MBAR + 0x1000 + 0x7ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 64 + 23, + .end = 64 + 23, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 27, + .end = 64 + 27, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 29, + .end = 64 + 29, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m528x_fec = { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m528x_fec_resources), + .resource = m528x_fec_resources, +}; + + static struct platform_device *m528x_devices[] __initdata = { &m528x_uart, + &m528x_fec, }; /***************************************************************************/ @@ -327,6 +359,31 @@ static void __init m528x_uarts_init(void) /***************************************************************************/ +static void __init m528x_fec_init(void) +{ + u32 imr; + u16 v16; + + /* Unmask FEC interrupts at ColdFire interrupt controller */ + writeb(0x28, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 23); + writeb(0x27, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 27); + writeb(0x26, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 29); + + imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + imr &= ~0xf; + writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH); + imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); + imr &= ~0xff800001; + writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); + + /* Set multi-function pins to ethernet mode for fec0 */ + v16 = readw(MCF_IPSBAR + 0x100056); + writew(v16 | 0xf00, MCF_IPSBAR + 0x100056); + writeb(0xc0, MCF_IPSBAR + 0x100058); +} + +/***************************************************************************/ + 
void mcf_disableall(void) { *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff; @@ -386,6 +443,7 @@ void __init config_BSP(char *commandp, int size) static int __init init_BSP(void) { m528x_uarts_init(); + m528x_fec_init(); platform_add_devices(m528x_devices, ARRAY_SIZE(m528x_devices)); return 0; } diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c index a347623d6ee..591f2f80113 100644 --- a/arch/m68knommu/platform/532x/config.c +++ b/arch/m68knommu/platform/532x/config.c @@ -61,8 +61,38 @@ static struct platform_device m532x_uart = { .dev.platform_data = m532x_uart_platform, }; +static struct resource m532x_fec_resources[] = { + { + .start = 0xfc030000, + .end = 0xfc0307ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 64 + 36, + .end = 64 + 36, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 40, + .end = 64 + 40, + .flags = IORESOURCE_IRQ, + }, + { + .start = 64 + 42, + .end = 64 + 42, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device m532x_fec = { + .name = "fec", + .id = 0, + .num_resources = ARRAY_SIZE(m532x_fec_resources), + .resource = m532x_fec_resources, +}; static struct platform_device *m532x_devices[] __initdata = { &m532x_uart, + &m532x_fec, }; /***************************************************************************/ @@ -93,6 +123,24 @@ static void __init m532x_uarts_init(void) for (line = 0; (line < nrlines); line++) m532x_uart_init_line(line, m532x_uart_platform[line].irq); } +/***************************************************************************/ + +static void __init m532x_fec_init(void) +{ + /* Unmask FEC interrupts at ColdFire interrupt controller */ + MCF_INTC0_ICR36 = 0x2; + MCF_INTC0_ICR40 = 0x2; + MCF_INTC0_ICR42 = 0x2; + + MCF_INTC0_IMRH &= ~(MCF_INTC_IMRH_INT_MASK36 | + MCF_INTC_IMRH_INT_MASK40 | MCF_INTC_IMRH_INT_MASK42); + + /* Set multi-function pins to ethernet mode for fec0 */ + MCF_GPIO_PAR_FECI2C |= (MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC | + MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO); + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | + MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC); +} /***************************************************************************/ @@ -150,6 +198,7 @@ void __init config_BSP(char *commandp, int size) static int __init init_BSP(void) { m532x_uarts_init(); + m532x_fec_init(); platform_add_devices(m532x_devices, ARRAY_SIZE(m532x_devices)); return 0; } -- cgit v1.2.3 From 1d015cf02a1fd46385c03cf3ce8958dbea705dd3 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 23 Feb 2009 07:14:02 +0000 Subject: sh: shared register saving code for sh3/sh4/sh4a This patch reworks the sh3/sh4/sh4a register saving code in the following ways: - break out prepare_stack_save_dsp() from handle_exception() - break out save_regs() from handle_exception() - the register saving order is unchanged - align new functions to fit in cache lines - separate exception code from interrupt code - keep main code flow in a single cache line per exception vector - use bsr/rts for regular functions (save pr first) - keep data in one shared cache line (exception_data) - document the functions - tie in the hp6xx code Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-hp6xx/pm_wakeup.S | 31 +--- arch/sh/kernel/cpu/sh3/entry.S | 273 ++++++++++++++++++---------------- 2 files changed, 155 insertions(+), 149 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-hp6xx/pm_wakeup.S b/arch/sh/boards/mach-hp6xx/pm_wakeup.S index 44b648cf6f2..4f18d44e054 
100644 --- a/arch/sh/boards/mach-hp6xx/pm_wakeup.S +++ b/arch/sh/boards/mach-hp6xx/pm_wakeup.S @@ -10,47 +10,32 @@ #include #include -#define k0 r0 -#define k1 r1 -#define k2 r2 -#define k3 r3 -#define k4 r4 - /* * Kernel mode register usage: * k0 scratch * k1 scratch - * k2 scratch (Exception code) - * k3 scratch (Return address) - * k4 scratch - * k5 reserved - * k6 Global Interrupt Mask (0--15 << 4) - * k7 CURRENT_THREAD_INFO (pointer to current thread info) + * For more details, please have a look at entry.S */ +#define k0 r0 +#define k1 r1 + ENTRY(wakeup_start) ! clear STBY bit - mov #-126, k2 + mov #-126, k1 and #127, k0 - mov.b k0, @k2 + mov.b k0, @k1 ! enable refresh mov.l 5f, k1 mov.w 6f, k0 mov.w k0, @k1 ! jump to handler - mov.l 2f, k2 - mov.l 3f, k3 - mov.l @k2, k2 - mov.l 4f, k1 jmp @k1 - nop + nop .align 2 -1: .long EXPEVT -2: .long INTEVT -3: .long ret_from_irq -4: .long handle_exception +4: .long handle_interrupt 5: .long 0xffffff68 6: .word 0x0524 diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index b4106d0c68e..f3db143e70b 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -16,6 +16,7 @@ #include #include #include +#include ! NOTE: ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address @@ -294,7 +295,7 @@ skip_restore: mov #0xf0, k1 extu.b k1, k1 not k1, k1 - and k1, k2 ! Mask orignal SR value + and k1, k2 ! Mask original SR value ! mov k3, k0 ! Calculate IMASK-bits shlr2 k0 @@ -335,82 +336,54 @@ skip_restore: .balign 4096,0,4096 ENTRY(vbr_base) .long 0 +! +! 0x100: General exception vector ! .balign 256,0,256 general_exception: - mov.l 1f, k2 - mov.l 2f, k3 -#ifdef CONFIG_CPU_SUBTYPE_SHX3 - mov.l @k2, k2 +#ifndef CONFIG_CPU_SUBTYPE_SHX3 + bra handle_exception + sts pr, k3 ! save original pr value in k3 +#else + mov.l 1f, k4 + mov.l @k4, k4 ! Is EXPEVT larger than 0x800? mov #0x8, k0 shll8 k0 - cmp/hs k0, k2 + cmp/hs k0, k4 bf 0f ! then add 0x580 (k2 is 0xd80 or 0xda0) mov #0x58, k0 shll2 k0 shll2 k0 - add k0, k2 + add k0, k4 0: - bra handle_exception + ! Setup stack and save DSP context (k0 contains original r15 on return) + bsr prepare_stack_save_dsp nop -#else - bra handle_exception - mov.l @k2, k2 -#endif - .align 2 -1: .long EXPEVT -2: .long ret_from_exception -! -! - .balign 1024,0,1024 -tlb_miss: - mov.l 1f, k2 - mov.l 4f, k3 - bra handle_exception - mov.l @k2, k2 -! - .balign 512,0,512 -interrupt: - mov.l 3f, k3 -#if defined(CONFIG_KGDB) - mov.l 2f, k2 - ! Debounce (filter nested NMI) - mov.l @k2, k0 - mov.l 5f, k1 - cmp/eq k1, k0 - bf 0f - mov.l 6f, k1 - tas.b @k1 - bt 0f - rte + ! Save registers / Switch to bank 0 + bsr save_regs ! needs original pr value in k3 + mov.l k4, k2 ! keep vector in k2 + + bra handle_exception_special nop - .align 2 -2: .long INTEVT -5: .long NMI_VEC -6: .long in_nmi -0: -#endif /* defined(CONFIG_KGDB) */ - bra handle_exception - mov #-1, k2 ! interrupt exception marker .align 2 1: .long EXPEVT -3: .long ret_from_irq -4: .long ret_from_exception +#endif -! -! - .align 2 -ENTRY(handle_exception) - ! Using k0, k1 for scratch registers (r0_bank1, r1_bank), - ! save all registers onto stack. - ! +! prepare_stack_save_dsp() +! - roll back gRB +! - switch to kernel stack +! - save DSP +! k0 returns original sp (after roll back) +! k1 trashed +! k2 trashed +prepare_stack_save_dsp: #ifdef CONFIG_GUSA ! Check for roll back gRB (User and Kernel) mov r15, k0 @@ -430,7 +403,7 @@ ENTRY(handle_exception) 2: mov k1, r15 ! SP = r1 1: #endif - + ! 
Switch to kernel stack if needed stc ssr, k0 ! Is it from kernel space? shll k0 ! Check MD bit (bit30) by shifting it into... shll k0 ! ...the T bit @@ -443,18 +416,17 @@ ENTRY(handle_exception) add current, k1 mov k1, r15 ! change to kernel stack ! -1: mov.l 2f, k1 - ! +1: #ifdef CONFIG_SH_DSP - mov.l r2, @-r15 ! Save r2, we need another reg - stc sr, k4 - mov.l 1f, r2 - tst r2, k4 ! Check if in DSP mode - mov.l @r15+, r2 ! Restore r2 now + ! Save DSP context if needed + stc sr, k1 + mov #0x10, k2 + shll8 k2 ! DSP=1 (0x00001000) + tst k2, k1 ! Check if in DSP mode (passed in k2) bt/s skip_save - mov #0, k4 ! Set marker for no stack frame + mov #0, k1 ! Set marker for no stack frame - mov r2, k4 ! Backup r2 (in k4) for later + mov k2, k1 ! Save has-frame marker ! Save DSP registers on stack stc.l mod, @-r15 @@ -473,35 +445,73 @@ ENTRY(handle_exception) ! as we're not at all interested in supporting ancient toolchains at ! this point. -- PFM. - mov r15, r2 + mov r15, k2 .word 0xf653 ! movs.l a1, @-r2 .word 0xf6f3 ! movs.l a0g, @-r2 .word 0xf6d3 ! movs.l a1g, @-r2 .word 0xf6c3 ! movs.l m0, @-r2 .word 0xf6e3 ! movs.l m1, @-r2 - mov r2, r15 + mov k2, r15 - mov k4, r2 ! Restore r2 - mov.l 1f, k4 ! Force DSP stack frame skip_save: - mov.l k4, @-r15 ! Push DSP mode marker onto stack + mov.l k1, @-r15 ! Push DSP mode marker onto stack #endif - ! Save the user registers on the stack. - mov.l k2, @-r15 ! EXPEVT + rts + nop +! +! 0x400: Instruction and Data TLB miss exception vector +! + .balign 1024,0,1024 +tlb_miss: + sts pr, k3 ! save original pr value in k3 - mov #-1, k4 - mov.l k4, @-r15 ! set TRA (default: -1) - ! +handle_exception: + ! Setup stack and save DSP context (k0 contains original r15 on return) + bsr prepare_stack_save_dsp + nop + + ! Save registers / Switch to bank 0 + mov.l 5f, k2 ! vector register address + bsr save_regs ! needs original pr value in k3 + mov.l @k2, k2 ! read out vector and keep in k2 + +handle_exception_special: + ! Setup return address and jump to exception handler + mov.l 7f, r9 ! fetch return address + stc r2_bank, r0 ! k2 (vector) + mov.l 6f, r10 + shlr2 r0 + shlr r0 + mov.l @(r0, r10), r10 + jmp @r10 + lds r9, pr ! put return address in pr + + .align L1_CACHE_SHIFT + +! save_regs() +! - save vector, default tra, macl, mach, gbr, ssr, pr* and spc on the stack +! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack +! - switch bank +! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack +! k0 contains original stack pointer* +! k1 trashed +! k2 passes vector (EXPEVT) +! k3 passes original pr* +! k4 trashed +! BL=1 on entry, on exit BL=0. + +save_regs: + mov #-1, r1 + mov.l k2, @-r15 ! vector in k2 + mov.l k1, @-r15 ! set TRA (default: -1) sts.l macl, @-r15 sts.l mach, @-r15 stc.l gbr, @-r15 stc.l ssr, @-r15 - sts.l pr, @-r15 + mov.l k3, @-r15 ! original pr in k3 stc.l spc, @-r15 - ! - lds k3, pr ! Set the return address to pr - ! - mov.l k0, @-r15 ! save orignal stack + + mov.l k0, @-r15 ! original stack pointer in k0 mov.l r14, @-r15 mov.l r13, @-r15 mov.l r12, @-r15 @@ -509,13 +519,15 @@ skip_save: mov.l r10, @-r15 mov.l r9, @-r15 mov.l r8, @-r15 - ! - stc sr, r8 ! Back to normal register bank, and - or k1, r8 ! Block all interrupts - mov.l 3f, k1 - and k1, r8 ! ... - ldc r8, sr ! ...changed here. - ! + + mov.l 0f, k3 ! SR bits to set in k3 + mov.l 1f, k4 ! 
SR bits to clear in k4 + + stc sr, r8 + or k3, r8 + and k4, r8 + ldc r8, sr + mov.l r7, @-r15 mov.l r6, @-r15 mov.l r5, @-r15 @@ -523,52 +535,61 @@ skip_save: mov.l r3, @-r15 mov.l r2, @-r15 mov.l r1, @-r15 - mov.l r0, @-r15 - - /* - * This gets a bit tricky.. in the INTEVT case we don't want to use - * the VBR offset as a destination in the jump call table, since all - * of the destinations are the same. In this case, (interrupt) sets - * a marker in r2 (now r2_bank since SR.RB changed), which we check - * to determine the exception type. For all other exceptions, we - * forcibly read EXPEVT from memory and fix up the jump address, in - * the interrupt exception case we jump to do_IRQ() and defer the - * INTEVT read until there. As a bonus, we can also clean up the SR.RB - * checks that do_IRQ() was doing.. - */ - stc r2_bank, r8 - cmp/pz r8 - bf interrupt_exception - shlr2 r8 - shlr r8 - mov.l 4f, r9 - add r8, r9 - mov.l @r9, r9 - jmp @r9 - nop rts - nop + mov.l r0, @-r15 +! +! 0x600: Interrupt / NMI vector +! + .balign 512,0,512 +ENTRY(handle_interrupt) +#if defined(CONFIG_KGDB) + mov.l 2f, k2 + ! Debounce (filter nested NMI) + mov.l @k2, k0 + mov.l 9f, k1 + cmp/eq k1, k0 + bf 11f + mov.l 10f, k1 + tas.b @k1 + bt 11f + rte + nop .align 2 -1: .long 0x00001000 ! DSP=1 -2: .long 0x000080f0 ! FD=1, IMASK=15 -3: .long 0xcfffffff ! RB=0, BL=0 -4: .long exception_handling_table +9: .long NMI_VEC +10: .long in_nmi +11: +#endif /* defined(CONFIG_KGDB) */ + sts pr, k3 ! save original pr value in k3 -interrupt_exception: - mov.l 1f, r9 - mov.l 2f, r4 - mov.l @r4, r4 - jmp @r9 - mov r15, r5 - rts + ! Setup stack and save DSP context (k0 contains original r15 on return) + bsr prepare_stack_save_dsp nop - .align 2 -1: .long do_IRQ -2: .long INTEVT + ! Save registers / Switch to bank 0 + bsr save_regs ! needs original pr value in k3 + mov #-1, k2 ! default vector kept in k2 + + ! Setup return address and jump to do_IRQ + mov.l 4f, r9 ! fetch return address + lds r9, pr ! put return address in pr + mov.l 2f, r4 + mov.l 3f, r9 + mov.l @r4, r4 ! pass INTEVT vector as arg0 + jmp @r9 + mov r15, r5 ! pass saved registers as arg1 - .align 2 ENTRY(exception_none) rts nop + + .align L1_CACHE_SHIFT +exception_data: +0: .long 0x000080f0 ! FD=1, IMASK=15 +1: .long 0xcfffffff ! 
RB=0, BL=0 +2: .long INTEVT +3: .long do_IRQ +4: .long ret_from_irq +5: .long EXPEVT +6: .long exception_handling_table +7: .long ret_from_exception -- cgit v1.2.3 From 1dd22722f6bf9be9821657a2d59fae4d4365fb32 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 23 Feb 2009 07:15:07 +0000 Subject: sh: rework register restore code for sh3/sh4/sh4a This patch reworks the sh3/sh4/sh4a register restore code in the following ways: - break out restore_regs() from restore_all() - the register saving order is unchanged - use restore_regs() in sh_bios_handler and restore_all - document the function Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 68 ++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index f3db143e70b..cbffbffce35 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -188,44 +188,35 @@ call_dae: #if defined(CONFIG_SH_STANDARD_BIOS) /* Unwind the stack and jmp to the debug entry */ ENTRY(sh_bios_handler) - mov.l @r15+, r0 - mov.l @r15+, r1 - mov.l @r15+, r2 - mov.l @r15+, r3 - mov.l @r15+, r4 - mov.l @r15+, r5 - mov.l @r15+, r6 - mov.l @r15+, r7 - stc sr, r8 - mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F - or r9, r8 - ldc r8, sr ! here, change the register bank - mov.l @r15+, r8 - mov.l @r15+, r9 - mov.l @r15+, r10 - mov.l @r15+, r11 - mov.l @r15+, r12 - mov.l @r15+, r13 - mov.l @r15+, r14 - mov.l @r15+, k0 - ldc.l @r15+, spc - lds.l @r15+, pr - mov.l @r15+, k1 - ldc.l @r15+, gbr - lds.l @r15+, mach - lds.l @r15+, macl - mov k0, r15 + mov.l 1f, r8 + bsr restore_regs + nop + + lds k2, pr ! restore pr + mov k4, r15 ! mov.l 2f, k0 mov.l @k0, k0 jmp @k0 - ldc k1, ssr + ldc k3, ssr .align 2 1: .long 0x300000f0 2: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ -restore_all: +! restore_regs() +! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack +! - switch bank +! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack +! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra +! k2 returns original pr +! k3 returns original sr +! k4 returns original stack pointer +! r8 passes SR bitmask, overwritten with restored data on return +! r9 trashed +! BL=0 on entry, on exit BL=1 (depending on r8). + +restore_regs: mov.l @r15+, r0 mov.l @r15+, r1 mov.l @r15+, r2 @@ -235,10 +226,9 @@ restore_all: mov.l @r15+, r6 mov.l @r15+, r7 ! - stc sr, r8 - mov.l 7f, r9 - or r9, r8 ! BL =1, RB=1 - ldc r8, sr ! here, change the register bank + stc sr, r9 + or r8, r9 + ldc r9, sr ! mov.l @r15+, r8 mov.l @r15+, r9 @@ -249,12 +239,20 @@ restore_all: mov.l @r15+, r14 mov.l @r15+, k4 ! original stack pointer ldc.l @r15+, spc - lds.l @r15+, pr + mov.l @r15+, k2 ! original PR mov.l @r15+, k3 ! original SR ldc.l @r15+, gbr lds.l @r15+, mach lds.l @r15+, macl - add #4, r15 ! Skip syscall number + rts + add #4, r15 ! Skip syscall number + +restore_all: + mov.l 7f, r8 + bsr restore_regs + nop + + lds k2, pr ! restore pr ! #ifdef CONFIG_SH_DSP mov.l @r15+, k0 ! DSP mode marker -- cgit v1.2.3 From 4f099ebb27211d378304ddcfa507097f5128f5b9 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 23 Feb 2009 07:16:34 +0000 Subject: sh: remove EXPEVT vector from stack on sh3/sh4/sh4a Remove EXPEVT vector from the stack, lookup_exception_vector() for sh3/sh4/sh4a is already using k2 to get the vector. 
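(Illustrative aside, not part of the patch: the ptrace.h hunk below only changes address arithmetic. A minimal userspace sketch of that arithmetic — with a placeholder register struct standing in for the kernel's pt_regs — shows the effect of dropping the extra saved word:)

    /*
     * Sketch only: the register frame sits at the top of the task stack
     * page, below any extra saved words.  Dropping the saved EXPEVT word
     * moves the computed frame address up by sizeof(unsigned long).
     * pt_regs_demo is a placeholder, not the kernel's pt_regs layout.
     */
    #include <stdio.h>

    #define THREAD_SIZE 8192UL

    struct pt_regs_demo {
    	unsigned long regs[16];
    	unsigned long pc, sr;
    };

    static unsigned long pt_regs_addr(unsigned long stack_page, int extra_words)
    {
    	return stack_page + THREAD_SIZE
    		- extra_words * sizeof(unsigned long)
    		- sizeof(struct pt_regs_demo);
    }

    int main(void)
    {
    	unsigned long page = 0x10000000UL;

    	printf("with saved EXPEVT word:    %#lx\n", pt_regs_addr(page, 1));
    	printf("without saved EXPEVT word: %#lx\n", pt_regs_addr(page, 0));
    	return 0;
    }

The assembly change below then simply stops pushing the vector in save_regs() and stops popping it on the restore path.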
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/ptrace.h | 8 +++----- arch/sh/kernel/cpu/sh3/entry.S | 5 +---- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 12912ab80c1..81c6568fdb3 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -122,14 +122,12 @@ extern void user_disable_single_step(struct task_struct *); #ifdef CONFIG_SH_DSP #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ - - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1) + - sizeof(struct pt_dspregs)) - 1) #define task_pt_dspregs(task) \ - ((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE \ - - sizeof(unsigned long)) - 1) + ((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE) - 1) #else #define task_pt_regs(task) \ - ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ - - sizeof(unsigned long)) - 1) + ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1) #endif static inline unsigned long profile_pc(struct pt_regs *regs) diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index cbffbffce35..0271fe08de2 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -312,7 +312,6 @@ skip_restore: mov #0, k1 mov.b k1, @k0 #endif - mov.l @r15+, k2 ! restore EXPEVT mov k4, r15 rte nop @@ -487,20 +486,18 @@ handle_exception_special: .align L1_CACHE_SHIFT ! save_regs() -! - save vector, default tra, macl, mach, gbr, ssr, pr* and spc on the stack +! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack ! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack ! - switch bank ! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack ! k0 contains original stack pointer* ! k1 trashed -! k2 passes vector (EXPEVT) ! k3 passes original pr* ! k4 trashed ! BL=1 on entry, on exit BL=0. save_regs: mov #-1, r1 - mov.l k2, @-r15 ! vector in k2 mov.l k1, @-r15 ! set TRA (default: -1) sts.l macl, @-r15 sts.l mach, @-r15 -- cgit v1.2.3 From 0197f21ca5c5ed0df2a14a60ef073e8163e6533b Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 27 Feb 2009 16:41:17 +0900 Subject: sh: prefetch early exception data on sh4/sh4a. Prefetch early exception data. There is unused space in our exception handler cache line anyway, so this is almost free. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/entry-macros.S | 5 +++++ arch/sh/kernel/cpu/sh3/entry.S | 7 +++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 2dab0b8d945..3a4752a6572 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -31,3 +31,8 @@ #endif .endm +#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) +# define PREF(x) pref @x +#else +# define PREF(x) nop +#endif diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 0271fe08de2..e984e94394e 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -463,9 +463,11 @@ tlb_miss: sts pr, k3 ! save original pr value in k3 handle_exception: + mova exception_data, k0 + ! Setup stack and save DSP context (k0 contains original r15 on return) bsr prepare_stack_save_dsp - nop + PREF(k0) ! Save registers / Switch to bank 0 mov.l 5f, k2 ! 
vector register address @@ -556,10 +558,11 @@ ENTRY(handle_interrupt) 11: #endif /* defined(CONFIG_KGDB) */ sts pr, k3 ! save original pr value in k3 + mova exception_data, k0 ! Setup stack and save DSP context (k0 contains original r15 on return) bsr prepare_stack_save_dsp - nop + PREF(k0) ! Save registers / Switch to bank 0 bsr save_regs ! needs original pr value in k3 -- cgit v1.2.3 From a73090ffaf0f6853880d9ac3fff7e5d88215131a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 27 Feb 2009 16:42:05 +0900 Subject: sh: Disable unsupportable prefetching on SH-3. The SH-3 does not support 'pref'-based prefetching, only SH-2A and SH-4A parts do. Remove SH-3 from the list. Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_32.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index dcaed24c71f..efdd78a53b1 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -191,8 +191,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(_regs) ((_regs)->regs[15]) -#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ - defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) #define PREFETCH_STRIDE L1_CACHE_BYTES #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -- cgit v1.2.3 From 3e91faec47e9e12b965c952d698b0bb64847af06 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 24 Feb 2009 22:23:51 +0900 Subject: sh: fix P4 iounmap() pass-through Fix iounmap() of pass-through P4 addresses. Without this patch iounmap() on the sh7780 rtc area results in a warning message. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/mm/ioremap_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c index 32946fba123..8dc77026a0b 100644 --- a/arch/sh/mm/ioremap_32.c +++ b/arch/sh/mm/ioremap_32.c @@ -119,7 +119,7 @@ void __iounmap(void __iomem *addr) unsigned long seg = PXSEG(vaddr); struct vm_struct *p; - if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr)) + if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr)) return; #ifdef CONFIG_32BIT -- cgit v1.2.3 From bdaa6e8062d7f8085d8ed94ff88c99406ad53d79 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 24 Feb 2009 22:58:57 +0900 Subject: sh: multiple vectors per irq - base Instead of keeping the single vector -> single linux irq mapping we extend the intc code to support merging of vectors to a single linux irq. This helps processors such as sh7750, sh7780 and sh7785 which have more vectors than masking ability. With this patch in place we can modify the intc tables to use one irq per maskable irq source. Please note the following: - If multiple vectors share the same enum then only the first vector will be available as a linux irq. - Drivers may need to be rewritten to get pending irq source from the hardware block instead of irq number. This patch together with the sh7785 specific intc tables solves DMA controller irq issues related to buggy interrupt masking. 
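(Illustrative aside: a rough userspace sketch of the vector-merging idea — several hardware vectors resolving to a single Linux irq — using vector offsets and irq numbers taken from the sh7750 tables below. demo_evt2irq() is a stand-in for the concept only, not the kernel's intc_evt2irq():)

    /*
     * Sketch only: multiple exception vectors map to one maskable Linux
     * irq.  All four SCIF vectors resolve to irq 40, and the three RTC
     * vectors resolve to the shared irq 20, matching the platform data
     * in the sh7750 patch below.
     */
    #include <stdio.h>

    struct vect_map {
    	unsigned int vect;
    	int irq;
    };

    static const struct vect_map demo_map[] = {
    	{ 0x700, 40 }, { 0x720, 40 }, { 0x740, 40 }, { 0x760, 40 },	/* SCIF */
    	{ 0x480, 20 }, { 0x4a0, 20 }, { 0x4c0, 20 },			/* RTC  */
    };

    static int demo_evt2irq(unsigned int vect)
    {
    	unsigned int i;

    	for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
    		if (demo_map[i].vect == vect)
    			return demo_map[i].irq;
    	return -1;
    }

    int main(void)
    {
    	printf("vector 0x720 -> irq %d\n", demo_evt2irq(0x720));
    	printf("vector 0x4a0 -> irq %d\n", demo_evt2irq(0x4a0));
    	return 0;
    }

A driver behind such a merged irq then reads the pending source from its own hardware block rather than from the irq number, as the note above points out.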
Reported-by: Yoshihiro Shimoda Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 64b7690c664..90d63aefd27 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -106,7 +106,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) } #endif - irq = irq_demux(evt2irq(irq)); + irq = irq_demux(intc_evt2irq(irq)); #ifdef CONFIG_IRQSTACKS curctx = (union irq_ctx *)current_thread_info(); -- cgit v1.2.3 From 69977e7e25a291fd71c6dcaf2c5ea9e776afede5 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 24 Feb 2009 22:59:04 +0900 Subject: sh: multiple vectors per irq - sh7750 Update intc tables and platform data to use one linux irq per maskable interrupt source instead of keeping the one-to-one mapping between vectors and linux irqs. This fixes potential irq masking issues for sh775x hardware blocks such as SCI/SCIF/RTC/DMAC/TMU2/REF. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4/setup-sh7750.c | 87 ++++++++++------------------------- 1 file changed, 25 insertions(+), 62 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index ec884039b91..a1c80d909cd 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -21,17 +21,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ + /* Shared Period/Carry/Alarm IRQ */ .start = 20, .flags = IORESOURCE_IRQ, }, @@ -50,13 +40,13 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCI, - .irqs = { 23, 24, 25, 0 }, + .irqs = { 23, 23, 23, 0 }, }, { #endif .mapbase = 0xffe80000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, + .irqs = { 40, 40, 40, 40 }, }, { .flags = 0, } @@ -87,43 +77,27 @@ enum { /* interrupt sources */ IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */ - HUDI, GPIOI, - DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, - DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, - DMAC_DMAE, + HUDI, GPIOI, DMAC, PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, - TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, - RTC_ATI, RTC_PRI, RTC_CUI, - SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, - SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, - WDT, - REF_RCMI, REF_ROVI, + TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF, /* interrupt groups */ - DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, + PCIC1, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), - INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), - INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), - INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), - INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500), + 
INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540), + INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720), + INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760), INTC_VECT(WDT, 0x560), - INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), -}; - -static struct intc_group groups[] __initdata = { - INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), - INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), - INTC_GROUP(REF, REF_RCMI, REF_ROVI), + INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0), }; static struct intc_prio_reg prio_registers[] __initdata = { @@ -136,7 +110,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { PCIC1, PCIC0_PCISERR } }, }; -static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL, NULL, prio_registers, NULL); /* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ @@ -145,39 +119,28 @@ static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups, defined(CONFIG_CPU_SUBTYPE_SH7751) || \ defined(CONFIG_CPU_SUBTYPE_SH7091) static struct intc_vect vectors_dma4[] __initdata = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMAE, 0x6c0), -}; - -static struct intc_group groups_dma4[] __initdata = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMAE), + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x6c0), }; static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4", - vectors_dma4, groups_dma4, + vectors_dma4, NULL, NULL, prio_registers, NULL); #endif /* SH7750R and SH7751R both have 8-channel DMA controllers */ #if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R) static struct intc_vect vectors_dma8[] __initdata = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), - INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), - INTC_VECT(DMAC_DMAE, 0x6c0), -}; - -static struct intc_group groups_dma8[] __initdata = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, - DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0), + INTC_VECT(DMAC, 0x6c0), }; static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8", - vectors_dma8, groups_dma8, + vectors_dma8, NULL, NULL, prio_registers, NULL); #endif -- cgit v1.2.3 From a842fb2d11ee478dc2fd09b736b1bc62c386f18a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 24 Feb 2009 22:59:12 +0900 Subject: sh: multiple vectors per irq - sh7780 Update intc tables and platform data to use one linux irq per maskable interrupt source instead of keeping the one-to-one mapping between vectors and linux irqs. 
This fixes potential irq masking issues for sh7780 hardware blocks such as SCIF/RTC/DMAC/PCIC5/MMCIF/FLCTL/GPIO Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 97 +++++++++++----------------------- 1 file changed, 30 insertions(+), 67 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index fb8200cc744..6f7227cd65b 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -20,17 +20,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ + /* Shared Period/Carry/Alarm IRQ */ .start = 20, .flags = IORESOURCE_IRQ, }, @@ -48,12 +38,12 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, + .irqs = { 40, 40, 40, 40 }, }, { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 76, 77, 79, 78 }, + .irqs = { 76, 76, 76, 76 }, }, { .flags = 0, } @@ -90,82 +80,55 @@ enum { IRL_HHLL, IRL_HHLH, IRL_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, - RTC_ATI, RTC_PRI, RTC_CUI, - WDT, - TMU0, TMU1, TMU2, TMU2_TICPI, - HUDI, - DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - DMAC0_DMINT4, DMAC0_DMINT5, DMAC1_DMINT6, DMAC1_DMINT7, - CMT, HAC, - PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, - PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0, - SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, - SIOF, HSPI, - MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY, - DMAC1_DMINT8, DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11, - TMU3, TMU4, TMU5, - SSI, - FLCTL_FLSTE, FLCTL_FLEND, FLCTL_FLTRQ0, FLCTL_FLTRQ1, - GPIOI0, GPIOI1, GPIOI2, GPIOI3, + RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO, /* interrupt groups */ - RTC, TMU012, DMAC0, SCIF0, DMAC45, DMAC1, - PCIC5, SCIF1, MMCIF, TMU345, FLCTL, GPIO, + TMU012, TMU345, }; static struct intc_vect vectors[] __initdata = { - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600), - INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660), - INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0), - INTC_VECT(DMAC0_DMAE, 0x6c0), - INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720), - INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), - INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0), - INTC_VECT(DMAC1_DMINT6, 0x7c0), INTC_VECT(DMAC1_DMINT7, 0x7e0), + INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660), + INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0), + INTC_VECT(DMAC0, 0x6c0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0), + INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0), INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980), INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), INTC_VECT(PCIINTB, 
0xa40), INTC_VECT(PCIINTC, 0xa60), - INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0), - INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0), - INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20), - INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0), - INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), + INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0), + INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0), INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80), - INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20), - INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60), - INTC_VECT(DMAC1_DMINT8, 0xd80), INTC_VECT(DMAC1_DMINT9, 0xda0), - INTC_VECT(DMAC1_DMINT10, 0xdc0), INTC_VECT(DMAC1_DMINT11, 0xde0), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), + INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0), + INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0), INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), INTC_VECT(TMU5, 0xe40), INTC_VECT(SSI, 0xe80), - INTC_VECT(FLCTL_FLSTE, 0xf00), INTC_VECT(FLCTL_FLEND, 0xf20), - INTC_VECT(FLCTL_FLTRQ0, 0xf40), INTC_VECT(FLCTL_FLTRQ1, 0xf60), - INTC_VECT(GPIOI0, 0xf80), INTC_VECT(GPIOI1, 0xfa0), - INTC_VECT(GPIOI2, 0xfc0), INTC_VECT(GPIOI3, 0xfe0), + INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20), + INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), }; static struct intc_group groups[] __initdata = { - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), - INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, - DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, - DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11), - INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0), - INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), - INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY), INTC_GROUP(TMU345, TMU3, TMU4, TMU5), - INTC_GROUP(FLCTL, FLCTL_FLSTE, FLCTL_FLEND, - FLCTL_FLTRQ0, FLCTL_FLTRQ1), - INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3), }; static struct intc_mask_reg mask_registers[] __initdata = { -- cgit v1.2.3 From 57e41c86e21c03941d17df29e0793fd04585d9ee Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 24 Feb 2009 22:59:19 +0900 Subject: sh: multiple vectors per irq - sh7785 Update intc tables and platform data to use one linux irq per maskable interrupt source instead of keeping the one-to-one mapping between vectors and linux irqs. 
This fixes potential irq masking issues for sh7785 hardware blocks such as SCIF/DMAC/PCIC5/MMCIF/GDTA/FLCTL/GPIO Signed-off-by: Magnus Damm Tested-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 95 ++++++++++++---------------------- 1 file changed, 32 insertions(+), 63 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index 30baa63b24c..d80802a49db 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -20,18 +20,13 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xffea0000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, + .irqs = { 40, 40, 40, 40 }, }, { .mapbase = 0xffeb0000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 44, 45, 47, 46 }, - }, - - /* - * The rest of these all have multiplexed IRQs - */ - { + .irqs = { 44, 44, 44, 44 }, + }, { .mapbase = 0xffec0000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, @@ -91,33 +86,19 @@ enum { IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, - WDT, - TMU0, TMU1, TMU2, TMU2_TICPI, - HUDI, - DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, - DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, - DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9, - DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE, - HSPI, + WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, DMAC0, SCIF0, SCIF1, DMAC1, HSPI, SCIF2, SCIF3, SCIF4, SCIF5, - PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, - PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0, - SIOF, - MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY, - DU, - GDTA_GACLI, GDTA_GAMCI, GDTA_GAERI, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + SIOF, MMCIF, DU, GDTA, TMU3, TMU4, TMU5, SSI0, SSI1, HAC0, HAC1, - FLCTL_FLSTE, FLCTL_FLEND, FLCTL_FLTRQ0, FLCTL_FLTRQ1, - GPIOI0, GPIOI1, GPIOI2, GPIOI3, + FLCTL, GPIO, /* interrupt groups */ - TMU012, DMAC0, SCIF0, SCIF1, DMAC1, - PCIC5, MMCIF, GDTA, TMU345, FLCTL, GPIO + TMU012, TMU345 }; static struct intc_vect vectors[] __initdata = { @@ -125,57 +106,45 @@ static struct intc_vect vectors[] __initdata = { INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600), - INTC_VECT(DMAC0_DMINT0, 0x620), INTC_VECT(DMAC0_DMINT1, 0x640), - INTC_VECT(DMAC0_DMINT2, 0x660), INTC_VECT(DMAC0_DMINT3, 0x680), - INTC_VECT(DMAC0_DMINT4, 0x6a0), INTC_VECT(DMAC0_DMINT5, 0x6c0), - INTC_VECT(DMAC0_DMAE, 0x6e0), - INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720), - INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), - INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), - INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), - INTC_VECT(DMAC1_DMINT6, 0x880), INTC_VECT(DMAC1_DMINT7, 0x8a0), - INTC_VECT(DMAC1_DMINT8, 0x8c0), INTC_VECT(DMAC1_DMINT9, 0x8e0), - INTC_VECT(DMAC1_DMINT10, 0x900), INTC_VECT(DMAC1_DMINT11, 0x920), - INTC_VECT(DMAC1_DMAE, 0x940), + INTC_VECT(DMAC0, 0x620), INTC_VECT(DMAC0, 0x640), + INTC_VECT(DMAC0, 0x660), INTC_VECT(DMAC0, 0x680), + INTC_VECT(DMAC0, 0x6a0), INTC_VECT(DMAC0, 0x6c0), + INTC_VECT(DMAC0, 0x6e0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(SCIF1, 0x780), INTC_VECT(SCIF1, 0x7a0), + INTC_VECT(SCIF1, 0x7c0), INTC_VECT(SCIF1, 0x7e0), + INTC_VECT(DMAC1, 0x880), INTC_VECT(DMAC1, 0x8a0), + 
INTC_VECT(DMAC1, 0x8c0), INTC_VECT(DMAC1, 0x8e0), + INTC_VECT(DMAC1, 0x900), INTC_VECT(DMAC1, 0x920), + INTC_VECT(DMAC1, 0x940), INTC_VECT(HSPI, 0x960), INTC_VECT(SCIF2, 0x980), INTC_VECT(SCIF3, 0x9a0), INTC_VECT(SCIF4, 0x9c0), INTC_VECT(SCIF5, 0x9e0), INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), - INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0), - INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0), - INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), INTC_VECT(SIOF, 0xc00), - INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20), - INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), INTC_VECT(DU, 0xd80), - INTC_VECT(GDTA_GACLI, 0xda0), INTC_VECT(GDTA_GAMCI, 0xdc0), - INTC_VECT(GDTA_GAERI, 0xde0), + INTC_VECT(GDTA, 0xda0), INTC_VECT(GDTA, 0xdc0), + INTC_VECT(GDTA, 0xde0), INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), INTC_VECT(TMU5, 0xe40), INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0), INTC_VECT(HAC0, 0xec0), INTC_VECT(HAC1, 0xee0), - INTC_VECT(FLCTL_FLSTE, 0xf00), INTC_VECT(FLCTL_FLEND, 0xf20), - INTC_VECT(FLCTL_FLTRQ0, 0xf40), INTC_VECT(FLCTL_FLTRQ1, 0xf60), - INTC_VECT(GPIOI0, 0xf80), INTC_VECT(GPIOI1, 0xfa0), - INTC_VECT(GPIOI2, 0xfc0), INTC_VECT(GPIOI3, 0xfe0), + INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20), + INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), }; static struct intc_group groups[] __initdata = { INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), - INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, - DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), - INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, - DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE), - INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0), - INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY), - INTC_GROUP(GDTA, GDTA_GACLI, GDTA_GAMCI, GDTA_GAERI), INTC_GROUP(TMU345, TMU3, TMU4, TMU5), - INTC_GROUP(FLCTL, FLCTL_FLSTE, FLCTL_FLEND, - FLCTL_FLTRQ0, FLCTL_FLTRQ1), - INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3), }; static struct intc_mask_reg mask_registers[] __initdata = { -- cgit v1.2.3 From 0d5e19ab07cf61cb0794cfac4df0a1bd5d1e19d7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 27 Feb 2009 17:02:28 +0900 Subject: sh: Fix up SH-X3 general exception handler build. With the recent entry.S refactoring, the SH-X3 path had a mov.l for a register to register copy, resulting in: AS arch/sh/kernel/cpu/sh4/../sh3/entry.o arch/sh/kernel/cpu/sh4/../sh3/entry.S: Assembler messages: arch/sh/kernel/cpu/sh4/../sh3/entry.S:366: Error: invalid operands for opcode make[3]: *** [arch/sh/kernel/cpu/sh4/../sh3/entry.o] Error 1 Switch it over to a mov to fix it up. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index e984e94394e..c4829d6dee5 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -363,7 +363,7 @@ general_exception: ! Save registers / Switch to bank 0 bsr save_regs ! needs original pr value in k3 - mov.l k4, k2 ! keep vector in k2 + mov k4, k2 ! keep vector in k2 bra handle_exception_special nop -- cgit v1.2.3 From d09375a9ec80d8bae06196bb247460e0829d884c Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:52 -0300 Subject: x86, fixmap: rename __FIXADDR_SIZE and __FIXADDR_BOOT_SIZE Impact: rename Rename __FIXADDR_SIZE to FIXADDR_SIZE and __FIXADDR_BOOT_SIZE to FIXADDR_BOOT_SIZE. Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_32.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h index 047d9bab2b3..d3fd0b92b4e 100644 --- a/arch/x86/include/asm/fixmap_32.h +++ b/arch/x86/include/asm/fixmap_32.h @@ -106,10 +106,10 @@ extern void reserve_top_address(unsigned long reserve); #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) -#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) -#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_32_H */ -- cgit v1.2.3 From ab93e3c45dbac66c2e8e24fd560c052a3907903e Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:53 -0300 Subject: x86, fixmap: define FIXADDR_BOOT_* and redefine FIX_ADDR_SIZE Impact: new interface, not yet used Now, with these macros, x86_64 code can know where start the permanent and non-permanent fixed mapped address. This patch make these macros equal fixmap_32.h for future x86 integration. Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_64.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index 298d9ba3fae..d905de08675 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -69,8 +69,11 @@ enum fixed_addresses { }; #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) /* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) -- cgit v1.2.3 From fd862dde18c3e360f510780e1d1bf615866b11c2 Mon Sep 17 00:00:00 2001 From: "Gustavo F. 
Padovan" Date: Sun, 15 Feb 2009 21:48:54 -0300 Subject: x86, fixmap: define reserve_top_address for x86_64 Impact: new interface (not yet use) Define reserve_top_address for x86_64; only for later x86 integration. Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_64.h | 2 ++ arch/x86/mm/pgtable.c | 18 ++++++++++++++++++ arch/x86/mm/pgtable_32.c | 16 ---------------- 3 files changed, 20 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index d905de08675..b6c06dd55f1 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -68,6 +68,8 @@ enum fixed_addresses { __end_of_fixed_addresses }; +extern void reserve_top_address(unsigned long reserve); + #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 86f2ffc43c3..5b7c7c8464f 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma, return young; } +/** + * reserve_top_address - reserves a hole in the top of kernel address space + * @reserve - size of hole to reserve + * + * Can be used to relocate the fixmap area and poke a hole in the top + * of kernel address space to make room for a hypervisor. + */ +void __init reserve_top_address(unsigned long reserve) +{ +#ifdef CONFIG_X86_32 + BUG_ON(fixmaps_set > 0); + printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", + (int)-reserve); + __FIXADDR_TOP = -reserve - PAGE_SIZE; + __VMALLOC_RESERVE += reserve; +#endif +} + int fixmaps_set; void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 0951db9ee51..c3cf6e11576 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -97,22 +97,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) unsigned long __FIXADDR_TOP = 0xfffff000; EXPORT_SYMBOL(__FIXADDR_TOP); -/** - * reserve_top_address - reserves a hole in the top of kernel address space - * @reserve - size of hole to reserve - * - * Can be used to relocate the fixmap area and poke a hole in the top - * of kernel address space to make room for a hypervisor. - */ -void __init reserve_top_address(unsigned long reserve) -{ - BUG_ON(fixmaps_set > 0); - printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", - (int)-reserve); - __FIXADDR_TOP = -reserve - PAGE_SIZE; - __VMALLOC_RESERVE += reserve; -} - /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the -- cgit v1.2.3 From 2ae38daf25204b7c3725e053052f77469eff62cf Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:55 -0300 Subject: x86, fixmap: add CONFIG_X86_{LOCAL,IO}_APIC Impact: New fixmap allocations Add CONFIG_X86_{LOCAL,IO}_APIC to enum fixed_address. FIX_APIC_BASE is used only when CONFIG_X86_LOCAL_APIC is enabled and FIX_IO_APIC_BASE_* are used only when CONFIG_X86_IO_APIC is enabled. Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/fixmap_64.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index b6c06dd55f1..7a518ec8263 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -40,9 +40,13 @@ enum fixed_addresses { VSYSCALL_HPET, FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, +#ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ +#endif +#ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, +#endif FIX_EFI_IO_MAP_LAST_PAGE, FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + MAX_EFI_IO_PAGES - 1, -- cgit v1.2.3 From 5f403fa9de5b3bb1309ec5f72b1e52e5b51321d4 Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:56 -0300 Subject: x86, fixmap: add CONFIG_EFI Impact: new fixmap allocation FIX_EFI_IO_MAP_FIRST_PAGE is used only when EFI is enabled. Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_64.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index 7a518ec8263..baff6d0da99 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -16,7 +16,9 @@ #include #include #include +#ifdef CONFIG_EFI #include +#endif /* * Here we define all the compile-time 'special' virtual @@ -47,9 +49,11 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif +#ifdef CONFIG_EFI FIX_EFI_IO_MAP_LAST_PAGE, FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + MAX_EFI_IO_PAGES - 1, +#endif #ifdef CONFIG_PARAVIRT FIX_PARAVIRT_BOOTMAP, #endif -- cgit v1.2.3 From e365bcd9214db993436de7bc915b76ab8402045f Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:57 -0300 Subject: x86, fixmap: prepare fixmap_64.h for unification Impact: cleanup Just prepare fixmap for later mechanic unification. No real modification on code. text data bss dec hex filename 4312362 527192 421924 5261478 5048a6 vmlinux-64.after 4312362 527192 421924 5261478 5048a6 vmlinux-64.before Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_64.h | 76 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 69 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h index baff6d0da99..95acae0daa5 100644 --- a/arch/x86/include/asm/fixmap_64.h +++ b/arch/x86/include/asm/fixmap_64.h @@ -6,25 +6,63 @@ * for more details. * * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 */ #ifndef _ASM_X86_FIXMAP_64_H #define _ASM_X86_FIXMAP_64_H +#ifndef __ASSEMBLY__ #include #include #include #include +#ifdef CONFIG_X86_32 +#include +#include +#else #include #ifdef CONFIG_EFI #include #endif +#endif + +/* + * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall + * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. + * Because of this, FIXADDR_TOP x86 integration was left as later work. + */ +#ifdef CONFIG_X86_32 +/* used by vmalloc.c, vsyscall.lds.S. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap. 
+ */ +extern unsigned long __FIXADDR_TOP; +#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) + +#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) +#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) +#else +#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) + +/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ +#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) +#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) +#endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. + * for x86_32: We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. * * These 'compile-time allocated' memory buffers are * fixed-size 4k pages (or larger if used with an increment @@ -34,12 +72,16 @@ * TLB entries of such buffers will not be flushed across * task switches. */ - enum fixed_addresses { +#ifdef CONFIG_X86_32 + FIX_HOLE, + FIX_VDSO, +#else VSYSCALL_LAST_PAGE, VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, +#endif FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, #ifdef CONFIG_X86_LOCAL_APIC @@ -49,11 +91,32 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif +#ifdef CONFIG_X86_64 #ifdef CONFIG_EFI FIX_EFI_IO_MAP_LAST_PAGE, FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + MAX_EFI_IO_PAGES - 1, #endif +#endif +#ifdef CONFIG_X86_VISWS_APIC + FIX_CO_CPU, /* Cobalt timer */ + FIX_CO_APIC, /* Cobalt APIC Redirection Table */ + FIX_LI_PCIA, /* Lithium PCI Bridge A */ + FIX_LI_PCIB, /* Lithium PCI Bridge B */ +#endif +#ifdef CONFIG_X86_F00F_BUG + FIX_F00F_IDT, /* Virtual mapping for IDT */ +#endif +#ifdef CONFIG_X86_CYCLONE_TIMER + FIX_CYCLONE_TIMER, /*cyclone timer register*/ +#endif +#ifdef CONFIG_X86_32 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, +#ifdef CONFIG_PCI_MMCONFIG + FIX_PCIE_MCFG, +#endif +#endif #ifdef CONFIG_PARAVIRT FIX_PARAVIRT_BOOTMAP, #endif @@ -73,20 +136,19 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - (__end_of_permanent_fixed_addresses & 255), FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, +#ifdef CONFIG_X86_32 + FIX_WP_TEST, +#endif __end_of_fixed_addresses }; -extern void reserve_top_address(unsigned long reserve); -#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) +extern void reserve_top_address(unsigned long reserve); #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) -/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ -#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) - +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_64_H */ -- cgit v1.2.3 From c78f322ce8cea6052bc99513b57283d4d325a13e Mon Sep 17 00:00:00 2001 From: "Gustavo F. 
Padovan" Date: Sun, 15 Feb 2009 21:48:58 -0300 Subject: x86, fixmap: prepare fixmap_32.h for unification Impact: cleanup Just prepare fixmap for later mechanic unification. No real modification on code. text data bss dec hex filename 3831152 353188 372736 4557076 458914 vmlinux-32.after 3831152 353188 372736 4557076 458914 vmlinux-32.before Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap_32.h | 73 ++++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h index d3fd0b92b4e..8384df78eee 100644 --- a/arch/x86/include/asm/fixmap_32.h +++ b/arch/x86/include/asm/fixmap_32.h @@ -8,50 +8,80 @@ * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 */ #ifndef _ASM_X86_FIXMAP_32_H #define _ASM_X86_FIXMAP_32_H +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#ifdef CONFIG_X86_32 +#include +#include +#else +#include +#ifdef CONFIG_EFI +#include +#endif +#endif +/* + * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall + * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. + * Because of this, FIXADDR_TOP x86 integration was left as later work. + */ +#ifdef CONFIG_X86_32 /* used by vmalloc.c, vsyscall.lds.S. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. */ extern unsigned long __FIXADDR_TOP; +#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) + #define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) #define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) +#else +#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#include -#include +/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ +#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) +#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) +#endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses + * in the boot process. + * for x86_32: We allocate these special addresses * from the end of virtual memory (0xfffff000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * - * these 'compile-time allocated' memory buffers are - * fixed-size 4k pages. (or larger if used with an increment - * highger than 1) use fixmap_set(idx,phys) to associate + * These 'compile-time allocated' memory buffers are + * fixed-size 4k pages (or larger if used with an increment + * higher than 1). Use set_fixmap(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. 
*/ enum fixed_addresses { +#ifdef CONFIG_X86_32 FIX_HOLE, FIX_VDSO, +#else + VSYSCALL_LAST_PAGE, + VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, + VSYSCALL_HPET, +#endif FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, #ifdef CONFIG_X86_LOCAL_APIC @@ -59,7 +89,14 @@ enum fixed_addresses { #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, +#endif +#ifdef CONFIG_X86_64 +#ifdef CONFIG_EFI + FIX_EFI_IO_MAP_LAST_PAGE, + FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + + MAX_EFI_IO_PAGES - 1, +#endif #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ @@ -73,15 +110,20 @@ enum fixed_addresses { #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ #endif +#ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #ifdef CONFIG_PCI_MMCONFIG FIX_PCIE_MCFG, #endif +#endif #ifdef CONFIG_PARAVIRT FIX_PARAVIRT_BOOTMAP, #endif __end_of_permanent_fixed_addresses, +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT + FIX_OHCI1394_BASE, +#endif /* * 256 temporary boot-time mappings, used by early_ioremap(), * before ioremap() is functional. @@ -94,17 +136,14 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - (__end_of_permanent_fixed_addresses & 255), FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, +#ifdef CONFIG_X86_32 FIX_WP_TEST, -#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT - FIX_OHCI1394_BASE, #endif __end_of_fixed_addresses }; -extern void reserve_top_address(unsigned long reserve); - -#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) +extern void reserve_top_address(unsigned long reserve); #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -- cgit v1.2.3 From c577b098f9bf467fb05dc279ba83604cb3f7cea0 Mon Sep 17 00:00:00 2001 From: "Gustavo F. Padovan" Date: Sun, 15 Feb 2009 21:48:59 -0300 Subject: x86, fixmap: unify fixmap.h Impact: unification This patch unify fixmap_32.h and fixmap_64.h into fixmap.h. Things that we can't merge now are using CONFIG_X86_{32,64} (e.g.:vsyscall and EFI) Signed-off-by: Gustavo F. Padovan Acked-by: Glauber Costa Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/fixmap.h | 149 ++++++++++++++++++++++++++++++++++++- arch/x86/include/asm/fixmap_32.h | 154 --------------------------------------- arch/x86/include/asm/fixmap_64.h | 154 --------------------------------------- 3 files changed, 147 insertions(+), 310 deletions(-) delete mode 100644 arch/x86/include/asm/fixmap_32.h delete mode 100644 arch/x86/include/asm/fixmap_64.h (limited to 'arch') diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 23696d44a0a..dca8f03da5b 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -1,11 +1,155 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. 
Padovan, February 2009 + */ + #ifndef _ASM_X86_FIXMAP_H #define _ASM_X86_FIXMAP_H +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#ifdef CONFIG_X86_32 +#include +#include +#else +#include +#ifdef CONFIG_EFI +#include +#endif +#endif + +/* + * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall + * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. + * Because of this, FIXADDR_TOP x86 integration was left as later work. + */ +#ifdef CONFIG_X86_32 +/* used by vmalloc.c, vsyscall.lds.S. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap. + */ +extern unsigned long __FIXADDR_TOP; +#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) + +#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) +#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) +#else +#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) + +/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ +#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) +#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) +#endif + + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. + * for x86_32: We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * These 'compile-time allocated' memory buffers are + * fixed-size 4k pages (or larger if used with an increment + * higher than 1). Use set_fixmap(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ +enum fixed_addresses { #ifdef CONFIG_X86_32 -# include "fixmap_32.h" + FIX_HOLE, + FIX_VDSO, #else -# include "fixmap_64.h" + VSYSCALL_LAST_PAGE, + VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, + VSYSCALL_HPET, #endif + FIX_DBGP_BASE, + FIX_EARLYCON_MEM_BASE, +#ifdef CONFIG_X86_LOCAL_APIC + FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ +#endif +#ifdef CONFIG_X86_IO_APIC + FIX_IO_APIC_BASE_0, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, +#endif +#ifdef CONFIG_X86_64 +#ifdef CONFIG_EFI + FIX_EFI_IO_MAP_LAST_PAGE, + FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + + MAX_EFI_IO_PAGES - 1, +#endif +#endif +#ifdef CONFIG_X86_VISWS_APIC + FIX_CO_CPU, /* Cobalt timer */ + FIX_CO_APIC, /* Cobalt APIC Redirection Table */ + FIX_LI_PCIA, /* Lithium PCI Bridge A */ + FIX_LI_PCIB, /* Lithium PCI Bridge B */ +#endif +#ifdef CONFIG_X86_F00F_BUG + FIX_F00F_IDT, /* Virtual mapping for IDT */ +#endif +#ifdef CONFIG_X86_CYCLONE_TIMER + FIX_CYCLONE_TIMER, /*cyclone timer register*/ +#endif +#ifdef CONFIG_X86_32 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, +#ifdef CONFIG_PCI_MMCONFIG + FIX_PCIE_MCFG, +#endif +#endif +#ifdef CONFIG_PARAVIRT + FIX_PARAVIRT_BOOTMAP, +#endif + __end_of_permanent_fixed_addresses, +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT + FIX_OHCI1394_BASE, +#endif + /* + * 256 temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. 
+ * + * We round it up to the next 256 pages boundary so that we + * can have a single pgd entry and a single pte table: + */ +#define NR_FIX_BTMAPS 64 +#define FIX_BTMAPS_SLOTS 4 + FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - + (__end_of_permanent_fixed_addresses & 255), + FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, +#ifdef CONFIG_X86_32 + FIX_WP_TEST, +#endif + __end_of_fixed_addresses +}; + + +extern void reserve_top_address(unsigned long reserve); + +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) extern int fixmaps_set; @@ -69,4 +213,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h deleted file mode 100644 index 8384df78eee..00000000000 --- a/arch/x86/include/asm/fixmap_32.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * fixmap.h: compile-time virtual memory allocation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998 Ingo Molnar - * - * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 - * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 - */ - -#ifndef _ASM_X86_FIXMAP_32_H -#define _ASM_X86_FIXMAP_32_H - -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#ifdef CONFIG_X86_32 -#include -#include -#else -#include -#ifdef CONFIG_EFI -#include -#endif -#endif - -/* - * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall - * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. - * Because of this, FIXADDR_TOP x86 integration was left as later work. - */ -#ifdef CONFIG_X86_32 -/* used by vmalloc.c, vsyscall.lds.S. - * - * Leave one empty page between vmalloc'ed areas and - * the start of the fixmap. - */ -extern unsigned long __FIXADDR_TOP; -#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) - -#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) -#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) -#else -#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) - -/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ -#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) -#endif - -/* - * Here we define all the compile-time 'special' virtual - * addresses. The point is to have a constant address at - * compile time, but to set the physical address only - * in the boot process. - * for x86_32: We allocate these special addresses - * from the end of virtual memory (0xfffff000) backwards. - * Also this lets us do fail-safe vmalloc(), we - * can guarantee that these special addresses and - * vmalloc()-ed addresses never overlap. - * - * These 'compile-time allocated' memory buffers are - * fixed-size 4k pages (or larger if used with an increment - * higher than 1). Use set_fixmap(idx,phys) to associate - * physical memory with fixmap indices. - * - * TLB entries of such buffers will not be flushed across - * task switches. 
- */ -enum fixed_addresses { -#ifdef CONFIG_X86_32 - FIX_HOLE, - FIX_VDSO, -#else - VSYSCALL_LAST_PAGE, - VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE - + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, - VSYSCALL_HPET, -#endif - FIX_DBGP_BASE, - FIX_EARLYCON_MEM_BASE, -#ifdef CONFIG_X86_LOCAL_APIC - FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ -#endif -#ifdef CONFIG_X86_IO_APIC - FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, -#endif -#ifdef CONFIG_X86_64 -#ifdef CONFIG_EFI - FIX_EFI_IO_MAP_LAST_PAGE, - FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE - + MAX_EFI_IO_PAGES - 1, -#endif -#endif -#ifdef CONFIG_X86_VISWS_APIC - FIX_CO_CPU, /* Cobalt timer */ - FIX_CO_APIC, /* Cobalt APIC Redirection Table */ - FIX_LI_PCIA, /* Lithium PCI Bridge A */ - FIX_LI_PCIB, /* Lithium PCI Bridge B */ -#endif -#ifdef CONFIG_X86_F00F_BUG - FIX_F00F_IDT, /* Virtual mapping for IDT */ -#endif -#ifdef CONFIG_X86_CYCLONE_TIMER - FIX_CYCLONE_TIMER, /*cyclone timer register*/ -#endif -#ifdef CONFIG_X86_32 - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, -#ifdef CONFIG_PCI_MMCONFIG - FIX_PCIE_MCFG, -#endif -#endif -#ifdef CONFIG_PARAVIRT - FIX_PARAVIRT_BOOTMAP, -#endif - __end_of_permanent_fixed_addresses, -#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT - FIX_OHCI1394_BASE, -#endif - /* - * 256 temporary boot-time mappings, used by early_ioremap(), - * before ioremap() is functional. - * - * We round it up to the next 256 pages boundary so that we - * can have a single pgd entry and a single pte table: - */ -#define NR_FIX_BTMAPS 64 -#define FIX_BTMAPS_SLOTS 4 - FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - - (__end_of_permanent_fixed_addresses & 255), - FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, -#ifdef CONFIG_X86_32 - FIX_WP_TEST, -#endif - __end_of_fixed_addresses -}; - - -extern void reserve_top_address(unsigned long reserve); - -#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) -#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) - -#endif /* !__ASSEMBLY__ */ -#endif /* _ASM_X86_FIXMAP_32_H */ diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h deleted file mode 100644 index 95acae0daa5..00000000000 --- a/arch/x86/include/asm/fixmap_64.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * fixmap.h: compile-time virtual memory allocation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998 Ingo Molnar - * - * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 - * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 - */ - -#ifndef _ASM_X86_FIXMAP_64_H -#define _ASM_X86_FIXMAP_64_H - -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#ifdef CONFIG_X86_32 -#include -#include -#else -#include -#ifdef CONFIG_EFI -#include -#endif -#endif - -/* - * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall - * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. - * Because of this, FIXADDR_TOP x86 integration was left as later work. - */ -#ifdef CONFIG_X86_32 -/* used by vmalloc.c, vsyscall.lds.S. 
- * - * Leave one empty page between vmalloc'ed areas and - * the start of the fixmap. - */ -extern unsigned long __FIXADDR_TOP; -#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) - -#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) -#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) -#else -#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) - -/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ -#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) -#endif - -/* - * Here we define all the compile-time 'special' virtual - * addresses. The point is to have a constant address at - * compile time, but to set the physical address only - * in the boot process. - * for x86_32: We allocate these special addresses - * from the end of virtual memory (0xfffff000) backwards. - * Also this lets us do fail-safe vmalloc(), we - * can guarantee that these special addresses and - * vmalloc()-ed addresses never overlap. - * - * These 'compile-time allocated' memory buffers are - * fixed-size 4k pages (or larger if used with an increment - * higher than 1). Use set_fixmap(idx,phys) to associate - * physical memory with fixmap indices. - * - * TLB entries of such buffers will not be flushed across - * task switches. - */ -enum fixed_addresses { -#ifdef CONFIG_X86_32 - FIX_HOLE, - FIX_VDSO, -#else - VSYSCALL_LAST_PAGE, - VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE - + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, - VSYSCALL_HPET, -#endif - FIX_DBGP_BASE, - FIX_EARLYCON_MEM_BASE, -#ifdef CONFIG_X86_LOCAL_APIC - FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ -#endif -#ifdef CONFIG_X86_IO_APIC - FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, -#endif -#ifdef CONFIG_X86_64 -#ifdef CONFIG_EFI - FIX_EFI_IO_MAP_LAST_PAGE, - FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE - + MAX_EFI_IO_PAGES - 1, -#endif -#endif -#ifdef CONFIG_X86_VISWS_APIC - FIX_CO_CPU, /* Cobalt timer */ - FIX_CO_APIC, /* Cobalt APIC Redirection Table */ - FIX_LI_PCIA, /* Lithium PCI Bridge A */ - FIX_LI_PCIB, /* Lithium PCI Bridge B */ -#endif -#ifdef CONFIG_X86_F00F_BUG - FIX_F00F_IDT, /* Virtual mapping for IDT */ -#endif -#ifdef CONFIG_X86_CYCLONE_TIMER - FIX_CYCLONE_TIMER, /*cyclone timer register*/ -#endif -#ifdef CONFIG_X86_32 - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, -#ifdef CONFIG_PCI_MMCONFIG - FIX_PCIE_MCFG, -#endif -#endif -#ifdef CONFIG_PARAVIRT - FIX_PARAVIRT_BOOTMAP, -#endif - __end_of_permanent_fixed_addresses, -#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT - FIX_OHCI1394_BASE, -#endif - /* - * 256 temporary boot-time mappings, used by early_ioremap(), - * before ioremap() is functional. 
- * - * We round it up to the next 256 pages boundary so that we - * can have a single pgd entry and a single pte table: - */ -#define NR_FIX_BTMAPS 64 -#define FIX_BTMAPS_SLOTS 4 - FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - - (__end_of_permanent_fixed_addresses & 255), - FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, -#ifdef CONFIG_X86_32 - FIX_WP_TEST, -#endif - __end_of_fixed_addresses -}; - - -extern void reserve_top_address(unsigned long reserve); - -#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) -#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) - -#endif /* !__ASSEMBLY__ */ -#endif /* _ASM_X86_FIXMAP_64_H */ -- cgit v1.2.3 From 144b0712dd9dd9ebd4e80c4e5388c9f6afc2b497 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 27 Feb 2009 10:27:04 -0800 Subject: x86: signal: add __user annotation Impact: cleanup Add missing __user annotation to the parameter of get_sigframe(). Also change cast type to void __user * of *fpstate. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 7cdcd16885e..e89eaf417e5 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -215,7 +215,7 @@ static const struct { */ static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, - void **fpstate) + void __user **fpstate) { unsigned long sp; @@ -243,7 +243,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (used_math()) { sp = sp - sig_xstate_size; - *fpstate = (struct _fpstate *) sp; + *fpstate = (void __user *) sp; if (save_i387_xstate(*fpstate) < 0) return (void __user *)-1L; } -- cgit v1.2.3 From 97286a2b64725aac2d584ddd1f94871f9991d5a1 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 27 Feb 2009 10:28:48 -0800 Subject: x86: signal: intrroduce get_sigframe() and replace get_sigstack() Impact: cleanup Introduce get_sigframe() like 32-bit to replace get_sigstack(). Move the i387 stuff into get_sigframe(). Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index e89eaf417e5..82d37c77b0f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -392,10 +392,13 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Determine which stack to use.. */ static void __user * -get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size) +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, + void __user **fpstate) { - /* Default to using normal stack - redzone*/ - sp -= 128; + unsigned long sp; + + /* Default to using normal stack - redzone */ + sp = regs->sp - 128; /* This is the X/Open sanctioned signal stack switching. 
*/ if (ka->sa.sa_flags & SA_ONSTACK) { @@ -403,7 +406,18 @@ get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size) sp = current->sas_ss_sp + current->sas_ss_size; } - return (void __user *)round_down(sp - size, 64); + if (used_math()) { + sp -= sig_xstate_size; + *fpstate = (void __user *)round_down(sp, 64); + if (save_i387_xstate(*fpstate) < 0) + return (void __user *) -1L; + + sp -= frame_size; + return (void __user *)round_down(sp, 16) - 8; + } + + sp -= frame_size; + return (void __user *)round_down(sp, 64) - 8; } static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, @@ -414,15 +428,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, int err = 0; struct task_struct *me = current; - if (used_math()) { - fp = get_stack(ka, regs->sp, sig_xstate_size); - frame = (void __user *)round_down( - (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; - - if (save_i387_xstate(fp) < 0) - return -EFAULT; - } else - frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8; + frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; -- cgit v1.2.3 From 36a4526583ad61fe7cb7432f53bce52ea198813a Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 27 Feb 2009 10:29:09 -0800 Subject: x86: signal: use 16 bytes boundary for rt_sigframe Impact: cleanup Supporting xsave/xrestore introduces 64 bytes boundary for save_i387_xstate(). 16 bytes boundary is OK for rt_sigframe. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 82d37c77b0f..89ef90df985 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -409,15 +409,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (used_math()) { sp -= sig_xstate_size; *fpstate = (void __user *)round_down(sp, 64); + if (save_i387_xstate(*fpstate) < 0) return (void __user *) -1L; - - sp -= frame_size; - return (void __user *)round_down(sp, 16) - 8; } sp -= frame_size; - return (void __user *)round_down(sp, 64) - 8; + return (void __user *)round_down(sp, 16) - 8; } static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, -- cgit v1.2.3 From 75779f05264b9968d7ae7ecb4ca5127b08785692 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 27 Feb 2009 10:29:57 -0800 Subject: x86: signal: unify get_sigframe() Impact: cleanup Unify get_sigframe(). Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 97 ++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 56 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 89ef90df985..53425c681f2 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -187,28 +187,6 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, /* * Set up a signal frame. 
*/ -#ifdef CONFIG_X86_32 -static const struct { - u16 poplmovl; - u32 val; - u16 int80; -} __attribute__((packed)) retcode = { - 0xb858, /* popl %eax; movl $..., %eax */ - __NR_sigreturn, - 0x80cd, /* int $0x80 */ -}; - -static const struct { - u8 movl; - u32 val; - u16 int80; - u8 pad; -} __attribute__((packed)) rt_retcode = { - 0xb8, /* movl $..., %eax */ - __NR_rt_sigreturn, - 0x80cd, /* int $0x80 */ - 0 -}; /* * Determine which stack to use.. @@ -217,10 +195,13 @@ static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { - unsigned long sp; - /* Default to using normal stack */ - sp = regs->sp; + unsigned long sp = regs->sp; + +#ifdef CONFIG_X86_64 + /* redzone */ + sp -= 128; +#endif /* CONFIG_X86_64 */ /* * If we are on the alternate signal stack and would overflow it, don't. @@ -234,30 +215,64 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } else { +#ifdef CONFIG_X86_32 /* This is the legacy signal stack switching. */ if ((regs->ss & 0xffff) != __USER_DS && !(ka->sa.sa_flags & SA_RESTORER) && ka->sa.sa_restorer) sp = (unsigned long) ka->sa.sa_restorer; +#endif /* CONFIG_X86_32 */ } if (used_math()) { - sp = sp - sig_xstate_size; + sp -= sig_xstate_size; +#ifdef CONFIG_X86_32 *fpstate = (void __user *) sp; +#else /* !CONFIG_X86_32 */ + *fpstate = (void __user *)round_down(sp, 64); +#endif /* CONFIG_X86_32 */ + if (save_i387_xstate(*fpstate) < 0) return (void __user *)-1L; } sp -= frame_size; +#ifdef CONFIG_X86_32 /* * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ sp = ((sp + 4) & -16ul) - 4; +#else /* !CONFIG_X86_32 */ + sp = round_down(sp, 16) - 8; +#endif return (void __user *) sp; } +#ifdef CONFIG_X86_32 +static const struct { + u16 poplmovl; + u32 val; + u16 int80; +} __attribute__((packed)) retcode = { + 0xb858, /* popl %eax; movl $..., %eax */ + __NR_sigreturn, + 0x80cd, /* int $0x80 */ +}; + +static const struct { + u8 movl; + u32 val; + u16 int80; + u8 pad; +} __attribute__((packed)) rt_retcode = { + 0xb8, /* movl $..., %eax */ + __NR_rt_sigreturn, + 0x80cd, /* int $0x80 */ + 0 +}; + static int __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) @@ -388,36 +403,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, return 0; } #else /* !CONFIG_X86_32 */ -/* - * Determine which stack to use.. - */ -static void __user * -get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, - void __user **fpstate) -{ - unsigned long sp; - - /* Default to using normal stack - redzone */ - sp = regs->sp - 128; - - /* This is the X/Open sanctioned signal stack switching. 
*/ - if (ka->sa.sa_flags & SA_ONSTACK) { - if (sas_ss_flags(sp) == 0) - sp = current->sas_ss_sp + current->sas_ss_size; - } - - if (used_math()) { - sp -= sig_xstate_size; - *fpstate = (void __user *)round_down(sp, 64); - - if (save_i387_xstate(*fpstate) < 0) - return (void __user *) -1L; - } - - sp -= frame_size; - return (void __user *)round_down(sp, 16) - 8; -} - static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { -- cgit v1.2.3 From 1fae0279ce4811fa123001515d1ed3d68c1d557f Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Fri, 27 Feb 2009 10:30:32 -0800 Subject: x86: signal: introduce helper align_sigframe() Impact: cleanup Introduce helper align_sigframe() to align stack pointer for signal frame. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 53425c681f2..dde3f2ae237 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -191,6 +191,20 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, /* * Determine which stack to use.. */ +static unsigned long align_sigframe(unsigned long sp) +{ +#ifdef CONFIG_X86_32 + /* + * Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. + */ + sp = ((sp + 4) & -16ul) - 4; +#else /* !CONFIG_X86_32 */ + sp = round_down(sp, 16) - 8; +#endif + return sp; +} + static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, void __user **fpstate) @@ -236,18 +250,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, return (void __user *)-1L; } - sp -= frame_size; -#ifdef CONFIG_X86_32 - /* - * Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. - */ - sp = ((sp + 4) & -16ul) - 4; -#else /* !CONFIG_X86_32 */ - sp = round_down(sp, 16) - 8; -#endif - - return (void __user *) sp; + return (void __user *)align_sigframe(sp - frame_size); } #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 327f4387e39cf7bfe79a673e56dbf5479db3fec9 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 28 Feb 2009 18:50:21 +0530 Subject: x86: remove double copy of show_cpuinfo_core for 32 and 64 bit Impact: unification show_cpuinfo_core is identical for 32 and 64 bit and can be unified, and CONFIG_X86_HT inherently depends on CONFIG_X86_SMP. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/proc.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 01b1244ef1c..d67e0e48bc2 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -7,11 +7,10 @@ /* * Get CPU information for use by the procfs. 
*/ -#ifdef CONFIG_X86_32 static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, unsigned int cpu) { -#ifdef CONFIG_X86_HT +#ifdef CONFIG_SMP if (c->x86_max_cores * smp_num_siblings > 1) { seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "siblings\t: %d\n", @@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, #endif } +#ifdef CONFIG_X86_32 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) { /* @@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) c->wp_works_ok ? "yes" : "no"); } #else -static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) -{ -#ifdef CONFIG_SMP - if (c->x86_max_cores * smp_num_siblings > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - seq_printf(m, "apicid\t\t: %d\n", c->apicid); - seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); - } -#endif -} - static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) { seq_printf(m, -- cgit v1.2.3 From af6326d72c95d6e0bbc88c92185c654f57acef3b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 1 Mar 2009 16:03:16 +0900 Subject: alpha: fix typo in recent early vmalloc change Impact: fix build Add missing 'o' in variable name. Compile tested. Signed-off-by: Tejun Heo Reported-by: Ingo Molnar Cc: Ivan Kokshaysky --- arch/alpha/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 91eddd8505d..af71d38c8e4 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -202,7 +202,7 @@ callback_init(void * kernel_end) console_remap_vm.size = nr_pages << PAGE_SHIFT; vm_area_register_early(&console_remap_vm, PAGE_SIZE); - vaddr = (unsigned long)consle_remap_vm.addr; + vaddr = (unsigned long)console_remap_vm.addr; /* Set up the third level PTEs and update the virtual addresses of the CRB entries. */ -- cgit v1.2.3 From d0c4f570276cb4d2dc4215b90eb7cb6e2bdd4a15 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 1 Mar 2009 16:06:56 +0900 Subject: bootmem, x86: further fixes for arch-specific bootmem wrapping Impact: fix new breakages introduced by previous fix Commit c132937556f56ee4b831ef4b23f1846e05fde102 tried to clean up bootmem arch wrapper but it wasn't quite correct. Before the commit, the followings were broken. * Low level interface functions prefixed with __ ignored arch preference. * reserve_bootmem(...) can't be mapped into reserve_bootmem_node(NODE_DATA(0)->bdata, ...) because the node is not preference here. The region specified MUST fall into the specified region; otherwise, it will panic. After the commit, * If allocation fails for the arch preferred node, it should fallback to whatever is available. Instead, it simply failed allocation. There are too many internal details to allow generic wrapping and still keep things simple for archs. Plus, all that arch wants is a way to prefer certain node over another. This patch drops the generic wrapping around alloc_bootmem_core() and add alloc_bootmem_core() instead. If necessary, arch can define bootmem_arch_referred_node() macro or function which takes all allocation information and returns the preferred node. 
bootmem generic code will always try the preferred node first and then fallback to other nodes as usual. Breakages noted and changes reviewed by Johannes Weiner. Signed-off-by: Tejun Heo Acked-by: Johannes Weiner --- arch/x86/include/asm/mmzone_32.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index eeacf67de49..ede6998bd92 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -92,12 +92,8 @@ static inline int pfn_valid(int pfn) #ifdef CONFIG_NEED_MULTIPLE_NODES /* always use node 0 for bootmem on this numa platform */ -#define alloc_bootmem_core(__bdata, size, align, goal, limit) \ -({ \ - bootmem_data_t __maybe_unused * __abm_bdata_dummy = (__bdata); \ - __alloc_bootmem_core(NODE_DATA(0)->bdata, \ - (size), (align), (goal), (limit)); \ -}) +#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \ + (NODE_DATA(0)->bdata) #endif /* CONFIG_NEED_MULTIPLE_NODES */ #endif /* _ASM_X86_MMZONE_32_H */ -- cgit v1.2.3 From f5c1aa1537be39d8b9bb5279b5881d81898fd3cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 1 Mar 2009 12:32:08 +0100 Subject: Revert "gpu/drm, x86, PAT: PAT support for io_mapping_*" This reverts commit 17581ad812a9abb0182260374ef2e52d4a808a64. Sitsofe Wheeler reported that /dev/dri/card0 is MIA on his EeePC 900 and bisected it to this commit. Graphics card is an i915 in an EeePC 900: 00:02.0 VGA compatible controller [0300]: Intel Corporation Mobile 915GM/GMS/910GML Express Graphics Controller [8086:2592] (rev 04) ( Most likely the ioremap() of the driver failed and hence the card did not initialize. ) Reported-by: Sitsofe Wheeler Bisected-by: Sitsofe Wheeler Cc: Venkatesh Pallipadi Cc: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/include/asm/iomap.h | 5 +---- arch/x86/mm/iomap_32.c | 44 ++------------------------------------------ 2 files changed, 3 insertions(+), 46 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index bd46495ff7d..86af26091d6 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h @@ -24,10 +24,7 @@ #include int -reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot); - -void -free_io_memtype(u64 base, unsigned long size); +is_io_mapping_possible(resource_size_t base, unsigned long size); void * iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index d5e28424622..6c2b1af1692 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -21,13 +21,13 @@ #include #ifdef CONFIG_X86_PAE -static int +int is_io_mapping_possible(resource_size_t base, unsigned long size) { return 1; } #else -static int +int is_io_mapping_possible(resource_size_t base, unsigned long size) { /* There is no way to map greater than 1 << 32 address without PAE */ @@ -38,46 +38,6 @@ is_io_mapping_possible(resource_size_t base, unsigned long size) } #endif -int -reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot) -{ - unsigned long ret_flag; - - if (!is_io_mapping_possible(base, size)) - goto out_err; - - if (!pat_enabled) { - *prot = pgprot_noncached(PAGE_KERNEL); - return 0; - } - - if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag)) - goto out_err; - - if (ret_flag == _PAGE_CACHE_WB) - goto out_free; - - if (kernel_map_sync_memtype(base, size, ret_flag)) - goto out_free; - - *prot = 
__pgprot(__PAGE_KERNEL | ret_flag); - return 0; - -out_free: - free_memtype(base, base + size); -out_err: - return -EINVAL; -} -EXPORT_SYMBOL_GPL(reserve_io_memtype_wc); - -void -free_io_memtype(u64 base, unsigned long size) -{ - if (pat_enabled) - free_memtype(base, base + size); -} -EXPORT_SYMBOL_GPL(free_io_memtype); - /* Map 'pfn' using fixed map 'type' and protections 'prot' */ void * -- cgit v1.2.3 From f180053694b43d5714bf56cb95499a3c32ff155c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 2 Mar 2009 11:00:57 +0100 Subject: x86, mm: dont use non-temporal stores in pagecache accesses Impact: standardize IO on cached ops On modern CPUs it is almost always a bad idea to use non-temporal stores, as the regression in this commit has shown it: 30d697f: x86: fix performance regression in write() syscall The kernel simply has no good information about whether using non-temporal stores is a good idea or not - and trying to add heuristics only increases complexity and inserts fragility. The regression on cached write()s took very long to be found - over two years. So dont take any chances and let the hardware decide how it makes use of its caches. The only exception is drivers/gpu/drm/i915/i915_gem.c: there were we are absolutely sure that another entity (the GPU) will pick up the dirty data immediately and that the CPU will not touch that data before the GPU will. Also, keep the _nocache() primitives to make it easier for people to experiment with these details. There may be more clear-cut cases where non-cached copies can be used, outside of filemap.c. Cc: Salman Qazi Cc: Nick Piggin Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess_32.h | 4 ++-- arch/x86/include/asm/uaccess_64.h | 25 +++++++------------------ 2 files changed, 9 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index a0ba6138697..5e06259e90e 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) } static __always_inline unsigned long __copy_from_user_nocache(void *to, - const void __user *from, unsigned long n, unsigned long total) + const void __user *from, unsigned long n) { might_fault(); if (__builtin_constant_p(n)) { @@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, - unsigned long n, unsigned long total) + unsigned long n) { return __copy_from_user_ll_nocache_nozero(to, from, n); } diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index dcaa0404cf7..8cc687326eb 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -188,29 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); -static inline int __copy_from_user_nocache(void *dst, const void __user *src, - unsigned size, unsigned long total) +static inline int +__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) { might_sleep(); - /* - * In practice this limit means that large file write()s - * which get chunked to 4K copies get handled via - * non-temporal stores here. 
Smaller writes get handled - * via regular __copy_from_user(): - */ - if (likely(total >= PAGE_SIZE)) - return __copy_user_nocache(dst, src, size, 1); - else - return __copy_from_user(dst, src, size); + return __copy_user_nocache(dst, src, size, 1); } -static inline int __copy_from_user_inatomic_nocache(void *dst, - const void __user *src, unsigned size, unsigned total) +static inline int +__copy_from_user_inatomic_nocache(void *dst, const void __user *src, + unsigned size) { - if (likely(total >= PAGE_SIZE)) - return __copy_user_nocache(dst, src, size, 0); - else - return __copy_from_user_inatomic(dst, src, size); + return __copy_user_nocache(dst, src, size, 0); } unsigned long -- cgit v1.2.3 From 9694cd6c17582cd6c29bf4a4e4aa767862ff4f4c Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 10:53:55 +0100 Subject: x86_32: apic/bigsmp_32, de-inline functions The ones which go only into struct apic are de-inlined by compiler anyway, so remove the inline specifier from them. Afterwards, remove bigsmp_setup_portio_remap completely as it is unused. Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/bigsmp_32.c | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 69c512e23a9..d806ecaa948 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -16,17 +16,17 @@ #include #include -static inline unsigned bigsmp_get_apic_id(unsigned long x) +static unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } -static inline int bigsmp_apic_id_registered(void) +static int bigsmp_apic_id_registered(void) { return 1; } -static inline const cpumask_t *bigsmp_target_cpus(void) +static const cpumask_t *bigsmp_target_cpus(void) { #ifdef CONFIG_SMP return &cpu_online_map; @@ -35,13 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void) #endif } -static inline unsigned long -bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) +static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } -static inline unsigned long bigsmp_check_apicid_present(int bit) +static unsigned long bigsmp_check_apicid_present(int bit) { return 1; } @@ -64,7 +63,7 @@ static inline unsigned long calculate_ldr(int cpu) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ -static inline void bigsmp_init_apic_ldr(void) +static void bigsmp_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); @@ -74,19 +73,19 @@ static inline void bigsmp_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static inline void bigsmp_setup_apic_routing(void) +static void bigsmp_setup_apic_routing(void) { printk(KERN_INFO "Enabling APIC mode: Physflat. 
Using %d I/O APICs\n", nr_ioapics); } -static inline int bigsmp_apicid_to_node(int logical_apicid) +static int bigsmp_apicid_to_node(int logical_apicid) { return apicid_2_node[hard_smp_processor_id()]; } -static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) +static int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); @@ -94,7 +93,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) +static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); } @@ -107,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu) return cpu_physical_id(cpu); } -static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) +static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0xFFL); } -static inline void bigsmp_setup_portio_remap(void) -{ -} - -static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) +static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) { return 1; } /* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) { return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); } -static inline unsigned int -bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, +static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, const struct cpumask *andmask) { int cpu; @@ -148,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, return BAD_APICID; } -static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) +static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } @@ -158,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) default_send_IPI_mask_sequence_phys(mask, vector); } -static inline void bigsmp_send_IPI_allbutself(int vector) +static void bigsmp_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } -static inline void bigsmp_send_IPI_all(int vector) +static void bigsmp_send_IPI_all(int vector) { bigsmp_send_IPI_mask(cpu_online_mask, vector); } -- cgit v1.2.3 From c2b20cbd057b97e2f440fa3bc90b3df51de324fe Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 10:53:56 +0100 Subject: x86_32: apic/es7000_32, cpu_mask_to_apicid cleanup Remove es7000_cpu_mask_to_apicid_cluster completely, because it's almost the same as es7000_cpu_mask_to_apicid except 2 code paths. One of them is about to be removed soon, the another should be BAD_APICID (it's a fail path). The _cluster one was not invoked on apic->cpu_mask_to_apicid_and anyway, since there was no _cluster_and variant. Also use newer cpumask functions. 
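(Editorial aside, not part of the patch.) The "newer cpumask functions" mentioned above differ from the older cpus_* macros mainly in taking a pointer to the mask rather than a copy of it, which matters once NR_CPUS makes a mask too large to pass by value cheaply. The toy program below is only a sketch of that shape under stated assumptions: toy_cpumask and the old_*/new_* helpers are made-up stand-ins, not the kernel API.

	#include <stdint.h>
	#include <stdio.h>

	/* toy stand-in for struct cpumask: one 64-bit word of CPU bits */
	struct toy_cpumask { uint64_t bits; };

	/* old style: helpers took the mask by value, e.g. cpus_weight(*mask) */
	static int old_weight(struct toy_cpumask m)
	{
		return __builtin_popcountll(m.bits);
	}

	/* new style: helpers take a const pointer, e.g. cpumask_weight(mask) */
	static int new_weight(const struct toy_cpumask *m)
	{
		return __builtin_popcountll(m->bits);
	}

	static int new_first(const struct toy_cpumask *m)
	{
		return m->bits ? __builtin_ctzll(m->bits) : 64;	/* 64 == "none" */
	}

	static int new_test(int cpu, const struct toy_cpumask *m)
	{
		return (int)((m->bits >> cpu) & 1);
	}

	int main(void)
	{
		struct toy_cpumask m = { .bits = 0x13 };	/* CPUs 0, 1 and 4 */

		printf("weight: old(by value)=%d new(by pointer)=%d\n",
		       old_weight(m), new_weight(&m));
		printf("first=%d, cpu2 set=%d, cpu4 set=%d\n",
		       new_first(&m), new_test(2, &m), new_test(4, &m));
		return 0;
	}

The pointer-based form is the one the patch converts to; avoiding the by-value copy is what lets masks grow with large CPU counts without blowing up stack usage.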
Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/es7000_32.c | 46 ++++------------------------------------ 1 file changed, 4 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index da37e2c59fe..9b9e86f3cfd 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -575,25 +575,21 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid) return 1; } -static unsigned int -es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) +static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) { - int cpus_found = 0; - int num_bits_set; + unsigned int cpu, num_bits_set, cpus_found = 0; int apicid; - int cpu; num_bits_set = cpumask_weight(cpumask); /* Return id to all */ if (num_bits_set == nr_cpu_ids) - return 0xFF; + return es7000_cpu_to_logical_apicid(0); /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of target_cpus(): */ cpu = cpumask_first(cpumask); apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = es7000_cpu_to_logical_apicid(cpu); @@ -601,40 +597,6 @@ es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { WARN(1, "Not a valid mask!"); - return 0xFF; - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) -{ - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; - - num_bits_set = cpus_weight(*cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return es7000_cpu_to_logical_apicid(0); - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): - */ - cpu = first_cpu(*cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); - - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk("%s: Not a valid mask!\n", __func__); - return es7000_cpu_to_logical_apicid(0); } apicid = new_apicid; @@ -739,7 +701,7 @@ struct apic apic_es7000_cluster = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, - .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster, + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = es7000_send_IPI_mask, -- cgit v1.2.3 From 0edc0b324a37c4bf9e13f3194f2f45c677808792 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 10:53:57 +0100 Subject: x86_32: apic/es7000_32, fix cpu_mask_to_apicid Perform same-cluster checking even for masks with all (nr_cpu_ids) bits set and report BAD_APICID on failure. While at it, convert it to for_each_cpu. 
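(Editorial aside, not part of the patch.) A minimal, self-contained model of the loop shape this fix introduces: the first CPU seen seeds apicid, every later CPU must map into the same cluster, and any mismatch yields BAD_APICID. The 64-bit mask, toy_logical_apicid() and the CLUSTER() macro are invented stand-ins for the kernel's cpumask, es7000_cpu_to_logical_apicid() and APIC_CLUSTER(); the cluster-is-the-high-nibble mapping is an assumption made for the demo only.

	#include <stdint.h>
	#include <stdio.h>

	#define BAD_APICID  0xFFu
	#define CLUSTER(id) ((id) & 0xF0)	/* assumed: cluster = high nibble */

	/* toy logical-apicid mapping: 4 CPUs per cluster, one bit per CPU */
	static unsigned int toy_logical_apicid(int cpu)
	{
		return (unsigned int)(((cpu / 4) << 4) | (1u << (cpu % 4)));
	}

	/* models the for_each_cpu() + round counter shape from the fix */
	static unsigned int mask_to_apicid(uint64_t mask)
	{
		unsigned int round = 0, apicid = 0;

		for (int cpu = 0; cpu < 64; cpu++) {
			if (!((mask >> cpu) & 1))	/* for_each_cpu() skips clear bits */
				continue;

			unsigned int new_apicid = toy_logical_apicid(cpu);

			if (round && CLUSTER(apicid) != CLUSTER(new_apicid))
				return BAD_APICID;	/* CPUs span two clusters */
			apicid = new_apicid;
			round++;
		}
		return apicid;
	}

	int main(void)
	{
		printf("cpus 0,1 -> %#x\n", mask_to_apicid(0x03));	/* same cluster */
		printf("cpus 0,4 -> %#x\n", mask_to_apicid(0x11));	/* BAD_APICID */
		return 0;
	}

Note that a mask with every bit set is no longer special-cased; it simply has to pass the same cluster check as any other mask.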
Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/es7000_32.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 9b9e86f3cfd..4d8830ffe48 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -577,32 +577,22 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid) static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) { - unsigned int cpu, num_bits_set, cpus_found = 0; - int apicid; + unsigned int round = 0; + int cpu, uninitialized_var(apicid); - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - return es7000_cpu_to_logical_apicid(0); /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of target_cpus(): + * The cpus in the mask must all be on the apic cluster. */ - cpu = cpumask_first(cpumask); - apicid = es7000_cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = es7000_cpu_to_logical_apicid(cpu); + for_each_cpu(cpu, cpumask) { + int new_apicid = es7000_cpu_to_logical_apicid(cpu); - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - WARN(1, "Not a valid mask!"); + if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { + WARN(1, "Not a valid mask!"); - return es7000_cpu_to_logical_apicid(0); - } - apicid = new_apicid; - cpus_found++; + return BAD_APICID; } - cpu++; + apicid = new_apicid; + round++; } return apicid; } -- cgit v1.2.3 From fae176d6e03578fee7cfe948ff9ae81bf7ea0ef0 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 10:53:58 +0100 Subject: x86_32: apic/summit_32, fix cpu_mask_to_apicid Perform same-cluster checking even for masks with all (nr_cpu_ids) bits set and report correct apicid on success instead. While at it, convert it to for_each_cpu and newer cpumask api. Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/summit_32.c | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 0a1135c5a6d..beda620bee8 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -291,33 +291,21 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) { - int cpus_found = 0; - int num_bits_set; - int apicid; - int cpu; + unsigned int round = 0; + int cpu, apicid = 0; - num_bits_set = cpus_weight(*cpumask); - if (num_bits_set >= nr_cpu_ids) - return BAD_APICID; /* * The cpus in the mask must all be on the apic cluster. 
*/ - cpu = first_cpu(*cpumask); - apicid = summit_cpu_to_logical_apicid(cpu); - - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask)) { - int new_apicid = summit_cpu_to_logical_apicid(cpu); + for_each_cpu(cpu, cpumask) { + int new_apicid = summit_cpu_to_logical_apicid(cpu); - if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - printk("%s: Not a valid mask!\n", __func__); - - return BAD_APICID; - } - apicid = apicid | new_apicid; - cpus_found++; + if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { + printk("%s: Not a valid mask!\n", __func__); + return BAD_APICID; } - cpu++; + apicid |= new_apicid; + round++; } return apicid; } -- cgit v1.2.3 From 871d78c6d9fc83b2ad584788a175fcfca07a3666 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 11:34:27 +0100 Subject: x86_32: apic/es7000_32, fix section mismatch Remove __init section placement for some functions, so that we don't get section mismatch warnings. [v2]: 2 of them were not caught by DEBUG_SECTION_MISMATCH=y magic. Fix it. Signed-off-by: Jiri Slaby Cc: Jiri Slaby Signed-off-by: Ingo Molnar Cc: H. Peter Anvin --- arch/x86/kernel/apic/es7000_32.c | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 4d8830ffe48..19588f2770e 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -163,7 +163,7 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) return 0; } -static int __init es7000_apic_is_cluster(void) +static int es7000_apic_is_cluster(void) { /* MPENTIUMIII */ if (boot_cpu_data.x86 == 6 && @@ -173,7 +173,7 @@ static int __init es7000_apic_is_cluster(void) return 0; } -static void __init setup_unisys(void) +static void setup_unisys(void) { /* * Determine the generation of the ES7000 currently running. 
@@ -192,7 +192,7 @@ static void __init setup_unisys(void) /* * Parse the OEM Table: */ -static int __init parse_unisys_oem(char *oemptr) +static int parse_unisys_oem(char *oemptr) { int i; int success = 0; @@ -254,7 +254,7 @@ static int __init parse_unisys_oem(char *oemptr) } #ifdef CONFIG_ACPI -static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) +static int find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; struct es7000_oem_table *table; @@ -285,7 +285,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) return 0; } -static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) +static void unmap_unisys_acpi_oem_table(unsigned long oem_addr) { if (!oem_addr) return; @@ -303,10 +303,10 @@ static int es7000_check_dsdt(void) return 0; } -static int __initdata es7000_acpi_ret; +static int es7000_acpi_ret; /* Hook from generic ACPI tables.c */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { unsigned long oem_addr = 0; int check_dsdt; @@ -333,8 +333,7 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return ret && !es7000_apic_is_cluster(); } -static int __init -es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) +static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) { int ret = es7000_acpi_ret; @@ -342,13 +341,12 @@ es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) } #else /* !CONFIG_ACPI: */ -static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } -static int __init -es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) +static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) { return 0; } @@ -362,8 +360,7 @@ static void es7000_spin(int n) rep_nop(); } -static int __init -es7000_mip_write(struct mip_reg *mip_reg) +static int es7000_mip_write(struct mip_reg *mip_reg) { int status = 0; int spin; @@ -396,7 +393,7 @@ es7000_mip_write(struct mip_reg *mip_reg) return status; } -static void __init es7000_enable_apic_mode(void) +static void es7000_enable_apic_mode(void) { struct mip_reg es7000_mip_reg; int mip_status; @@ -627,9 +624,9 @@ static int probe_es7000(void) return 0; } -static int __initdata es7000_mps_ret; -static __init int -es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) +static int es7000_mps_ret; +static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem, + char *productid) { int ret = 0; @@ -646,8 +643,8 @@ es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) return ret && !es7000_apic_is_cluster(); } -static __init int -es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, char *productid) +static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, + char *productid) { int ret = es7000_mps_ret; -- cgit v1.2.3 From 2fcb1f1f38e9b10ee5f339be4e17ba5cad9b4421 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 11:34:28 +0100 Subject: x86_32: apic/summit_32, fix section mismatch Remove __init section placement for some functions/data, so that we don't get section mismatch warnings. Also make inline function instead of empty setup_summit macro. [v2] One of them was not caught by DEBUG_SECTION_MISMATCH=y magic. Fix it. 
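(Editorial aside, not part of the patch.) A userspace sketch of why the annotations have to go: __init places code in a section the kernel frees after boot, so a pointer to such a function stored in an object that lives on - here persistent_ops, standing in for the various struct apic instances - would dangle, and modpost reports exactly that pattern as a section mismatch. The my_init macro and the ".my.init.text" section name are toy stand-ins for the kernel's __init/.init.text; in userspace nothing is discarded, so the demo simply runs.

	#include <stdio.h>

	#define my_init __attribute__((section(".my.init.text")))

	static int my_init setup_thing(void)
	{
		return 42;	/* in the kernel, boot-time-only code */
	}

	/* analogous to struct apic: a persistent ops table in .data */
	struct ops {
		int (*setup)(void);
	};

	static struct ops persistent_ops = {
		/*
		 * In the kernel this .data -> .init.text reference is the
		 * mismatch: once .init.text is freed the pointer dangles.
		 * The fix in these patches is simply to drop __init/__initdata
		 * from anything reachable after init.
		 */
		.setup = setup_thing,
	};

	int main(void)
	{
		printf("setup() -> %d\n", persistent_ops.setup());
		return 0;
	}
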
Signed-off-by: Jiri Slaby Cc: Jiri Slaby Signed-off-by: Ingo Molnar Cc: H. Peter Anvin --- arch/x86/kernel/apic/summit_32.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index beda620bee8..aac52fa873f 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -77,9 +77,9 @@ static void summit_send_IPI_all(int vector) extern int use_cyclone; #ifdef CONFIG_X86_SUMMIT_NUMA -extern void setup_summit(void); +static void setup_summit(void); #else -#define setup_summit() {} +static inline void setup_summit(void) {} #endif static int summit_mps_oem_check(struct mpc_table *mpc, char *oem, @@ -360,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) } #ifdef CONFIG_X86_SUMMIT_NUMA -static struct rio_table_hdr *rio_table_hdr __initdata; -static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; -static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; +static struct rio_table_hdr *rio_table_hdr; +static struct scal_detail *scal_devs[MAX_NUMNODES]; +static struct rio_detail *rio_devs[MAX_NUMNODES*4]; #ifndef CONFIG_X86_NUMAQ -static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; +static int mp_bus_id_to_node[MAX_MP_BUSSES]; #endif -static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) +static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) { int twister = 0, node = 0; int i, bus, num_buses; @@ -430,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) return bus; } -static int __init build_detail_arrays(void) +static int build_detail_arrays(void) { unsigned long ptr; int i, scal_detail_size, rio_detail_size; @@ -464,7 +464,7 @@ static int __init build_detail_arrays(void) return 1; } -void __init setup_summit(void) +void setup_summit(void) { unsigned long ptr; unsigned short offset; -- cgit v1.2.3 From b6122b3843216f3f8e9624f9e876f4f0514f9205 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 2 Mar 2009 11:34:29 +0100 Subject: x86_32: apic/numaq_32, fix section mismatch Remove __cpuinitdata section placement for translation_table structure, since it is referenced from a functions within .text. Signed-off-by: Jiri Slaby Cc: Jiri Slaby Signed-off-by: Ingo Molnar Cc: H. Peter Anvin --- arch/x86/kernel/apic/numaq_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index a7f711f5110..ba2fc646553 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -69,7 +69,7 @@ struct mpc_trans { /* x86_quirks member */ static int mpc_record; -static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; +static struct mpc_trans *translation_table[MAX_MPC_ENTRY]; int mp_bus_id_to_node[MAX_MP_BUSSES]; int mp_bus_id_to_local[MAX_MP_BUSSES]; -- cgit v1.2.3 From db949bba3c7cf2e664ac12e237c6d4c914f0c69d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 27 Feb 2009 13:25:21 -0800 Subject: x86-32: use non-lazy io bitmap context switching Impact: remove 32-bit optimization to prepare unification x86-32 and -64 differ in the way they context-switch tasks with io permission bitmaps. x86-64 simply copies the next tasks io bitmap into place (if any) on context switch. x86-32 invalidates the bitmap on context switch, so that the next IO instruction will fault; at that point it installs the appropriate IO bitmap. 
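(Editorial aside, not part of the patch.) The sketch below models the non-lazy scheme in plain C: toy_task, tss_io_bitmap and switch_io_bitmap() are invented stand-ins for the task's thread state, the per-CPU TSS bitmap copy and the context-switch helper. Copying max(prev, next) bytes mirrors the hunk further down and guarantees that stale bytes left by the previous owner are overwritten; 0xff means the port is denied, as in the real TSS bitmap.

	#include <stdio.h>
	#include <string.h>

	#define IO_BITMAP_BYTES 128	/* enough for the toy example */

	struct toy_task {
		unsigned char io_bitmap[IO_BITMAP_BYTES];
		size_t io_bitmap_max;	/* bytes in use; 0 == no bitmap */
	};

	static unsigned char tss_io_bitmap[IO_BITMAP_BYTES];

	static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

	static void switch_io_bitmap(const struct toy_task *prev,
				     const struct toy_task *next)
	{
		if (next->io_bitmap_max) {
			/* eager copy, as x86-64 always did and x86-32 now does */
			memcpy(tss_io_bitmap, next->io_bitmap,
			       max_sz(prev->io_bitmap_max, next->io_bitmap_max));
		} else if (prev->io_bitmap_max) {
			/* next task has no bitmap: deny whatever prev had open */
			memset(tss_io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	int main(void)
	{
		struct toy_task a = { .io_bitmap_max = 16 };	/* has a bitmap */
		struct toy_task b = { .io_bitmap_max = 0 };	/* has none */

		memset(a.io_bitmap, 0x00, sizeof(a.io_bitmap));	/* a: ports allowed */
		switch_io_bitmap(&b, &a);
		printf("after b->a: byte0=%#x\n", (unsigned)tss_io_bitmap[0]);
		switch_io_bitmap(&a, &b);
		printf("after a->b: byte0=%#x\n", (unsigned)tss_io_bitmap[0]);
		return 0;
	}

The price of this scheme is an unconditional memcpy on switches involving bitmap-using tasks; the payoff, as the log goes on to argue, is that no #GP fault and no lazy-copy special case are needed afterwards.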
This makes context switching IO-bitmap-using tasks a bit more less expensive, at the cost of making the next IO instruction slower due to the extra fault. This tradeoff only makes sense if IO-bitmap-using processes are relatively common, but they don't actually use IO instructions very often. However, in a typical desktop system, the only process likely to be using IO bitmaps is the X server, and nothing at all on a server. Therefore the lazy context switch doesn't really win all that much, and its just a gratuitious difference from 64-bit code. This patch removes the lazy context switch, with a view to unifying this code in a later change. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/processor.h | 6 ------ arch/x86/kernel/ioport.c | 11 ---------- arch/x86/kernel/process_32.c | 36 ++++++++----------------------- arch/x86/kernel/traps.c | 46 ---------------------------------------- 4 files changed, 9 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c7a98f73821..76139506c3e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -248,7 +248,6 @@ struct x86_hw_tss { #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 -#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct tss_struct { /* @@ -263,11 +262,6 @@ struct tss_struct { * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; - /* - * Cache the current maximum and the last task that used the bitmap: - */ - unsigned long io_bitmap_max; - struct thread_struct *io_bitmap_owner; /* * .. and then another 0x100 bytes for the emergency kernel stack: diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index e41980a373a..99c4d308f16 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) t->io_bitmap_max = bytes; -#ifdef CONFIG_X86_32 - /* - * Sets the lazy trigger so that the next I/O operation will - * reload the correct bitmap. - * Reset the owner so that a process switch will not set - * tss->io_bitmap_base to IO_BITMAP_OFFSET. - */ - tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; - tss->io_bitmap_owner = NULL; -#else /* Update the TSS: */ memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); -#endif put_cpu(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 646da41a620..a59314e877f 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -248,11 +248,8 @@ void exit_thread(void) /* * Careful, clear this in the TSS too: */ - memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); + memset(tss->io_bitmap, 0xff, t->io_bitmap_max); t->io_bitmap_max = 0; - tss->io_bitmap_owner = NULL; - tss->io_bitmap_max = 0; - tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; put_cpu(); } @@ -458,34 +455,19 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, hard_enable_TSC(); } - if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { /* - * Disable the bitmap via an invalid offset. We still cache - * the previous bitmap owner and the IO bitmap contents: + * Copy the relevant range of the IO bitmap. 
+ * Normally this is 128 bytes or less: */ - tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; - return; - } - - if (likely(next == tss->io_bitmap_owner)) { + memcpy(tss->io_bitmap, next->io_bitmap_ptr, + max(prev->io_bitmap_max, next->io_bitmap_max)); + } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { /* - * Previous owner of the bitmap (hence the bitmap content) - * matches the next task, we dont have to do anything but - * to set a valid offset in the TSS: + * Clear any possible leftover bits: */ - tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; - return; + memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); } - /* - * Lazy TSS's I/O bitmap copy. We set an invalid offset here - * and we let the task to get a GPF in case an I/O instruction - * is performed. The handler of the GPF will verify that the - * faulting task has a valid I/O bitmap and, it true, does the - * real copy and restart the instruction. This will save us - * redundant copies when the currently switched task does not - * perform any I/O during its timeslice. - */ - tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; } /* diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index c05430ac1b4..a1d288327ff 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) if (!user_mode_vm(regs)) die(str, regs, err); } - -/* - * Perform the lazy TSS's I/O bitmap copy. If the TSS has an - * invalid offset set (the LAZY one) and the faulting thread has - * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS, - * we set the offset field correctly and return 1. - */ -static int lazy_iobitmap_copy(void) -{ - struct thread_struct *thread; - struct tss_struct *tss; - int cpu; - - cpu = get_cpu(); - tss = &per_cpu(init_tss, cpu); - thread = ¤t->thread; - - if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY && - thread->io_bitmap_ptr) { - memcpy(tss->io_bitmap, thread->io_bitmap_ptr, - thread->io_bitmap_max); - /* - * If the previously set map was extending to higher ports - * than the current one, pad extra space with 0xff (no access). 
- */ - if (thread->io_bitmap_max < tss->io_bitmap_max) { - memset((char *) tss->io_bitmap + - thread->io_bitmap_max, 0xff, - tss->io_bitmap_max - thread->io_bitmap_max); - } - tss->io_bitmap_max = thread->io_bitmap_max; - tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; - tss->io_bitmap_owner = thread; - put_cpu(); - - return 1; - } - put_cpu(); - - return 0; -} #endif static void __kprobes @@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code) conditional_sti(regs); #ifdef CONFIG_X86_32 - if (lazy_iobitmap_copy()) { - /* restart the faulting instruction */ - return; - } - if (regs->flags & X86_VM_MASK) goto gp_in_vm86; #endif -- cgit v1.2.3 From 389d1fb11e5f2a16b5e34c547756f0c4dec641f7 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 27 Feb 2009 13:25:28 -0800 Subject: x86: unify chunks of kernel/process*.c With x86-32 and -64 using the same mechanism for managing the tss io permissions bitmap, large chunks of process*.c are trivially unifyable, including: - exit_thread - flush_thread - __switch_to_xtra (along with tsc enable/disable) and as bonus pickups: - sys_fork - sys_vfork (Note: asmlinkage expands to empty on x86-64) Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/system.h | 2 + arch/x86/kernel/process.c | 191 +++++++++++++++++++++++++++++++++++++++++- arch/x86/kernel/process_32.c | 172 ------------------------------------- arch/x86/kernel/process_64.c | 188 ----------------------------------------- 4 files changed, 192 insertions(+), 361 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index c00bfdbdd45..1a7bf39f72d 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -20,6 +20,8 @@ struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, + struct tss_struct *tss); #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 87b69d4fac1..6afa5232dbb 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -1,8 +1,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); @@ -55,6 +58,192 @@ void arch_task_cache_init(void) SLAB_PANIC, NULL); } +/* + * Free current thread data structures etc.. 
+ */ +void exit_thread(void) +{ + struct task_struct *me = current; + struct thread_struct *t = &me->thread; + + if (me->thread.io_bitmap_ptr) { + struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); + + kfree(t->io_bitmap_ptr); + t->io_bitmap_ptr = NULL; + clear_thread_flag(TIF_IO_BITMAP); + /* + * Careful, clear this in the TSS too: + */ + memset(tss->io_bitmap, 0xff, t->io_bitmap_max); + t->io_bitmap_max = 0; + put_cpu(); + } + + ds_exit_thread(current); +} + +void flush_thread(void) +{ + struct task_struct *tsk = current; + +#ifdef CONFIG_X86_64 + if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { + clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); + if (test_tsk_thread_flag(tsk, TIF_IA32)) { + clear_tsk_thread_flag(tsk, TIF_IA32); + } else { + set_tsk_thread_flag(tsk, TIF_IA32); + current_thread_info()->status |= TS_COMPAT; + } + } +#endif + + clear_tsk_thread_flag(tsk, TIF_DEBUG); + + tsk->thread.debugreg0 = 0; + tsk->thread.debugreg1 = 0; + tsk->thread.debugreg2 = 0; + tsk->thread.debugreg3 = 0; + tsk->thread.debugreg6 = 0; + tsk->thread.debugreg7 = 0; + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + /* + * Forget coprocessor state.. + */ + tsk->fpu_counter = 0; + clear_fpu(tsk); + clear_used_math(); +} + +static void hard_disable_TSC(void) +{ + write_cr4(read_cr4() | X86_CR4_TSD); +} + +void disable_TSC(void) +{ + preempt_disable(); + if (!test_and_set_thread_flag(TIF_NOTSC)) + /* + * Must flip the CPU state synchronously with + * TIF_NOTSC in the current running context. + */ + hard_disable_TSC(); + preempt_enable(); +} + +static void hard_enable_TSC(void) +{ + write_cr4(read_cr4() & ~X86_CR4_TSD); +} + +static void enable_TSC(void) +{ + preempt_disable(); + if (test_and_clear_thread_flag(TIF_NOTSC)) + /* + * Must flip the CPU state synchronously with + * TIF_NOTSC in the current running context. + */ + hard_enable_TSC(); + preempt_enable(); +} + +int get_tsc_mode(unsigned long adr) +{ + unsigned int val; + + if (test_thread_flag(TIF_NOTSC)) + val = PR_TSC_SIGSEGV; + else + val = PR_TSC_ENABLE; + + return put_user(val, (unsigned int __user *)adr); +} + +int set_tsc_mode(unsigned int val) +{ + if (val == PR_TSC_SIGSEGV) + disable_TSC(); + else if (val == PR_TSC_ENABLE) + enable_TSC(); + else + return -EINVAL; + + return 0; +} + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, + struct tss_struct *tss) +{ + struct thread_struct *prev, *next; + + prev = &prev_p->thread; + next = &next_p->thread; + + if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || + test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) + ds_switch_to(prev_p, next_p); + else if (next->debugctlmsr != prev->debugctlmsr) + update_debugctlmsr(next->debugctlmsr); + + if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { + set_debugreg(next->debugreg0, 0); + set_debugreg(next->debugreg1, 1); + set_debugreg(next->debugreg2, 2); + set_debugreg(next->debugreg3, 3); + /* no 4 and 5 */ + set_debugreg(next->debugreg6, 6); + set_debugreg(next->debugreg7, 7); + } + + if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ + test_tsk_thread_flag(next_p, TIF_NOTSC)) { + /* prev and next are different */ + if (test_tsk_thread_flag(next_p, TIF_NOTSC)) + hard_disable_TSC(); + else + hard_enable_TSC(); + } + + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { + /* + * Copy the relevant range of the IO bitmap. 
+ * Normally this is 128 bytes or less: + */ + memcpy(tss->io_bitmap, next->io_bitmap_ptr, + max(prev->io_bitmap_max, next->io_bitmap_max)); + } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { + /* + * Clear any possible leftover bits: + */ + memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); + } +} + +int sys_fork(struct pt_regs *regs) +{ + return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); +} + +/* + * This is trivial, and on the face of it looks like it + * could equally well be done in user mode. + * + * Not so, for quite unobvious reasons - register pressure. + * In user mode vfork() cannot have a stack frame, and if + * done by calling the "clone()" system call directly, you + * do not have enough call-clobbered registers to hold all + * the information you need. + */ +int sys_vfork(struct pt_regs *regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, + NULL, NULL); +} + + /* * Idle related variables and functions */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a59314e877f..14014d766ca 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -230,52 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) } EXPORT_SYMBOL(kernel_thread); -/* - * Free current thread data structures etc.. - */ -void exit_thread(void) -{ - /* The process may have allocated an io port bitmap... nuke it. */ - if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { - struct task_struct *tsk = current; - struct thread_struct *t = &tsk->thread; - int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); - - kfree(t->io_bitmap_ptr); - t->io_bitmap_ptr = NULL; - clear_thread_flag(TIF_IO_BITMAP); - /* - * Careful, clear this in the TSS too: - */ - memset(tss->io_bitmap, 0xff, t->io_bitmap_max); - t->io_bitmap_max = 0; - put_cpu(); - } - - ds_exit_thread(current); -} - -void flush_thread(void) -{ - struct task_struct *tsk = current; - - tsk->thread.debugreg0 = 0; - tsk->thread.debugreg1 = 0; - tsk->thread.debugreg2 = 0; - tsk->thread.debugreg3 = 0; - tsk->thread.debugreg6 = 0; - tsk->thread.debugreg7 = 0; - memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); - clear_tsk_thread_flag(tsk, TIF_DEBUG); - /* - * Forget coprocessor state.. - */ - tsk->fpu_counter = 0; - clear_fpu(tsk); - clear_used_math(); -} - void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); @@ -363,112 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); -static void hard_disable_TSC(void) -{ - write_cr4(read_cr4() | X86_CR4_TSD); -} - -void disable_TSC(void) -{ - preempt_disable(); - if (!test_and_set_thread_flag(TIF_NOTSC)) - /* - * Must flip the CPU state synchronously with - * TIF_NOTSC in the current running context. - */ - hard_disable_TSC(); - preempt_enable(); -} - -static void hard_enable_TSC(void) -{ - write_cr4(read_cr4() & ~X86_CR4_TSD); -} - -static void enable_TSC(void) -{ - preempt_disable(); - if (test_and_clear_thread_flag(TIF_NOTSC)) - /* - * Must flip the CPU state synchronously with - * TIF_NOTSC in the current running context. 
- */ - hard_enable_TSC(); - preempt_enable(); -} - -int get_tsc_mode(unsigned long adr) -{ - unsigned int val; - - if (test_thread_flag(TIF_NOTSC)) - val = PR_TSC_SIGSEGV; - else - val = PR_TSC_ENABLE; - - return put_user(val, (unsigned int __user *)adr); -} - -int set_tsc_mode(unsigned int val) -{ - if (val == PR_TSC_SIGSEGV) - disable_TSC(); - else if (val == PR_TSC_ENABLE) - enable_TSC(); - else - return -EINVAL; - - return 0; -} - -static noinline void -__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss) -{ - struct thread_struct *prev, *next; - - prev = &prev_p->thread; - next = &next_p->thread; - - if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || - test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) - ds_switch_to(prev_p, next_p); - else if (next->debugctlmsr != prev->debugctlmsr) - update_debugctlmsr(next->debugctlmsr); - - if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - set_debugreg(next->debugreg0, 0); - set_debugreg(next->debugreg1, 1); - set_debugreg(next->debugreg2, 2); - set_debugreg(next->debugreg3, 3); - /* no 4 and 5 */ - set_debugreg(next->debugreg6, 6); - set_debugreg(next->debugreg7, 7); - } - - if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ - test_tsk_thread_flag(next_p, TIF_NOTSC)) { - /* prev and next are different */ - if (test_tsk_thread_flag(next_p, TIF_NOTSC)) - hard_disable_TSC(); - else - hard_enable_TSC(); - } - - if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { - /* - * Copy the relevant range of the IO bitmap. - * Normally this is 128 bytes or less: - */ - memcpy(tss->io_bitmap, next->io_bitmap_ptr, - max(prev->io_bitmap_max, next->io_bitmap_max)); - } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { - /* - * Clear any possible leftover bits: - */ - memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); - } -} /* * switch_to(x,yn) should switch tasks from x to y. @@ -582,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } -int sys_fork(struct pt_regs *regs) -{ - return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); -} - int sys_clone(struct pt_regs *regs) { unsigned long clone_flags; @@ -602,21 +445,6 @@ int sys_clone(struct pt_regs *regs) return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } -/* - * This is trivial, and on the face of it looks like it - * could equally well be done in user mode. - * - * Not so, for quite unobvious reasons - register pressure. - * In user mode vfork() cannot have a stack frame, and if - * done by calling the "clone()" system call directly, you - * do not have enough call-clobbered registers to hold all - * the information you need. - */ -int sys_vfork(struct pt_regs *regs) -{ - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); -} - /* * sys_execve() executes a new program. */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 836ef6575f0..abb7e6a7f0c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs) show_trace(NULL, regs, (void *)(regs + 1), regs->bp); } -/* - * Free current thread data structures etc.. 
- */ -void exit_thread(void) -{ - struct task_struct *me = current; - struct thread_struct *t = &me->thread; - - if (me->thread.io_bitmap_ptr) { - struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); - - kfree(t->io_bitmap_ptr); - t->io_bitmap_ptr = NULL; - clear_thread_flag(TIF_IO_BITMAP); - /* - * Careful, clear this in the TSS too: - */ - memset(tss->io_bitmap, 0xff, t->io_bitmap_max); - t->io_bitmap_max = 0; - put_cpu(); - } - - ds_exit_thread(current); -} - -void flush_thread(void) -{ - struct task_struct *tsk = current; - - if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { - clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); - if (test_tsk_thread_flag(tsk, TIF_IA32)) { - clear_tsk_thread_flag(tsk, TIF_IA32); - } else { - set_tsk_thread_flag(tsk, TIF_IA32); - current_thread_info()->status |= TS_COMPAT; - } - } - clear_tsk_thread_flag(tsk, TIF_DEBUG); - - tsk->thread.debugreg0 = 0; - tsk->thread.debugreg1 = 0; - tsk->thread.debugreg2 = 0; - tsk->thread.debugreg3 = 0; - tsk->thread.debugreg6 = 0; - tsk->thread.debugreg7 = 0; - memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); - /* - * Forget coprocessor state.. - */ - tsk->fpu_counter = 0; - clear_fpu(tsk); - clear_used_math(); -} - void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { @@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); -static void hard_disable_TSC(void) -{ - write_cr4(read_cr4() | X86_CR4_TSD); -} - -void disable_TSC(void) -{ - preempt_disable(); - if (!test_and_set_thread_flag(TIF_NOTSC)) - /* - * Must flip the CPU state synchronously with - * TIF_NOTSC in the current running context. - */ - hard_disable_TSC(); - preempt_enable(); -} - -static void hard_enable_TSC(void) -{ - write_cr4(read_cr4() & ~X86_CR4_TSD); -} - -static void enable_TSC(void) -{ - preempt_disable(); - if (test_and_clear_thread_flag(TIF_NOTSC)) - /* - * Must flip the CPU state synchronously with - * TIF_NOTSC in the current running context. 
- */ - hard_enable_TSC(); - preempt_enable(); -} - -int get_tsc_mode(unsigned long adr) -{ - unsigned int val; - - if (test_thread_flag(TIF_NOTSC)) - val = PR_TSC_SIGSEGV; - else - val = PR_TSC_ENABLE; - - return put_user(val, (unsigned int __user *)adr); -} - -int set_tsc_mode(unsigned int val) -{ - if (val == PR_TSC_SIGSEGV) - disable_TSC(); - else if (val == PR_TSC_ENABLE) - enable_TSC(); - else - return -EINVAL; - - return 0; -} - -/* - * This special macro can be used to load a debugging register - */ -#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r) - -static inline void __switch_to_xtra(struct task_struct *prev_p, - struct task_struct *next_p, - struct tss_struct *tss) -{ - struct thread_struct *prev, *next; - - prev = &prev_p->thread, - next = &next_p->thread; - - if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || - test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) - ds_switch_to(prev_p, next_p); - else if (next->debugctlmsr != prev->debugctlmsr) - update_debugctlmsr(next->debugctlmsr); - - if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - loaddebug(next, 0); - loaddebug(next, 1); - loaddebug(next, 2); - loaddebug(next, 3); - /* no 4 and 5 */ - loaddebug(next, 6); - loaddebug(next, 7); - } - - if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ - test_tsk_thread_flag(next_p, TIF_NOTSC)) { - /* prev and next are different */ - if (test_tsk_thread_flag(next_p, TIF_NOTSC)) - hard_disable_TSC(); - else - hard_enable_TSC(); - } - - if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { - /* - * Copy the relevant range of the IO bitmap. - * Normally this is 128 bytes or less: - */ - memcpy(tss->io_bitmap, next->io_bitmap_ptr, - max(prev->io_bitmap_max, next->io_bitmap_max)); - } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { - /* - * Clear any possible leftover bits: - */ - memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); - } -} - /* * switch_to(x,y) should switch tasks from x to y. * @@ -694,11 +527,6 @@ void set_personality_64bit(void) current->personality &= ~READ_IMPLIES_EXEC; } -asmlinkage long sys_fork(struct pt_regs *regs) -{ - return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); -} - asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) @@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp, return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } -/* - * This is trivial, and on the face of it looks like it - * could equally well be done in user mode. - * - * Not so, for quite unobvious reasons - register pressure. - * In user mode vfork() cannot have a stack frame, and if - * done by calling the "clone()" system call directly, you - * do not have enough call-clobbered registers to hold all - * the information you need. - */ -asmlinkage long sys_vfork(struct pt_regs *regs) -{ - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, - NULL, NULL); -} - unsigned long get_wchan(struct task_struct *p) { unsigned long stack; -- cgit v1.2.3 From 2fb6b2a048ed8fa3f049c7d42f7a2dd3f0c8d7a6 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 27 Feb 2009 13:25:33 -0800 Subject: x86: add forward decl for tss_struct Its the correct thing to do before using the struct in a prototype. 
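As a minimal illustrative sketch (not part of the patch itself): a forward declaration introduces struct tss_struct as an incomplete type, which is all the compiler needs in order to accept a pointer to it in a prototype, without system.h having to pull in the header that defines the struct. The one-line hunk below does exactly this for the __switch_to_xtra() prototype added in the previous patch:

    /* illustrative only -- mirrors what the one-line hunk below achieves */
    struct task_struct;     /* incomplete types are fine for pointer parameters */
    struct tss_struct;

    void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                          struct tss_struct *tss);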
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/system.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 1a7bf39f72d..643c59b4bc6 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -20,6 +20,7 @@ struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); +struct tss_struct; void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss); -- cgit v1.2.3 From 9976b39b5031bbf76f715893cf080b6a17683881 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 27 Feb 2009 09:19:26 -0800 Subject: xen: deal with virtually mapped percpu data The virtually mapped percpu space causes us two problems: - for hypercalls which take an mfn, we need to do a full pagetable walk to convert the percpu va into an mfn, and - when a hypercall requires a page to be mapped RO via all its aliases, we need to make sure its RO in both the percpu mapping and in the linear mapping This primarily affects the gdt and the vcpu info structure. Signed-off-by: Jeremy Fitzhardinge Cc: Xen-devel Cc: Gerd Hoffmann Cc: Rusty Russell Cc: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/include/asm/xen/page.h | 1 + arch/x86/xen/enlighten.c | 10 ++++++---- arch/x86/xen/mmu.c | 7 +++++++ arch/x86/xen/smp.c | 6 +++++- 4 files changed, 19 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 4bd990ee43d..1a918dde46b 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -164,6 +164,7 @@ static inline pte_t __pte_ma(pteval_t x) xmaddr_t arbitrary_virt_to_machine(void *address); +unsigned long arbitrary_virt_to_mfn(void *vaddr); void make_lowmem_page_readonly(void *vaddr); void make_lowmem_page_readwrite(void *vaddr); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 86497d5f44c..352ea683065 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -103,7 +103,7 @@ static void xen_vcpu_setup(int cpu) vcpup = &per_cpu(xen_vcpu_info, cpu); - info.mfn = virt_to_mfn(vcpup); + info.mfn = arbitrary_virt_to_mfn(vcpup); info.offset = offset_in_page(vcpup); printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n", @@ -301,8 +301,10 @@ static void xen_load_gdt(const struct desc_ptr *dtr) frames = mcs.args; for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { - frames[f] = virt_to_mfn(va); + frames[f] = arbitrary_virt_to_mfn((void *)va); + make_lowmem_page_readonly((void *)va); + make_lowmem_page_readonly(mfn_to_virt(frames[f])); } MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); @@ -314,7 +316,7 @@ static void load_TLS_descriptor(struct thread_struct *t, unsigned int cpu, unsigned int i) { struct desc_struct *gdt = get_cpu_gdt_table(cpu); - xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); + xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); struct multicall_space mc = __xen_mc_entry(0); MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); @@ -488,7 +490,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, break; default: { - xmaddr_t maddr = virt_to_machine(&dt[entry]); + xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]); xen_mc_flush(); if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 
*)desc)) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 319bd40a57c..cb6afa4ec95 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -276,6 +276,13 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn) p2m_top[topidx][idx] = mfn; } +unsigned long arbitrary_virt_to_mfn(void *vaddr) +{ + xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); + + return PFN_DOWN(maddr.maddr); +} + xmaddr_t arbitrary_virt_to_machine(void *vaddr) { unsigned long address = (unsigned long)vaddr; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 035582ae815..8d470562ffc 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -219,6 +219,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) { struct vcpu_guest_context *ctxt; struct desc_struct *gdt; + unsigned long gdt_mfn; if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) return 0; @@ -248,9 +249,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->ldt_ents = 0; BUG_ON((unsigned long)gdt & ~PAGE_MASK); + + gdt_mfn = arbitrary_virt_to_mfn(gdt); make_lowmem_page_readonly(gdt); + make_lowmem_page_readonly(mfn_to_virt(gdt_mfn)); - ctxt->gdt_frames[0] = virt_to_mfn(gdt); + ctxt->gdt_frames[0] = gdt_mfn; ctxt->gdt_ents = GDT_ENTRIES; ctxt->user_regs.cs = __KERNEL_CS; -- cgit v1.2.3 From 55ba99eb211a06709237cb322ecd8c8b6faf6159 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 3 Mar 2009 15:40:25 +0900 Subject: sh: Add support for SH7786 CPU subtype. This adds preliminary support for the SH7786 CPU subtype. While this is a dual-core CPU, only UP is supported for now. L2 cache support is likewise not yet implemented. More information on this particular CPU subtype is available at: http://www.renesas.com/fmwk.jsp?cnt=sh7786_root.jsp&fp=/products/mpumcu/superh_family/sh7780_series/sh7786_group/ Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 + arch/sh/include/asm/processor.h | 2 +- arch/sh/include/cpu-sh4/cpu/freq.h | 4 + arch/sh/include/cpu-sh4/cpu/sh7786.h | 192 +++++++ arch/sh/kernel/cpu/sh4/probe.c | 7 + arch/sh/kernel/cpu/sh4a/Makefile | 3 + arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 148 +++++ arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c | 950 ++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 407 ++++++++++++++ arch/sh/kernel/setup.c | 1 + arch/sh/kernel/timers/timer-tmu.c | 1 + arch/sh/oprofile/common.c | 1 + 12 files changed, 1722 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/cpu-sh4/cpu/sh7786.h create mode 100644 arch/sh/kernel/cpu/sh4a/clock-sh7786.c create mode 100644 arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c create mode 100644 arch/sh/kernel/cpu/sh4a/setup-sh7786.c (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 78a01d7d37e..0ae09683fb2 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -356,6 +356,13 @@ config CPU_SUBTYPE_SH7785 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA +config CPU_SUBTYPE_SH7786 + bool "Support SH7786 processor" + select CPU_SH4A + select CPU_SHX2 + select ARCH_SPARSEMEM_ENABLE + select SYS_SUPPORTS_NUMA + config CPU_SUBTYPE_SHX3 bool "Support SH-X3 processor" select CPU_SH4A diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 1ef4b24d761..1fd58b42143 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -31,7 +31,7 @@ enum cpu_type { CPU_SH7760, CPU_SH4_202, CPU_SH4_501, /* SH-4A types */ - CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, + CPU_SH7763, 
CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786, CPU_SH7723, CPU_SHX3, /* SH4AL-DSP types */ diff --git a/arch/sh/include/cpu-sh4/cpu/freq.h b/arch/sh/include/cpu-sh4/cpu/freq.h index c23af81c2e7..749d1c43433 100644 --- a/arch/sh/include/cpu-sh4/cpu/freq.h +++ b/arch/sh/include/cpu-sh4/cpu/freq.h @@ -29,6 +29,10 @@ #define FRQCR0 0xffc80000 #define FRQCR1 0xffc80004 #define FRQMR1 0xffc80014 +#elif defined(CONFIG_CPU_SUBTYPE_SH7786) +#define FRQCR0 0xffc40000 +#define FRQCR1 0xffc40004 +#define FRQMR1 0xffc40014 #elif defined(CONFIG_CPU_SUBTYPE_SHX3) #define FRQCR 0xffc00014 #else diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h new file mode 100644 index 00000000000..48688adc0c8 --- /dev/null +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h @@ -0,0 +1,192 @@ +/* + * SH7786 Pinmux + * + * Copyright (C) 2008, 2009 Renesas Solutions Corp. + * Kuninori Morimoto + * + * Based on sh7785.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __CPU_SH7786_H__ +#define __CPU_SH7786_H__ + +enum { + /* PA */ + GPIO_PA7, GPIO_PA6, GPIO_PA5, GPIO_PA4, + GPIO_PA3, GPIO_PA2, GPIO_PA1, GPIO_PA0, + + /* PB */ + GPIO_PB7, GPIO_PB6, GPIO_PB5, GPIO_PB4, + GPIO_PB3, GPIO_PB2, GPIO_PB1, GPIO_PB0, + + /* PC */ + GPIO_PC7, GPIO_PC6, GPIO_PC5, GPIO_PC4, + GPIO_PC3, GPIO_PC2, GPIO_PC1, GPIO_PC0, + + /* PD */ + GPIO_PD7, GPIO_PD6, GPIO_PD5, GPIO_PD4, + GPIO_PD3, GPIO_PD2, GPIO_PD1, GPIO_PD0, + + /* PE */ + GPIO_PE5, GPIO_PE4, GPIO_PE3, GPIO_PE2, + GPIO_PE1, GPIO_PE0, + + /* PF */ + GPIO_PF7, GPIO_PF6, GPIO_PF5, GPIO_PF4, + GPIO_PF3, GPIO_PF2, GPIO_PF1, GPIO_PF0, + + /* PG */ + GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, + GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, + + /* PH */ + GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, + GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, + + /* PJ */ + GPIO_PJ7, GPIO_PJ6, GPIO_PJ5, GPIO_PJ4, + GPIO_PJ3, GPIO_PJ2, GPIO_PJ1, GPIO_PJ0, + + GPIO_FN_CDE, + GPIO_FN_ETH_MAGIC, + GPIO_FN_DISP, + GPIO_FN_ETH_LINK, + GPIO_FN_DR5, + GPIO_FN_ETH_TX_ER, + GPIO_FN_DR4, + GPIO_FN_ETH_TX_EN, + GPIO_FN_DR3, + GPIO_FN_ETH_TXD3, + GPIO_FN_DR2, + GPIO_FN_ETH_TXD2, + GPIO_FN_DR1, + GPIO_FN_ETH_TXD1, + GPIO_FN_DR0, + GPIO_FN_ETH_TXD0, + GPIO_FN_VSYNC, + GPIO_FN_HSPI_CLK, + GPIO_FN_ODDF, + GPIO_FN_HSPI_CS, + GPIO_FN_DG5, + GPIO_FN_ETH_MDIO, + GPIO_FN_DG4, + GPIO_FN_ETH_RX_CLK, + GPIO_FN_DG3, + GPIO_FN_ETH_MDC, + GPIO_FN_DG2, + GPIO_FN_ETH_COL, + GPIO_FN_DG1, + GPIO_FN_ETH_TX_CLK, + GPIO_FN_DG0, + GPIO_FN_ETH_CRS, + GPIO_FN_DCLKIN, + GPIO_FN_HSPI_RX, + GPIO_FN_HSYNC, + GPIO_FN_HSPI_TX, + GPIO_FN_DB5, + GPIO_FN_ETH_RXD3, + GPIO_FN_DB4, + GPIO_FN_ETH_RXD2, + GPIO_FN_DB3, + GPIO_FN_ETH_RXD1, + GPIO_FN_DB2, + GPIO_FN_ETH_RXD0, + GPIO_FN_DB1, + GPIO_FN_ETH_RX_DV, + GPIO_FN_DB0, + GPIO_FN_ETH_RX_ER, + GPIO_FN_DCLKOUT, + GPIO_FN_SCIF1_SLK, + GPIO_FN_SCIF1_RXD, + GPIO_FN_SCIF1_TXD, + GPIO_FN_DACK1, + GPIO_FN_BACK, + GPIO_FN_FALE, + GPIO_FN_DACK0, + GPIO_FN_FCLE, + GPIO_FN_DREQ1, + GPIO_FN_BREQ, + GPIO_FN_USB_OVC1, + GPIO_FN_DREQ0, + GPIO_FN_USB_OVC0, + GPIO_FN_USB_PENC1, + GPIO_FN_USB_PENC0, + GPIO_FN_HAC1_SDOUT, + GPIO_FN_SSI1_SDATA, + GPIO_FN_SDIF1CMD, + GPIO_FN_HAC1_SDIN, + GPIO_FN_SSI1_SCK, + GPIO_FN_SDIF1CD, + GPIO_FN_HAC1_SYNC, + GPIO_FN_SSI1_WS, + GPIO_FN_SDIF1WP, + GPIO_FN_HAC1_BITCLK, + GPIO_FN_SSI1_CLK, + GPIO_FN_SDIF1CLK, + GPIO_FN_HAC0_SDOUT, + GPIO_FN_SSI0_SDATA, + GPIO_FN_SDIF1D3, + GPIO_FN_HAC0_SDIN, + GPIO_FN_SSI0_SCK, + 
GPIO_FN_SDIF1D2, + GPIO_FN_HAC0_SYNC, + GPIO_FN_SSI0_WS, + GPIO_FN_SDIF1D1, + GPIO_FN_HAC0_BITCLK, + GPIO_FN_SSI0_CLK, + GPIO_FN_SDIF1D0, + GPIO_FN_SCIF3_SCK, + GPIO_FN_SSI2_SDATA, + GPIO_FN_SCIF3_RXD, + GPIO_FN_TCLK, + GPIO_FN_SSI2_SCK, + GPIO_FN_SCIF3_TXD, + GPIO_FN_HAC_RES, + GPIO_FN_SSI2_WS, + GPIO_FN_DACK3, + GPIO_FN_SDIF0CMD, + GPIO_FN_DACK2, + GPIO_FN_SDIF0CD, + GPIO_FN_DREQ3, + GPIO_FN_SDIF0WP, + GPIO_FN_SCIF0_CTS, + GPIO_FN_DREQ2, + GPIO_FN_SDIF0CLK, + GPIO_FN_SCIF0_RTS, + GPIO_FN_IRL7, + GPIO_FN_SDIF0D3, + GPIO_FN_SCIF0_SCK, + GPIO_FN_IRL6, + GPIO_FN_SDIF0D2, + GPIO_FN_SCIF0_RXD, + GPIO_FN_IRL5, + GPIO_FN_SDIF0D1, + GPIO_FN_SCIF0_TXD, + GPIO_FN_IRL4, + GPIO_FN_SDIF0D0, + GPIO_FN_SCIF5_SCK, + GPIO_FN_FRB, + GPIO_FN_SCIF5_RXD, + GPIO_FN_IOIS16, + GPIO_FN_SCIF5_TXD, + GPIO_FN_CE2B, + GPIO_FN_DRAK3, + GPIO_FN_CE2A, + GPIO_FN_SCIF4_SCK, + GPIO_FN_DRAK2, + GPIO_FN_SSI3_WS, + GPIO_FN_SCIF4_RXD, + GPIO_FN_DRAK1, + GPIO_FN_SSI3_SDATA, + GPIO_FN_FSTATUS, + GPIO_FN_SCIF4_TXD, + GPIO_FN_DRAK0, + GPIO_FN_SSI3_SCK, + GPIO_FN_FSE, +}; + +#endif /* __CPU_SH7786_H__ */ diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 2e42572b1b1..2bd0ec96263 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -129,6 +129,13 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | CPU_HAS_LLSC; break; + case 0x4004: + boot_cpu_data.type = CPU_SH7786; + boot_cpu_data.icache.ways = 4; + boot_cpu_data.dcache.ways = 4; + boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | + CPU_HAS_LLSC; + break; case 0x3008: boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 8e344ec5847..1a92361feeb 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o +obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o @@ -21,6 +22,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o +clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7722.o @@ -31,6 +33,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o obj-y += $(clock-y) obj-$(CONFIG_SMP) += $(smp-y) diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c new file mode 100644 index 00000000000..f84a9c13447 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c @@ -0,0 +1,148 @@ +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7786.c + * + * SH7786 support for the clock framework + * + * Copyright (C) 2008, 2009 Renesas Solutions Corp. 
+ * Kuninori Morimoto + * + * Based on SH7785 + * Copyright (C) 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +static int ifc_divisors[] = { 1, 2, 4, 1 }; +static int sfc_divisors[] = { 1, 1, 4, 1 }; +static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1, + 24, 32, 1, 1, 1, 1, 1, 1 }; +static int mfc_divisors[] = { 1, 1, 4, 1 }; +static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1, + 24, 32, 1, 48, 1, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f]; +} + +static struct clk_ops sh7786_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQMR1) & 0x000f); + clk->rate = clk->parent->rate / pfc_divisors[idx]; +} + +static struct clk_ops sh7786_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f); + clk->rate = clk->parent->rate / bfc_divisors[idx]; +} + +static struct clk_ops sh7786_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003); + clk->rate = clk->parent->rate / ifc_divisors[idx]; +} + +static struct clk_ops sh7786_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7786_clk_ops[] = { + &sh7786_master_clk_ops, + &sh7786_module_clk_ops, + &sh7786_bus_clk_ops, + &sh7786_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7786_clk_ops)) + *ops = sh7786_clk_ops[idx]; +} + +static void shyway_clk_recalc(struct clk *clk) +{ + int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003); + clk->rate = clk->parent->rate / sfc_divisors[idx]; +} + +static struct clk_ops sh7786_shyway_clk_ops = { + .recalc = shyway_clk_recalc, +}; + +static struct clk sh7786_shyway_clk = { + .name = "shyway_clk", + .flags = CLK_ALWAYS_ENABLED, + .ops = &sh7786_shyway_clk_ops, +}; + +static void ddr_clk_recalc(struct clk *clk) +{ + int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003); + clk->rate = clk->parent->rate / mfc_divisors[idx]; +} + +static struct clk_ops sh7786_ddr_clk_ops = { + .recalc = ddr_clk_recalc, +}; + +static struct clk sh7786_ddr_clk = { + .name = "ddr_clk", + .flags = CLK_ALWAYS_ENABLED, + .ops = &sh7786_ddr_clk_ops, +}; + +/* + * Additional SH7786-specific on-chip clocks that aren't already part of the + * clock framework + */ +static struct clk *sh7786_onchip_clocks[] = { + &sh7786_shyway_clk, + &sh7786_ddr_clk, +}; + +static int __init sh7786_clk_init(void) +{ + struct clk *clk = clk_get(NULL, "master_clk"); + int i; + + for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) { + struct clk *clkp = sh7786_onchip_clocks[i]; + + clkp->parent = clk; + clk_register(clkp); + clk_enable(clkp); + } + + /* + * Now that we have the rest of the clocks registered, we need to + * force the parent clock to propagate so that these clocks will + * automatically figure out their rate. We cheat by handing the + * parent clock its current rate and forcing child propagation. 
+ */ + clk_set_rate(clk, clk_get_rate(clk)); + + clk_put(clk); + + return 0; +} +arch_initcall(sh7786_clk_init); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c new file mode 100644 index 00000000000..373b3447bfd --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c @@ -0,0 +1,950 @@ +/* + * SH7786 Pinmux + * + * Copyright (C) 2008, 2009 Renesas Solutions Corp. + * Kuninori Morimoto + * + * Based on SH7785 pinmux + * + * Copyright (C) 2008 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include + +enum { + PINMUX_RESERVED = 0, + + PINMUX_DATA_BEGIN, + PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, + PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, + PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, + PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, + PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, + PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, + PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, + PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, + PE7_DATA, PE6_DATA, + PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, + PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, + PG7_DATA, PG6_DATA, PG5_DATA, + PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, + PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, + PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, + PJ3_DATA, PJ2_DATA, PJ1_DATA, + PINMUX_DATA_END, + + PINMUX_INPUT_BEGIN, + PA7_IN, PA6_IN, PA5_IN, PA4_IN, + PA3_IN, PA2_IN, PA1_IN, PA0_IN, + PB7_IN, PB6_IN, PB5_IN, PB4_IN, + PB3_IN, PB2_IN, PB1_IN, PB0_IN, + PC7_IN, PC6_IN, PC5_IN, PC4_IN, + PC3_IN, PC2_IN, PC1_IN, PC0_IN, + PD7_IN, PD6_IN, PD5_IN, PD4_IN, + PD3_IN, PD2_IN, PD1_IN, PD0_IN, + PE7_IN, PE6_IN, + PF7_IN, PF6_IN, PF5_IN, PF4_IN, + PF3_IN, PF2_IN, PF1_IN, PF0_IN, + PG7_IN, PG6_IN, PG5_IN, + PH7_IN, PH6_IN, PH5_IN, PH4_IN, + PH3_IN, PH2_IN, PH1_IN, PH0_IN, + PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN, + PJ3_IN, PJ2_IN, PJ1_IN, + PINMUX_INPUT_END, + + PINMUX_INPUT_PULLUP_BEGIN, + PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU, + PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU, + PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU, + PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU, + PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU, + PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU, + PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU, + PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU, + PE7_IN_PU, PE6_IN_PU, + PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU, + PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU, + PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, + PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU, + PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU, + PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU, + PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, + PINMUX_INPUT_PULLUP_END, + + PINMUX_OUTPUT_BEGIN, + PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT, + PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT, + PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT, + PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT, + PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, + PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, + PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, + PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT, + PE7_OUT, PE6_OUT, + PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, + PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, + PG7_OUT, PG6_OUT, PG5_OUT, + PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT, + PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT, + PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT, + PJ3_OUT, PJ2_OUT, PJ1_OUT, + PINMUX_OUTPUT_END, + + PINMUX_FUNCTION_BEGIN, + PA7_FN, PA6_FN, PA5_FN, PA4_FN, + PA3_FN, PA2_FN, PA1_FN, PA0_FN, + PB7_FN, PB6_FN, PB5_FN, PB4_FN, + PB3_FN, PB2_FN, PB1_FN, PB0_FN, + PC7_FN, PC6_FN, PC5_FN, 
PC4_FN, + PC3_FN, PC2_FN, PC1_FN, PC0_FN, + PD7_FN, PD6_FN, PD5_FN, PD4_FN, + PD3_FN, PD2_FN, PD1_FN, PD0_FN, + PE7_FN, PE6_FN, + PF7_FN, PF6_FN, PF5_FN, PF4_FN, + PF3_FN, PF2_FN, PF1_FN, PF0_FN, + PG7_FN, PG6_FN, PG5_FN, + PH7_FN, PH6_FN, PH5_FN, PH4_FN, + PH3_FN, PH2_FN, PH1_FN, PH0_FN, + PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN, + PJ3_FN, PJ2_FN, PJ1_FN, + P1MSEL14_0, P1MSEL14_1, + P1MSEL13_0, P1MSEL13_1, + P1MSEL12_0, P1MSEL12_1, + P1MSEL11_0, P1MSEL11_1, + P1MSEL10_0, P1MSEL10_1, + P1MSEL9_0, P1MSEL9_1, + P1MSEL8_0, P1MSEL8_1, + P1MSEL7_0, P1MSEL7_1, + P1MSEL6_0, P1MSEL6_1, + P1MSEL5_0, P1MSEL5_1, + P1MSEL4_0, P1MSEL4_1, + P1MSEL3_0, P1MSEL3_1, + P1MSEL2_0, P1MSEL2_1, + P1MSEL1_0, P1MSEL1_1, + P1MSEL0_0, P1MSEL0_1, + + P2MSEL15_0, P2MSEL15_1, + P2MSEL14_0, P2MSEL14_1, + P2MSEL13_0, P2MSEL13_1, + P2MSEL12_0, P2MSEL12_1, + P2MSEL11_0, P2MSEL11_1, + P2MSEL10_0, P2MSEL10_1, + P2MSEL9_0, P2MSEL9_1, + P2MSEL8_0, P2MSEL8_1, + P2MSEL7_0, P2MSEL7_1, + P2MSEL6_0, P2MSEL6_1, + P2MSEL5_0, P2MSEL5_1, + P2MSEL4_0, P2MSEL4_1, + P2MSEL3_0, P2MSEL3_1, + P2MSEL2_0, P2MSEL2_1, + P2MSEL1_0, P2MSEL1_1, + P2MSEL0_0, P2MSEL0_1, + PINMUX_FUNCTION_END, + + PINMUX_MARK_BEGIN, + CDE_MARK, + ETH_MAGIC_MARK, + DISP_MARK, + ETH_LINK_MARK, + DR5_MARK, + ETH_TX_ER_MARK, + DR4_MARK, + ETH_TX_EN_MARK, + DR3_MARK, + ETH_TXD3_MARK, + DR2_MARK, + ETH_TXD2_MARK, + DR1_MARK, + ETH_TXD1_MARK, + DR0_MARK, + ETH_TXD0_MARK, + + VSYNC_MARK, + HSPI_CLK_MARK, + ODDF_MARK, + HSPI_CS_MARK, + DG5_MARK, + ETH_MDIO_MARK, + DG4_MARK, + ETH_RX_CLK_MARK, + DG3_MARK, + ETH_MDC_MARK, + DG2_MARK, + ETH_COL_MARK, + DG1_MARK, + ETH_TX_CLK_MARK, + DG0_MARK, + ETH_CRS_MARK, + + DCLKIN_MARK, + HSPI_RX_MARK, + HSYNC_MARK, + HSPI_TX_MARK, + DB5_MARK, + ETH_RXD3_MARK, + DB4_MARK, + ETH_RXD2_MARK, + DB3_MARK, + ETH_RXD1_MARK, + DB2_MARK, + ETH_RXD0_MARK, + DB1_MARK, + ETH_RX_DV_MARK, + DB0_MARK, + ETH_RX_ER_MARK, + + DCLKOUT_MARK, + SCIF1_SLK_MARK, + SCIF1_RXD_MARK, + SCIF1_TXD_MARK, + DACK1_MARK, + BACK_MARK, + FALE_MARK, + DACK0_MARK, + FCLE_MARK, + DREQ1_MARK, + BREQ_MARK, + USB_OVC1_MARK, + DREQ0_MARK, + USB_OVC0_MARK, + + USB_PENC1_MARK, + USB_PENC0_MARK, + + HAC1_SDOUT_MARK, + SSI1_SDATA_MARK, + SDIF1CMD_MARK, + HAC1_SDIN_MARK, + SSI1_SCK_MARK, + SDIF1CD_MARK, + HAC1_SYNC_MARK, + SSI1_WS_MARK, + SDIF1WP_MARK, + HAC1_BITCLK_MARK, + SSI1_CLK_MARK, + SDIF1CLK_MARK, + HAC0_SDOUT_MARK, + SSI0_SDATA_MARK, + SDIF1D3_MARK, + HAC0_SDIN_MARK, + SSI0_SCK_MARK, + SDIF1D2_MARK, + HAC0_SYNC_MARK, + SSI0_WS_MARK, + SDIF1D1_MARK, + HAC0_BITCLK_MARK, + SSI0_CLK_MARK, + SDIF1D0_MARK, + + SCIF3_SCK_MARK, + SSI2_SDATA_MARK, + SCIF3_RXD_MARK, + TCLK_MARK, + SSI2_SCK_MARK, + SCIF3_TXD_MARK, + HAC_RES_MARK, + SSI2_WS_MARK, + + DACK3_MARK, + SDIF0CMD_MARK, + DACK2_MARK, + SDIF0CD_MARK, + DREQ3_MARK, + SDIF0WP_MARK, + SCIF0_CTS_MARK, + DREQ2_MARK, + SDIF0CLK_MARK, + SCIF0_RTS_MARK, + IRL7_MARK, + SDIF0D3_MARK, + SCIF0_SCK_MARK, + IRL6_MARK, + SDIF0D2_MARK, + SCIF0_RXD_MARK, + IRL5_MARK, + SDIF0D1_MARK, + SCIF0_TXD_MARK, + IRL4_MARK, + SDIF0D0_MARK, + + SCIF5_SCK_MARK, + FRB_MARK, + SCIF5_RXD_MARK, + IOIS16_MARK, + SCIF5_TXD_MARK, + CE2B_MARK, + DRAK3_MARK, + CE2A_MARK, + SCIF4_SCK_MARK, + DRAK2_MARK, + SSI3_WS_MARK, + SCIF4_RXD_MARK, + DRAK1_MARK, + SSI3_SDATA_MARK, + FSTATUS_MARK, + SCIF4_TXD_MARK, + DRAK0_MARK, + SSI3_SCK_MARK, + FSE_MARK, + PINMUX_MARK_END, +}; + +static pinmux_enum_t pinmux_data[] = { + + /* PA GPIO */ + PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU), + PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU), + PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, 
PA5_IN_PU), + PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU), + PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU), + PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU), + PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU), + PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU), + + /* PB GPIO */ + PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU), + PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU), + PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU), + PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU), + PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU), + PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU), + PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU), + PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU), + + /* PC GPIO */ + PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU), + PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU), + PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU), + PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU), + PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU), + PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU), + PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU), + PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU), + + /* PD GPIO */ + PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU), + PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU), + PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU), + PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU), + PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU), + PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU), + PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU), + PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU), + + /* PE GPIO */ + PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU), + PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU), + + /* PF GPIO */ + PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU), + PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU), + PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU), + PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU), + PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU), + PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU), + PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU), + PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU), + + /* PG GPIO */ + PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU), + PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU), + PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU), + + /* PH GPIO */ + PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU), + PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU), + PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU), + PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU), + PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU), + PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU), + PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU), + PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU), + + /* PJ GPIO */ + PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU), + PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU), + PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU), + PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU), + PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU), + PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU), + PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU), + + /* PA FN */ + PINMUX_MARK_BEGIN, + PINMUX_DATA(CDE_MARK, P1MSEL2_0, PA7_FN), + PINMUX_DATA(DISP_MARK, P1MSEL2_0, PA6_FN), + PINMUX_DATA(DR5_MARK, P1MSEL2_0, PA5_FN), + PINMUX_DATA(DR4_MARK, P1MSEL2_0, PA4_FN), + PINMUX_DATA(DR3_MARK, P1MSEL2_0, PA3_FN), + PINMUX_DATA(DR2_MARK, P1MSEL2_0, PA2_FN), + PINMUX_DATA(DR1_MARK, P1MSEL2_0, PA1_FN), + PINMUX_DATA(DR0_MARK, 
P1MSEL2_0, PA0_FN), + PINMUX_DATA(ETH_MAGIC_MARK, P1MSEL2_1, PA7_FN), + PINMUX_DATA(ETH_LINK_MARK, P1MSEL2_1, PA6_FN), + PINMUX_DATA(ETH_TX_ER_MARK, P1MSEL2_1, PA5_FN), + PINMUX_DATA(ETH_TX_EN_MARK, P1MSEL2_1, PA4_FN), + PINMUX_DATA(ETH_TXD3_MARK, P1MSEL2_1, PA3_FN), + PINMUX_DATA(ETH_TXD2_MARK, P1MSEL2_1, PA2_FN), + PINMUX_DATA(ETH_TXD1_MARK, P1MSEL2_1, PA1_FN), + PINMUX_DATA(ETH_TXD0_MARK, P1MSEL2_1, PA0_FN), + + /* PB FN */ + PINMUX_DATA(VSYNC_MARK, P1MSEL3_0, PB7_FN), + PINMUX_DATA(ODDF_MARK, P1MSEL3_0, PB6_FN), + PINMUX_DATA(DG5_MARK, P1MSEL2_0, PB5_FN), + PINMUX_DATA(DG4_MARK, P1MSEL2_0, PB4_FN), + PINMUX_DATA(DG3_MARK, P1MSEL2_0, PB3_FN), + PINMUX_DATA(DG2_MARK, P1MSEL2_0, PB2_FN), + PINMUX_DATA(DG1_MARK, P1MSEL2_0, PB1_FN), + PINMUX_DATA(DG0_MARK, P1MSEL2_0, PB0_FN), + PINMUX_DATA(HSPI_CLK_MARK, P1MSEL3_1, PB7_FN), + PINMUX_DATA(HSPI_CS_MARK, P1MSEL3_1, PB6_FN), + PINMUX_DATA(ETH_MDIO_MARK, P1MSEL2_1, PB5_FN), + PINMUX_DATA(ETH_RX_CLK_MARK, P1MSEL2_1, PB4_FN), + PINMUX_DATA(ETH_MDC_MARK, P1MSEL2_1, PB3_FN), + PINMUX_DATA(ETH_COL_MARK, P1MSEL2_1, PB2_FN), + PINMUX_DATA(ETH_TX_CLK_MARK, P1MSEL2_1, PB1_FN), + PINMUX_DATA(ETH_CRS_MARK, P1MSEL2_1, PB0_FN), + + /* PC FN */ + PINMUX_DATA(DCLKIN_MARK, P1MSEL3_0, PC7_FN), + PINMUX_DATA(HSYNC_MARK, P1MSEL3_0, PC6_FN), + PINMUX_DATA(DB5_MARK, P1MSEL2_0, PC5_FN), + PINMUX_DATA(DB4_MARK, P1MSEL2_0, PC4_FN), + PINMUX_DATA(DB3_MARK, P1MSEL2_0, PC3_FN), + PINMUX_DATA(DB2_MARK, P1MSEL2_0, PC2_FN), + PINMUX_DATA(DB1_MARK, P1MSEL2_0, PC1_FN), + PINMUX_DATA(DB0_MARK, P1MSEL2_0, PC0_FN), + + PINMUX_DATA(HSPI_RX_MARK, P1MSEL3_1, PC7_FN), + PINMUX_DATA(HSPI_TX_MARK, P1MSEL3_1, PC6_FN), + PINMUX_DATA(ETH_RXD3_MARK, P1MSEL2_1, PC5_FN), + PINMUX_DATA(ETH_RXD2_MARK, P1MSEL2_1, PC4_FN), + PINMUX_DATA(ETH_RXD1_MARK, P1MSEL2_1, PC3_FN), + PINMUX_DATA(ETH_RXD0_MARK, P1MSEL2_1, PC2_FN), + PINMUX_DATA(ETH_RX_DV_MARK, P1MSEL2_1, PC1_FN), + PINMUX_DATA(ETH_RX_ER_MARK, P1MSEL2_1, PC0_FN), + + /* PD FN */ + PINMUX_DATA(DCLKOUT_MARK, PD7_FN), + PINMUX_DATA(SCIF1_SLK_MARK, PD6_FN), + PINMUX_DATA(SCIF1_RXD_MARK, PD5_FN), + PINMUX_DATA(SCIF1_TXD_MARK, PD4_FN), + PINMUX_DATA(DACK1_MARK, P1MSEL13_1, P1MSEL12_0, PD3_FN), + PINMUX_DATA(BACK_MARK, P1MSEL13_0, P1MSEL12_1, PD3_FN), + PINMUX_DATA(FALE_MARK, P1MSEL13_0, P1MSEL12_0, PD3_FN), + PINMUX_DATA(DACK0_MARK, P1MSEL14_1, PD2_FN), + PINMUX_DATA(FCLE_MARK, P1MSEL14_0, PD2_FN), + PINMUX_DATA(DREQ1_MARK, P1MSEL10_0, P1MSEL9_1, PD1_FN), + PINMUX_DATA(BREQ_MARK, P1MSEL10_1, P1MSEL9_0, PD1_FN), + PINMUX_DATA(USB_OVC1_MARK, P1MSEL10_0, P1MSEL9_0, PD1_FN), + PINMUX_DATA(DREQ0_MARK, P1MSEL11_1, PD0_FN), + PINMUX_DATA(USB_OVC0_MARK, P1MSEL11_0, PD0_FN), + + /* PE FN */ + PINMUX_DATA(USB_PENC1_MARK, PE7_FN), + PINMUX_DATA(USB_PENC0_MARK, PE6_FN), + + /* PF FN */ + PINMUX_DATA(HAC1_SDOUT_MARK, P2MSEL15_0, P2MSEL14_0, PF7_FN), + PINMUX_DATA(HAC1_SDIN_MARK, P2MSEL15_0, P2MSEL14_0, PF6_FN), + PINMUX_DATA(HAC1_SYNC_MARK, P2MSEL15_0, P2MSEL14_0, PF5_FN), + PINMUX_DATA(HAC1_BITCLK_MARK, P2MSEL15_0, P2MSEL14_0, PF4_FN), + PINMUX_DATA(HAC0_SDOUT_MARK, P2MSEL13_0, P2MSEL12_0, PF3_FN), + PINMUX_DATA(HAC0_SDIN_MARK, P2MSEL13_0, P2MSEL12_0, PF2_FN), + PINMUX_DATA(HAC0_SYNC_MARK, P2MSEL13_0, P2MSEL12_0, PF1_FN), + PINMUX_DATA(HAC0_BITCLK_MARK, P2MSEL13_0, P2MSEL12_0, PF0_FN), + PINMUX_DATA(SSI1_SDATA_MARK, P2MSEL15_0, P2MSEL14_1, PF7_FN), + PINMUX_DATA(SSI1_SCK_MARK, P2MSEL15_0, P2MSEL14_1, PF6_FN), + PINMUX_DATA(SSI1_WS_MARK, P2MSEL15_0, P2MSEL14_1, PF5_FN), + PINMUX_DATA(SSI1_CLK_MARK, P2MSEL15_0, P2MSEL14_1, PF4_FN), + 
PINMUX_DATA(SSI0_SDATA_MARK, P2MSEL13_0, P2MSEL12_1, PF3_FN), + PINMUX_DATA(SSI0_SCK_MARK, P2MSEL13_0, P2MSEL12_1, PF2_FN), + PINMUX_DATA(SSI0_WS_MARK, P2MSEL13_0, P2MSEL12_1, PF1_FN), + PINMUX_DATA(SSI0_CLK_MARK, P2MSEL13_0, P2MSEL12_1, PF0_FN), + PINMUX_DATA(SDIF1CMD_MARK, P2MSEL15_1, P2MSEL14_0, PF7_FN), + PINMUX_DATA(SDIF1CD_MARK, P2MSEL15_1, P2MSEL14_0, PF6_FN), + PINMUX_DATA(SDIF1WP_MARK, P2MSEL15_1, P2MSEL14_0, PF5_FN), + PINMUX_DATA(SDIF1CLK_MARK, P2MSEL15_1, P2MSEL14_0, PF4_FN), + PINMUX_DATA(SDIF1D3_MARK, P2MSEL13_1, P2MSEL12_0, PF3_FN), + PINMUX_DATA(SDIF1D2_MARK, P2MSEL13_1, P2MSEL12_0, PF2_FN), + PINMUX_DATA(SDIF1D1_MARK, P2MSEL13_1, P2MSEL12_0, PF1_FN), + PINMUX_DATA(SDIF1D0_MARK, P2MSEL13_1, P2MSEL12_0, PF0_FN), + + /* PG FN */ + PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL8_0, PG7_FN), + PINMUX_DATA(SSI2_SDATA_MARK, P1MSEL8_1, PG7_FN), + PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL7_0, P1MSEL6_0, PG6_FN), + PINMUX_DATA(SSI2_SCK_MARK, P1MSEL7_1, P1MSEL6_0, PG6_FN), + PINMUX_DATA(TCLK_MARK, P1MSEL7_0, P1MSEL6_1, PG6_FN), + PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL5_0, P1MSEL4_0, PG5_FN), + PINMUX_DATA(SSI2_WS_MARK, P1MSEL5_1, P1MSEL4_0, PG5_FN), + PINMUX_DATA(HAC_RES_MARK, P1MSEL5_0, P1MSEL4_1, PG5_FN), + + /* PH FN */ + PINMUX_DATA(DACK3_MARK, P2MSEL4_0, PH7_FN), + PINMUX_DATA(SDIF0CMD_MARK, P2MSEL4_1, PH7_FN), + PINMUX_DATA(DACK2_MARK, P2MSEL4_0, PH6_FN), + PINMUX_DATA(SDIF0CD_MARK, P2MSEL4_1, PH6_FN), + PINMUX_DATA(DREQ3_MARK, P2MSEL4_0, PH5_FN), + PINMUX_DATA(SDIF0WP_MARK, P2MSEL4_1, PH5_FN), + PINMUX_DATA(DREQ2_MARK, P2MSEL3_0, P2MSEL2_1, PH4_FN), + PINMUX_DATA(SDIF0CLK_MARK, P2MSEL3_1, P2MSEL2_0, PH4_FN), + PINMUX_DATA(SCIF0_CTS_MARK, P2MSEL3_0, P2MSEL2_0, PH4_FN), + PINMUX_DATA(SDIF0D3_MARK, P2MSEL1_1, P2MSEL0_0, PH3_FN), + PINMUX_DATA(SCIF0_RTS_MARK, P2MSEL1_0, P2MSEL0_0, PH3_FN), + PINMUX_DATA(IRL7_MARK, P2MSEL1_0, P2MSEL0_1, PH3_FN), + PINMUX_DATA(SDIF0D2_MARK, P2MSEL1_1, P2MSEL0_0, PH2_FN), + PINMUX_DATA(SCIF0_SCK_MARK, P2MSEL1_0, P2MSEL0_0, PH2_FN), + PINMUX_DATA(IRL6_MARK, P2MSEL1_0, P2MSEL0_1, PH2_FN), + PINMUX_DATA(SDIF0D1_MARK, P2MSEL1_1, P2MSEL0_0, PH1_FN), + PINMUX_DATA(SCIF0_RXD_MARK, P2MSEL1_0, P2MSEL0_0, PH1_FN), + PINMUX_DATA(IRL5_MARK, P2MSEL1_0, P2MSEL0_1, PH1_FN), + PINMUX_DATA(SDIF0D0_MARK, P2MSEL1_1, P2MSEL0_0, PH0_FN), + PINMUX_DATA(SCIF0_TXD_MARK, P2MSEL1_0, P2MSEL0_0, PH0_FN), + PINMUX_DATA(IRL4_MARK, P2MSEL1_0, P2MSEL0_1, PH0_FN), + + /* PJ FN */ + PINMUX_DATA(SCIF5_SCK_MARK, P2MSEL11_1, PJ7_FN), + PINMUX_DATA(FRB_MARK, P2MSEL11_0, PJ7_FN), + PINMUX_DATA(SCIF5_RXD_MARK, P2MSEL10_0, PJ6_FN), + PINMUX_DATA(IOIS16_MARK, P2MSEL10_1, PJ6_FN), + PINMUX_DATA(SCIF5_TXD_MARK, P2MSEL10_0, PJ5_FN), + PINMUX_DATA(CE2B_MARK, P2MSEL10_1, PJ5_FN), + PINMUX_DATA(DRAK3_MARK, P2MSEL7_0, PJ4_FN), + PINMUX_DATA(CE2A_MARK, P2MSEL7_1, PJ4_FN), + PINMUX_DATA(SCIF4_SCK_MARK, P2MSEL9_0, P2MSEL8_0, PJ3_FN), + PINMUX_DATA(DRAK2_MARK, P2MSEL9_0, P2MSEL8_1, PJ3_FN), + PINMUX_DATA(SSI3_WS_MARK, P2MSEL9_1, P2MSEL8_0, PJ3_FN), + PINMUX_DATA(SCIF4_RXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ2_FN), + PINMUX_DATA(DRAK1_MARK, P2MSEL6_0, P2MSEL5_1, PJ2_FN), + PINMUX_DATA(FSTATUS_MARK, P2MSEL6_0, P2MSEL5_0, PJ2_FN), + PINMUX_DATA(SSI3_SDATA_MARK, P2MSEL6_1, P2MSEL5_1, PJ2_FN), + PINMUX_DATA(SCIF4_TXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ1_FN), + PINMUX_DATA(DRAK0_MARK, P2MSEL6_0, P2MSEL5_1, PJ1_FN), + PINMUX_DATA(FSE_MARK, P2MSEL6_0, P2MSEL5_0, PJ1_FN), + PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN), +}; + +static struct pinmux_gpio pinmux_gpios[] = { + /* PA */ + PINMUX_GPIO(GPIO_PA7, PA7_DATA), + 
PINMUX_GPIO(GPIO_PA6, PA6_DATA), + PINMUX_GPIO(GPIO_PA5, PA5_DATA), + PINMUX_GPIO(GPIO_PA4, PA4_DATA), + PINMUX_GPIO(GPIO_PA3, PA3_DATA), + PINMUX_GPIO(GPIO_PA2, PA2_DATA), + PINMUX_GPIO(GPIO_PA1, PA1_DATA), + PINMUX_GPIO(GPIO_PA0, PA0_DATA), + + /* PB */ + PINMUX_GPIO(GPIO_PB7, PB7_DATA), + PINMUX_GPIO(GPIO_PB6, PB6_DATA), + PINMUX_GPIO(GPIO_PB5, PB5_DATA), + PINMUX_GPIO(GPIO_PB4, PB4_DATA), + PINMUX_GPIO(GPIO_PB3, PB3_DATA), + PINMUX_GPIO(GPIO_PB2, PB2_DATA), + PINMUX_GPIO(GPIO_PB1, PB1_DATA), + PINMUX_GPIO(GPIO_PB0, PB0_DATA), + + /* PC */ + PINMUX_GPIO(GPIO_PC7, PC7_DATA), + PINMUX_GPIO(GPIO_PC6, PC6_DATA), + PINMUX_GPIO(GPIO_PC5, PC5_DATA), + PINMUX_GPIO(GPIO_PC4, PC4_DATA), + PINMUX_GPIO(GPIO_PC3, PC3_DATA), + PINMUX_GPIO(GPIO_PC2, PC2_DATA), + PINMUX_GPIO(GPIO_PC1, PC1_DATA), + PINMUX_GPIO(GPIO_PC0, PC0_DATA), + + /* PD */ + PINMUX_GPIO(GPIO_PD7, PD7_DATA), + PINMUX_GPIO(GPIO_PD6, PD6_DATA), + PINMUX_GPIO(GPIO_PD5, PD5_DATA), + PINMUX_GPIO(GPIO_PD4, PD4_DATA), + PINMUX_GPIO(GPIO_PD3, PD3_DATA), + PINMUX_GPIO(GPIO_PD2, PD2_DATA), + PINMUX_GPIO(GPIO_PD1, PD1_DATA), + PINMUX_GPIO(GPIO_PD0, PD0_DATA), + + /* PE */ + PINMUX_GPIO(GPIO_PE5, PE7_DATA), + PINMUX_GPIO(GPIO_PE4, PE6_DATA), + + /* PF */ + PINMUX_GPIO(GPIO_PF7, PF7_DATA), + PINMUX_GPIO(GPIO_PF6, PF6_DATA), + PINMUX_GPIO(GPIO_PF5, PF5_DATA), + PINMUX_GPIO(GPIO_PF4, PF4_DATA), + PINMUX_GPIO(GPIO_PF3, PF3_DATA), + PINMUX_GPIO(GPIO_PF2, PF2_DATA), + PINMUX_GPIO(GPIO_PF1, PF1_DATA), + PINMUX_GPIO(GPIO_PF0, PF0_DATA), + + /* PG */ + PINMUX_GPIO(GPIO_PG7, PG7_DATA), + PINMUX_GPIO(GPIO_PG6, PG6_DATA), + PINMUX_GPIO(GPIO_PG5, PG5_DATA), + + /* PH */ + PINMUX_GPIO(GPIO_PH7, PH7_DATA), + PINMUX_GPIO(GPIO_PH6, PH6_DATA), + PINMUX_GPIO(GPIO_PH5, PH5_DATA), + PINMUX_GPIO(GPIO_PH4, PH4_DATA), + PINMUX_GPIO(GPIO_PH3, PH3_DATA), + PINMUX_GPIO(GPIO_PH2, PH2_DATA), + PINMUX_GPIO(GPIO_PH1, PH1_DATA), + PINMUX_GPIO(GPIO_PH0, PH0_DATA), + + /* PJ */ + PINMUX_GPIO(GPIO_PJ7, PJ7_DATA), + PINMUX_GPIO(GPIO_PJ6, PJ6_DATA), + PINMUX_GPIO(GPIO_PJ5, PJ5_DATA), + PINMUX_GPIO(GPIO_PJ4, PJ4_DATA), + PINMUX_GPIO(GPIO_PJ3, PJ3_DATA), + PINMUX_GPIO(GPIO_PJ2, PJ2_DATA), + PINMUX_GPIO(GPIO_PJ1, PJ1_DATA), + + /* FN */ + PINMUX_GPIO(GPIO_FN_CDE, CDE_MARK), + PINMUX_GPIO(GPIO_FN_ETH_MAGIC, ETH_MAGIC_MARK), + PINMUX_GPIO(GPIO_FN_DISP, DISP_MARK), + PINMUX_GPIO(GPIO_FN_ETH_LINK, ETH_LINK_MARK), + PINMUX_GPIO(GPIO_FN_DR5, DR5_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TX_ER, ETH_TX_ER_MARK), + PINMUX_GPIO(GPIO_FN_DR4, DR4_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TX_EN, ETH_TX_EN_MARK), + PINMUX_GPIO(GPIO_FN_DR3, DR3_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TXD3, ETH_TXD3_MARK), + PINMUX_GPIO(GPIO_FN_DR2, DR2_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TXD2, ETH_TXD2_MARK), + PINMUX_GPIO(GPIO_FN_DR1, DR1_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TXD1, ETH_TXD1_MARK), + PINMUX_GPIO(GPIO_FN_DR0, DR0_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TXD0, ETH_TXD0_MARK), + PINMUX_GPIO(GPIO_FN_VSYNC, VSYNC_MARK), + PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK), + PINMUX_GPIO(GPIO_FN_ODDF, ODDF_MARK), + PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK), + PINMUX_GPIO(GPIO_FN_DG5, DG5_MARK), + PINMUX_GPIO(GPIO_FN_ETH_MDIO, ETH_MDIO_MARK), + PINMUX_GPIO(GPIO_FN_DG4, DG4_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RX_CLK, ETH_RX_CLK_MARK), + PINMUX_GPIO(GPIO_FN_DG3, DG3_MARK), + PINMUX_GPIO(GPIO_FN_ETH_MDC, ETH_MDC_MARK), + PINMUX_GPIO(GPIO_FN_DG2, DG2_MARK), + PINMUX_GPIO(GPIO_FN_ETH_COL, ETH_COL_MARK), + PINMUX_GPIO(GPIO_FN_DG1, DG1_MARK), + PINMUX_GPIO(GPIO_FN_ETH_TX_CLK, ETH_TX_CLK_MARK), + PINMUX_GPIO(GPIO_FN_DG0, DG0_MARK), + PINMUX_GPIO(GPIO_FN_ETH_CRS, 
ETH_CRS_MARK), + PINMUX_GPIO(GPIO_FN_DCLKIN, DCLKIN_MARK), + PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK), + PINMUX_GPIO(GPIO_FN_HSYNC, HSYNC_MARK), + PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK), + PINMUX_GPIO(GPIO_FN_DB5, DB5_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RXD3, ETH_RXD3_MARK), + PINMUX_GPIO(GPIO_FN_DB4, DB4_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RXD2, ETH_RXD2_MARK), + PINMUX_GPIO(GPIO_FN_DB3, DB3_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RXD1, ETH_RXD1_MARK), + PINMUX_GPIO(GPIO_FN_DB2, DB2_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RXD0, ETH_RXD0_MARK), + PINMUX_GPIO(GPIO_FN_DB1, DB1_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RX_DV, ETH_RX_DV_MARK), + PINMUX_GPIO(GPIO_FN_DB0, DB0_MARK), + PINMUX_GPIO(GPIO_FN_ETH_RX_ER, ETH_RX_ER_MARK), + PINMUX_GPIO(GPIO_FN_DCLKOUT, DCLKOUT_MARK), + PINMUX_GPIO(GPIO_FN_SCIF1_SLK, SCIF1_SLK_MARK), + PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), + PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), + PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK), + PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK), + PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK), + PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK), + PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK), + PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), + PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK), + PINMUX_GPIO(GPIO_FN_USB_OVC1, USB_OVC1_MARK), + PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), + PINMUX_GPIO(GPIO_FN_USB_OVC0, USB_OVC0_MARK), + PINMUX_GPIO(GPIO_FN_USB_PENC1, USB_PENC1_MARK), + PINMUX_GPIO(GPIO_FN_USB_PENC0, USB_PENC0_MARK), + PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK), + PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1CMD, SDIF1CMD_MARK), + PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK), + PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1CD, SDIF1CD_MARK), + PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK), + PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1WP, SDIF1WP_MARK), + PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK), + PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1CLK, SDIF1CLK_MARK), + PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK), + PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1D3, SDIF1D3_MARK), + PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK), + PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1D2, SDIF1D2_MARK), + PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK), + PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1D1, SDIF1D1_MARK), + PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK), + PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK), + PINMUX_GPIO(GPIO_FN_SDIF1D0, SDIF1D0_MARK), + PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK), + PINMUX_GPIO(GPIO_FN_SSI2_SDATA, SSI2_SDATA_MARK), + PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK), + PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK), + PINMUX_GPIO(GPIO_FN_SSI2_SCK, SSI2_SCK_MARK), + PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK), + PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK), + PINMUX_GPIO(GPIO_FN_SSI2_WS, SSI2_WS_MARK), + PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0CMD, SDIF0CMD_MARK), + PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0CD, SDIF0CD_MARK), + PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0WP, SDIF0WP_MARK), + PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), + PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0CLK, SDIF0CLK_MARK), + PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), + PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0D3, SDIF0D3_MARK), + 
PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), + PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0D2, SDIF0D2_MARK), + PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), + PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0D1, SDIF0D1_MARK), + PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), + PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK), + PINMUX_GPIO(GPIO_FN_SDIF0D0, SDIF0D0_MARK), + PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK), + PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), + PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK), + PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), + PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK), + PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK), + PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK), + PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK), + PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK), + PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK), + PINMUX_GPIO(GPIO_FN_SSI3_WS, SSI3_WS_MARK), + PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK), + PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK), + PINMUX_GPIO(GPIO_FN_SSI3_SDATA, SSI3_SDATA_MARK), + PINMUX_GPIO(GPIO_FN_FSTATUS, FSTATUS_MARK), + PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK), + PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK), + PINMUX_GPIO(GPIO_FN_SSI3_SCK, SSI3_SCK_MARK), + PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK), +}; + +static struct pinmux_cfg_reg pinmux_config_regs[] = { + { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) { + PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU, + PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU, + PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU, + PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU, + PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU, + PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU, + PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU, + PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU } + }, + { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) { + PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU, + PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU, + PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU, + PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU, + PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU, + PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU, + PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU, + PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU } + }, + { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) { + PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU, + PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU, + PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU, + PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU, + PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU, + PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU, + PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU, + PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU } + }, + { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) { + PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU, + PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU, + PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU, + PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU, + PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU, + PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU, + PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU, + PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU } + }, + { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) { + PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU, + PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, } + }, + { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) { + PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU, + PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU, + PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU, + PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU, + PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU, + PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU, + PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU, + PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU } + }, + { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) { + PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU, + PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU, + PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 
0, 0, 0, } + }, + { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) { + PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU, + PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU, + PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU, + PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU, + PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU, + PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU, + PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU, + PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU } + }, + { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) { + PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU, + PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU, + PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU, + PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU, + PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU, + PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU, + PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU, + 0, 0, 0, 0, } + }, + { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) { + 0, 0, + P1MSEL14_0, P1MSEL14_1, + P1MSEL13_0, P1MSEL13_1, + P1MSEL12_0, P1MSEL12_1, + P1MSEL11_0, P1MSEL11_1, + P1MSEL10_0, P1MSEL10_1, + P1MSEL9_0, P1MSEL9_1, + P1MSEL8_0, P1MSEL8_1, + P1MSEL7_0, P1MSEL7_1, + P1MSEL6_0, P1MSEL6_1, + P1MSEL5_0, P1MSEL5_1, + P1MSEL4_0, P1MSEL4_1, + P1MSEL3_0, P1MSEL3_1, + P1MSEL2_0, P1MSEL2_1, + P1MSEL1_0, P1MSEL1_1, + P1MSEL0_0, P1MSEL0_1 } + }, + { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1) { + P2MSEL15_0, P2MSEL15_1, + P2MSEL14_0, P2MSEL14_1, + P2MSEL13_0, P2MSEL13_1, + P2MSEL12_0, P2MSEL12_1, + P2MSEL11_0, P2MSEL11_1, + P2MSEL10_0, P2MSEL10_1, + P2MSEL9_0, P2MSEL9_1, + P2MSEL8_0, P2MSEL8_1, + P2MSEL7_0, P2MSEL7_1, + P2MSEL6_0, P2MSEL6_1, + P2MSEL5_0, P2MSEL5_1, + P2MSEL4_0, P2MSEL4_1, + P2MSEL3_0, P2MSEL3_1, + P2MSEL2_0, P2MSEL2_1, + P2MSEL1_0, P2MSEL1_1, + P2MSEL0_0, P2MSEL0_1 } + }, + {} +}; + +static struct pinmux_data_reg pinmux_data_regs[] = { + { PINMUX_DATA_REG("PADR", 0xffcc0020, 8) { + PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, + PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA } + }, + { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8) { + PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, + PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } + }, + { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8) { + PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, + PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } + }, + { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8) { + PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, + PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } + }, + { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8) { + PE7_DATA, PE6_DATA, + 0, 0, 0, 0, 0, 0 } + }, + { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8) { + PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, + PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } + }, + { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8) { + PG7_DATA, PG6_DATA, PG5_DATA, 0, + 0, 0, 0, 0 } + }, + { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8) { + PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, + PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA } + }, + { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8) { + PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, + PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 } + }, + { }, +}; + +static struct pinmux_info sh7786_pinmux_info = { + .name = "sh7786_pfc", + .reserved_id = PINMUX_RESERVED, + .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, + .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, + .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, + .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, + .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, + .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, + + .first_gpio = GPIO_PA7, + .last_gpio = GPIO_FN_FSE, + + .gpios = pinmux_gpios, + .cfg_regs = pinmux_config_regs, + .data_regs = pinmux_data_regs, + + .gpio_data = pinmux_data, + .gpio_data_size = ARRAY_SIZE(pinmux_data), +}; + +static int __init plat_pinmux_setup(void) +{ + return 
register_pinmux(&sh7786_pinmux_info); +} + +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c new file mode 100644 index 00000000000..249b99e1adb --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -0,0 +1,407 @@ +/* + * SH7786 Setup + * + * Copyright (C) 2009 Renesas Solutions Corp. + * Kuninori Morimoto + * + * Based on SH7785 Setup + * + * Copyright (C) 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffea0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 40, 41, 43, 42 }, + }, + /* + * The rest of these all have multiplexed IRQs + */ + { + .mapbase = 0xffeb0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 44, 44, 44, 44 }, + }, { + .mapbase = 0xffec0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 50, 50, 50, 50 }, + }, { + .mapbase = 0xffed0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 51, 51, 51, 51 }, + }, { + .mapbase = 0xffee0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 52, 52, 52, 52 }, + }, { + .mapbase = 0xffef0000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 53, 53, 53, 53 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7786_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7786_devices_setup(void) +{ + return platform_add_devices(sh7786_devices, + ARRAY_SIZE(sh7786_devices)); +} +device_initcall(sh7786_devices_setup); + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + WDT, + TMU0_0, TMU0_1, TMU0_2, TMU0_3, + TMU1_0, TMU1_1, TMU1_2, + DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6, + HUDI1, HUDI0, + DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3, + HPB_0, HPB_1, HPB_2, + SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3, + SCIF1, + TMU2, TMU3, + SCIF2, SCIF3, SCIF4, SCIF5, + Eth_0, Eth_1, + PCIeC0_0, PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1, PCIeC1_2, + USB, + I2C0, I2C1, + DU, + SSI0, SSI1, SSI2, SSI3, + PCIeC2_0, PCIeC2_1, PCIeC2_2, + HAC0, HAC1, + FLCTL, + HSPI, + GPIO0, GPIO1, + Thermal, + INTC0, INTC1, INTC2, INTC3, INTC4, INTC5, INTC6, INTC7, + + /* interrupt groups */ +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(WDT, 0x3e0), + INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420), + INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460), + INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0), + INTC_VECT(TMU1_2, 0x4c0), + INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520), + INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560), + INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0), + INTC_VECT(DMAC0_6, 0x5c0), + INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600), + INTC_VECT(DMAC1_0, 0x620), 
INTC_VECT(DMAC1_1, 0x640), + INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680), + INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0), + INTC_VECT(HPB_2, 0x6e0), + INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720), + INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760), + INTC_VECT(SCIF1, 0x780), + INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0), + INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860), + INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0), + INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0), + INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00), + INTC_VECT(PCIeC0_2, 0xb20), + INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 0xb60), + INTC_VECT(PCIeC1_2, 0xb80), + INTC_VECT(USB, 0xba0), + INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0), + INTC_VECT(DU, 0xd00), + INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40), + INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80), + INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0), + INTC_VECT(PCIeC2_2, 0xde0), + INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20), + INTC_VECT(FLCTL, 0xe40), + INTC_VECT(HSPI, 0xe80), + INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0), + INTC_VECT(Thermal, 0xee0), +}; + +/* FIXME: Main CPU support only now */ +#if 1 /* Main CPU */ +#define CnINTMSK0 0xfe410030 +#define CnINTMSK1 0xfe410040 +#define CnINTMSKCLR0 0xfe410050 +#define CnINTMSKCLR1 0xfe410060 +#define CnINT2MSKR0 0xfe410a20 +#define CnINT2MSKR1 0xfe410a24 +#define CnINT2MSKR2 0xfe410a28 +#define CnINT2MSKR3 0xfe410a2c +#define CnINT2MSKCR0 0xfe410a30 +#define CnINT2MSKCR1 0xfe410a34 +#define CnINT2MSKCR2 0xfe410a38 +#define CnINT2MSKCR3 0xfe410a3c +#else /* Sub CPU */ +#define CnINTMSK0 0xfe410034 +#define CnINTMSK1 0xfe410044 +#define CnINTMSKCLR0 0xfe410054 +#define CnINTMSKCLR1 0xfe410064 +#define CnINT2MSKR0 0xfe410b20 +#define CnINT2MSKR1 0xfe410b24 +#define CnINT2MSKR2 0xfe410b28 +#define CnINT2MSKR3 0xfe410b2c +#define CnINT2MSKCR0 0xfe410b30 +#define CnINT2MSKCR1 0xfe410b34 +#define CnINT2MSKCR2 0xfe410b38 +#define CnINT2MSKCR3 0xfe410b3c +#endif + +#define INTMSK2 0xfe410068 +#define INTMSKCLR2 0xfe41006c + +static struct intc_mask_reg mask_registers[] __initdata = { + { CnINTMSK0, CnINTMSKCLR0, 32, + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, + { INTMSK2, INTMSKCLR2, 32, + { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0, + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, + { CnINT2MSKR0, CnINT2MSKCR0 , 32, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT } }, + { CnINT2MSKR1, CnINT2MSKCR1, 32, + { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0, + DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6, + HUDI1, HUDI0, + DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3, + HPB_0, HPB_1, HPB_2, + SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3, + SCIF1, + TMU2, TMU3, 0, } }, + { CnINT2MSKR2, CnINT2MSKCR2, 32, + { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5, + Eth_0, Eth_1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + PCIeC0_0, PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1, PCIeC1_2, + USB, 0, 0 } }, + { CnINT2MSKR3, CnINT2MSKCR3, 32, + { 0, 0, 0, 0, 0, 0, + I2C0, I2C1, + DU, SSI0, SSI1, SSI2, SSI3, + PCIeC2_0, PCIeC2_1, PCIeC2_2, + HAC0, HAC1, + FLCTL, 0, + HSPI, GPIO0, GPIO1, Thermal, + 0, 0, 0, 0, 0, 0, 0, 0 } }, +}; + +static struct intc_prio_reg 
prio_registers[] __initdata = { + { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } }, + { 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1, + TMU0_2, TMU0_3 } }, + { 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1, + TMU1_2, 0 } }, + { 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1, + DMAC0_2, DMAC0_3 } }, + { 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5, + DMAC0_6, HUDI1 } }, + { 0xfe410814, 0, 32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0, + DMAC1_1, DMAC1_2 } }, + { 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0, + HPB_1, HPB_2 } }, + { 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1, + SCIF0_2, SCIF0_3 } }, + { 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } }, + { 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } }, + { 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5, + Eth_0, Eth_1 } }, + { 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } }, + { 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } }, + { 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } }, + { 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } }, + { 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1 } }, + { 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } }, + { 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } }, + { 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } }, + { 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } }, + { 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0, + PCIeC2_1, PCIeC2_2 } }, + { 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } }, + { 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0, + GPIO1, Thermal } }, + { 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } }, + { 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7786", vectors, NULL, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ + +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240), + INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0), +}; + +static struct intc_vect vectors_irq4567[] __initdata = { + INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340), + INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0), +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xfe410024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123", + vectors_irq0123, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567", + vectors_irq4567, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +/* External interrupt pins in IRL mode */ + +static struct intc_vect vectors_irl0123[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 
0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static struct intc_vect vectors_irl4567[] __initdata = { + INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920), + INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960), + INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0), + INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0), + INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20), + INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60), + INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0), + INTC_VECT(IRL4_HHHL, 0xac0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123, + NULL, mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567, + NULL, mask_registers, NULL, NULL); + +#define INTC_ICR0 0xfe410000 +#define INTC_INTMSK0 CnINTMSK0 +#define INTC_INTMSK1 CnINTMSK1 +#define INTC_INTMSK2 INTMSK2 +#define INTC_INTMSKCLR1 CnINTMSKCLR1 +#define INTC_INTMSKCLR2 INTMSKCLR2 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 + IRQ7-4 */ + ctrl_outl(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + ctrl_outl(0xc0000000, INTC_INTMSK1); + ctrl_outl(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ7654: + /* select IRQ mode for IRL7-4 */ + ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); + register_intc_controller(&intc_desc_irq4567); + break; + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq0123); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + ctrl_outl(0x40000000, INTC_INTMSKCLR1); + ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + ctrl_outl(0x80000000, INTC_INTMSKCLR1); + ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + ctrl_outl(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl4567); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + ctrl_outl(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl0123); + break; + default: + BUG(); + } +} + +void __init plat_mem_setup(void) +{ +} diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 370d2cfa34e..61ab2a7f864 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -432,6 +432,7 @@ static const char *cpu_name[] = { [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785", + [CPU_SH7786] = "SH7786", [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3", [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103", [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723", diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 2b62f9cff22..10b5a6f17cc 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -244,6 +244,7 @@ static int tmu_timer_init(void) !defined(CONFIG_CPU_SUBTYPE_SH7721) && \ !defined(CONFIG_CPU_SUBTYPE_SH7760) && \ !defined(CONFIG_CPU_SUBTYPE_SH7785) && \ + !defined(CONFIG_CPU_SUBTYPE_SH7786) && \ 
!defined(CONFIG_CPU_SUBTYPE_SHX3) ctrl_outb(TMU_TOCR_INIT, TMU_TOCR); #endif diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c index 1d97d64cb95..1b9d4304b3b 100644 --- a/arch/sh/oprofile/common.c +++ b/arch/sh/oprofile/common.c @@ -107,6 +107,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) case CPU_SH7780: case CPU_SH7781: case CPU_SH7785: + case CPU_SH7786: case CPU_SH7723: case CPU_SHX3: lmodel = &op_model_sh4a_ops; -- cgit v1.2.3 From 37042fbd8b3256d2a44b17646fa9de8777d5a34a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 3 Mar 2009 15:57:02 +0900 Subject: sh: SH7786 is an SH-X3 core, select CPU_SHX3. Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 0ae09683fb2..83aee3db54b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -359,7 +359,7 @@ config CPU_SUBTYPE_SH7785 config CPU_SUBTYPE_SH7786 bool "Support SH7786 processor" select CPU_SH4A - select CPU_SHX2 + select CPU_SHX3 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA -- cgit v1.2.3 From 5ac072e110ff358a9ebc318a1b54f0182b799f72 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 3 Mar 2009 16:22:00 +0900 Subject: sh: Urquell board support. This adds preliminary support for the SH7786-based Urquell board. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/boards/Kconfig | 5 + arch/sh/boards/Makefile | 1 + arch/sh/boards/board-urquell.c | 128 +++++++ arch/sh/configs/urquell_defconfig | 534 +++++++++++++++++++++++++++++ arch/sh/include/mach-common/mach/urquell.h | 68 ++++ arch/sh/tools/mach-types | 1 + 6 files changed, 737 insertions(+) create mode 100644 arch/sh/boards/board-urquell.c create mode 100644 arch/sh/configs/urquell_defconfig create mode 100644 arch/sh/include/mach-common/mach/urquell.h (limited to 'arch') diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index c9da37088d2..694abecf602 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -162,6 +162,11 @@ config SH_SH7785LCR_29BIT_PHYSMAPS DIP switch(S2-5). If you set the DIP switch for S2-5 = ON, you can access all on-board device in 29bit address mode. +config SH_URQUELL + bool "Urquell" + depends on CPU_SUBTYPE_SH7786 + select ARCH_REQUIRE_GPIOLIB + config SH_MIGOR bool "Migo-R" depends on CPU_SUBTYPE_SH7722 diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index 269ae2be49e..6f101a8161f 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -4,5 +4,6 @@ obj-$(CONFIG_SH_AP325RXA) += board-ap325rxa.o obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o +obj-$(CONFIG_SH_URQUELL) += board-urquell.o obj-$(CONFIG_SH_SHMIN) += board-shmin.o obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c new file mode 100644 index 00000000000..d5caeabf46e --- /dev/null +++ b/arch/sh/boards/board-urquell.c @@ -0,0 +1,128 @@ +/* + * Renesas Technology Corp. SH7786 Urquell Support. + * + * Copyright (C) 2008 Kuninori Morimoto + * Copyright (C) 2008 Yoshihiro Shimoda + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct resource heartbeat_resources[] = { + [0] = { + .start = BOARDREG(SLEDR), + .end = BOARDREG(SLEDR), + .flags = IORESOURCE_MEM, + }, +}; + +static struct heartbeat_data heartbeat_data = { + .regsize = 16, +}; + +static struct platform_device heartbeat_device = { + .name = "heartbeat", + .id = -1, + .dev = { + .platform_data = &heartbeat_data, + }, + .num_resources = ARRAY_SIZE(heartbeat_resources), + .resource = heartbeat_resources, +}; + +static struct mtd_partition nor_flash_partitions[] = { + { + .name = "loader", + .offset = 0x00000000, + .size = SZ_512K, + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, + { + .name = "bootenv", + .offset = MTDPART_OFS_APPEND, + .size = SZ_512K, + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, + { + .name = "kernel", + .offset = MTDPART_OFS_APPEND, + .size = SZ_4M, + }, + { + .name = "data", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct physmap_flash_data nor_flash_data = { + .width = 2, + .parts = nor_flash_partitions, + .nr_parts = ARRAY_SIZE(nor_flash_partitions), +}; + +static struct resource nor_flash_resources[] = { + [0] = { + .start = NOR_FLASH_ADDR, + .end = NOR_FLASH_ADDR + NOR_FLASH_SIZE - 1, + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device nor_flash_device = { + .name = "physmap-flash", + .dev = { + .platform_data = &nor_flash_data, + }, + .num_resources = ARRAY_SIZE(nor_flash_resources), + .resource = nor_flash_resources, +}; + +static struct platform_device *urquell_devices[] __initdata = { + &heartbeat_device, + &nor_flash_device, +}; + +static int __init urquell_devices_setup(void) +{ + /* USB */ + gpio_request(GPIO_FN_USB_OVC0, NULL); + gpio_request(GPIO_FN_USB_PENC0, NULL); + + return platform_add_devices(urquell_devices, + ARRAY_SIZE(urquell_devices)); +} +device_initcall(urquell_devices_setup); + +static void urquell_power_off(void) +{ + __raw_writew(0xa5a5, UBOARDREG(SRSTR)); +} + +/* Initialize the board */ +static void __init urquell_setup(char **cmdline_p) +{ + printk(KERN_INFO "Renesas Technology Corp. 
Urquell support.\n"); + + pm_power_off = urquell_power_off; +} + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_urquell __initmv = { + .mv_name = "Urquell", + .mv_setup = urquell_setup, +}; diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig new file mode 100644 index 00000000000..94a4bdbd1bc --- /dev/null +++ b/arch/sh/configs/urquell_defconfig @@ -0,0 +1,534 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.29-rc4 +# Tue Mar 3 16:20:09 2009 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_SYS_SUPPORTS_NUMA=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SYSVIPC is not set +# CONFIG_BSD_PROCESS_ACCT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=17 +# CONFIG_CGROUPS is not set +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_EMBEDDED=y +# CONFIG_UID16 is not set +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +# CONFIG_HOTPLUG is not set +# CONFIG_PRINTK is not set +# CONFIG_BUG is not set +# CONFIG_ELF_CORE is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_BASE_FULL is not set +# CONFIG_FUTEX is not set +# CONFIG_EPOLL is not set +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +CONFIG_SHMEM=y +# CONFIG_AIO is not set +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_BASE_SMALL=1 +# CONFIG_MODULES is not set +# CONFIG_BLOCK is not set +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH4=y +CONFIG_CPU_SH4A=y +CONFIG_CPU_SHX3=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# 
CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +CONFIG_CPU_SUBTYPE_SH7786=y +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x08000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +# CONFIG_DISCONTIGMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_UNEVICTABLE_LRU=y + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +# CONFIG_SH_STORE_QUEUES is not set +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +CONFIG_SH_URQUELL=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=31250000 +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +CONFIG_HEARTBEAT=y +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +# CONFIG_MTD_PARTITIONS is not set + +# +# 
User Modules And Translation Layers +# +# CONFIG_MTD_CHAR is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_QINFO_PROBE is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y + +# +# SCSI device support +# +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=6 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_UNIX98_PTYS is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_SOUND is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not 
set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Pseudo filesystems +# +# CONFIG_PROC_FS is not set +# CONFIG_SYSFS is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_LATENCYTOP is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set +# CONFIG_MORE_COMPILE_OPTIONS is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +# CONFIG_CRC32 is not set +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/sh/include/mach-common/mach/urquell.h b/arch/sh/include/mach-common/mach/urquell.h new file mode 100644 index 00000000000..14b3e1d0177 --- /dev/null +++ b/arch/sh/include/mach-common/mach/urquell.h @@ -0,0 +1,68 @@ +#ifndef __MACH_URQUELL_H +#define __MACH_URQUELL_H + +/* + * ------ 0x00000000 ------------------------------------ + * CS0 | (SW1,SW47) EEPROM, SRAM, NOR FLASH + * -----+ 0x04000000 ------------------------------------ + * CS1 | (SW47) SRAM, SRAM-LAN-PCMCIA, NOR FLASH + * -----+ 0x08000000 ------------------------------------ + * CS2 | DDR3 + * CS3 | + * -----+ 0x10000000 ------------------------------------ + * CS4 | PCIe + * -----+ 0x14000000 ------------------------------------ + * CS5 | (SW47) LRAM/URAM, SRAM-LAN-PCMCIA + * -----+ 0x18000000 ------------------------------------ + * CS6 | ATA, NAND FLASH + * -----+ 0x1c000000 ------------------------------------ + * CS7 | SH7786 register + * -----+------------------------------------------------ + */ + +#define NOR_FLASH_ADDR 0x00000000 +#define NOR_FLASH_SIZE 0x04000000 + +#define CS1_BASE 0x05000000 +#define CS5_BASE 0x15000000 +#define FPGA_BASE CS1_BASE + +#define BOARDREG(ofs) (FPGA_BASE + ofs##_OFS) +#define UBOARDREG(ofs) (0xa0000000 + FPGA_BASE + ofs##_OFS) + +#define SRSTR_OFS 0x0000 /* System reset register */ +#define BDMR_OFS 0x0010 /* Board operating mode resister */ +#define IRL0SR_OFS 0x0020 /* IRL0 Status register */ +#define IRL0MSKR_OFS 0x0030 /* IRL0 Mask register */ +#define IRL1SR_OFS 0x0040 /* IRL1 Status register */ +#define IRL1MSKR_OFS 0x0050 /* IRL1 Mask register */ +#define IRL2SR_OFS 0x0060 /* IRL2 Status register */ +#define IRL2MSKR_OFS 0x0070 /* IRL2 Mask 
register */ +#define IRL3SR_OFS 0x0080 /* IRL3 Status register */ +#define IRL3MSKR_OFS 0x0090 /* IRL3 Mask register */ +#define SOFTINTR_OFS 0x0120 /* Softwear Interrupt register */ +#define SLEDR_OFS 0x0130 /* LED control resister */ +#define MAPSCIFSWR_OFS 0x0140 /* Map/SCIF Switch register */ +#define FPVERR_OFS 0x0150 /* FPGA Version register */ +#define FPDATER_OFS 0x0160 /* FPGA Date register */ +#define FPYEARR_OFS 0x0170 /* FPGA Year register */ +#define TCLKCR_OFS 0x0180 /* TCLK Control register */ +#define DIPSWMR_OFS 0x1000 /* DIPSW monitor register */ +#define FPODR_OFS 0x1010 /* Output port data register */ +#define ATACNR_OFS 0x1020 /* ATA-CN Control/status register */ +#define FPINDR_OFS 0x1030 /* Input port data register */ +#define MDSWMR_OFS 0x1040 /* MODE SW monitor register */ +#define DDR3BUPCR_OFS 0x1050 /* DDR3 Backup control register */ +#define SSICODECCR_OFS 0x1060 /* SSI-CODEC control register */ +#define PCIESLOTSR_OFS 0x1070 /* PCIexpress Slot status register */ +#define ETHERPORTSR_OFS 0x1080 /* EtherPhy Port status register */ +#define LATCHCR_OFS 0x3000 /* Latch control register */ +#define LATCUAR_OFS 0x3010 /* Latch upper address register */ +#define LATCLAR_OFS 0x3012 /* Latch lower address register */ +#define LATCLUDR_OFS 0x3024 /* Latch D31-16 register */ +#define LATCLLDR_OFS 0x3026 /* Latch D15-0 register */ + +#define CHARLED_OFS 0x2000 /* Character LED */ + +#endif /* __MACH_URQUELL_H */ + diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 284b7e86749..8f9e1662fa9 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -52,3 +52,4 @@ RSK7203 SH_RSK7203 AP325RXA SH_AP325RXA SH7763RDP SH_SH7763RDP SH7785LCR SH_SH7785LCR +URQUELL SH_URQUELL -- cgit v1.2.3 From 2505170211f7630361a852e25b60f4df4c878daa Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Mon, 2 Mar 2009 17:20:01 -0800 Subject: x86, signals: fix xine & firefox bustage Impact: fix bad frame in rt_sigreturn on 64-bit After commit 97286a2b64725aac2d584ddd1f94871f9991d5a1 some applications fail to return from signal handler: [ 145.150133] firefox[3250] bad frame in rt_sigreturn frame:00007f902b44eb28 ip:352e80b307 sp:7f902b44ef70 orax:ffffffffffffffff in libpthread-2.9.so[352e800000+17000] [ 665.519017] firefox[5420] bad frame in rt_sigreturn frame:00007faa8deaeb28 ip:352e80b307 sp:7faa8deaef70 orax:ffffffffffffffff in libpthread-2.9.so[352e800000+17000] The root cause is forgetting to keep 64 byte aligned value of fpstate for next stack pointer calculation. 
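To make that root cause concrete, here is a minimal sketch of the intended frame layout, paraphrased from the fix below (the names follow get_sigframe() in the patch; the ordering is the point, not the exact kernel code):

	sp -= sig_xstate_size;            /* reserve room for the xsave image          */
	sp  = round_down(sp, 64);         /* the xsave area must start 64-byte aligned */
	fpstate = (void __user *)sp;      /* record the aligned address ...            */
	sp -= frame_size;                 /* ... and keep using the aligned sp, so the */
	                                  /* sigframe is laid out below the saved state*/

Taking fpstate from round_down(sp, 64) while leaving sp itself unaligned meant the subsequent frame calculation no longer matched the saved state, which is what rt_sigreturn then rejected as a bad frame.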
Reported-by: Jaswinder Singh Rajput Reported-by: Mike Galbraith Signed-off-by: Hiroshi Shimamoto LKML-Reference: <49AC85C1.7060600@ct.jp.nec.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index dde3f2ae237..d2cc6428c58 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -240,11 +240,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (used_math()) { sp -= sig_xstate_size; -#ifdef CONFIG_X86_32 - *fpstate = (void __user *) sp; -#else /* !CONFIG_X86_32 */ - *fpstate = (void __user *)round_down(sp, 64); -#endif /* CONFIG_X86_32 */ +#ifdef CONFIG_X86_64 + sp = round_down(sp, 64); +#endif /* CONFIG_X86_64 */ + *fpstate = (void __user *)sp; if (save_i387_xstate(*fpstate) < 0) return (void __user *)-1L; -- cgit v1.2.3 From 2b688dfd0a93cf3b17c38feef693361da47b0606 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 12:55:04 +0200 Subject: x86: move __VMALLOC_RESERVE to pgtable_32.c Impact: cleanup The __VMALLOC_RESERVE global variable is not used in init_32.c. Move that to pgtable_32.c to reduce the diff between init_32.c and init_64.c. Signed-off-by: Pekka Enberg LKML-Reference: <1236077704.2675.4.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 2 -- arch/x86/mm/pgtable_32.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 06708ee94aa..5b06a2f5dea 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -50,8 +50,6 @@ #include #include -unsigned int __VMALLOC_RESERVE = 128 << 20; - unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 0951db9ee51..e4032c886c3 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -20,6 +20,8 @@ #include #include +unsigned int __VMALLOC_RESERVE = 128 << 20; + /* * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. -- cgit v1.2.3 From fd578f9c0a0a7bf3e460e6f21cdc6f4018949e80 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 12:55:05 +0200 Subject: x86: use roundup() instead of PAGE_ALIGN() in find_early_table_space() Impact: cleanup This patch changes find_early_table_space() to use roundup() for rounding up tables to page size to unify the common parts of the 32-bit and 64-bit implementations. 
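For reference, the two helpers produce the same value here, so the change is purely textual to match init_64.c. Roughly, paraphrasing the usual kernel definitions (shown only for illustration):

	#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))    /* any multiple y     */
	#define PAGE_ALIGN(a)   (((a) + PAGE_SIZE - 1) & PAGE_MASK)  /* power-of-two only  */

	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);   /* same result as PAGE_ALIGN() */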
Signed-off-by: Pekka Enberg LKML-Reference: <1236077705.2675.6.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 5b06a2f5dea..1dd6b6334dc 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -845,10 +845,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse) unsigned long puds, pmds, ptes, tables, start; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; - tables = PAGE_ALIGN(puds * sizeof(pud_t)); + tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; - tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); + tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); if (use_pse) { unsigned long extra; @@ -859,10 +859,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse) } else ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; - tables += PAGE_ALIGN(ptes * sizeof(pte_t)); + tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); /* for fixmap */ - tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); + tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); /* * RED-PEN putting page tables only on node 0 could -- cgit v1.2.3 From 05f209e7b936a48e341d36831079116a06658ccc Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 13:15:02 +0200 Subject: x86: add sanity checks to init_32.c Impact: unification This patch adds sanity checks that are already in init_64.c to init_32.c. Signed-off-by: Pekka Enberg LKML-Reference: <1236078902.2675.16.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 1dd6b6334dc..1570a822c18 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -1214,18 +1214,21 @@ void mark_rodata_ro(void) void free_init_pages(char *what, unsigned long begin, unsigned long end) { -#ifdef CONFIG_DEBUG_PAGEALLOC + unsigned long addr = begin; + + if (addr >= end) + return; + /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ +#ifdef CONFIG_DEBUG_PAGEALLOC printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", begin, PAGE_ALIGN(end)); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); #else - unsigned long addr; - /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that @@ -1233,14 +1236,16 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) */ set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); - for (addr = begin; addr < end; addr += PAGE_SIZE) { + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); + + for (; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); + memset((void *)(addr & ~(PAGE_SIZE-1)), + POISON_FREE_INITMEM, PAGE_SIZE); free_page(addr); totalram_pages++; } - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); #endif } -- cgit v1.2.3 From e087edd8c056292191bb989baf49f83ee509e624 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 13:15:04 +0200 Subject: x86: make sure initmem is writable on 64-bit Impact: unification This patch ports commit 
3c1df68b848b39270752ff8d4b956cc4a4dce0f6 ("x86: make sure initmem is writable") to the 64-bit version to unify implementations of free_init_pages(). Signed-off-by: Pekka Enberg Cc: Arjan van de Ven LKML-Reference: <1236078904.2675.17.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index e6d36b49025..03da9030d0e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -962,6 +962,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) begin, PAGE_ALIGN(end)); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); #else + /* + * We just marked the kernel text read only above, now that + * we are going to free part of that, we need to make that + * writeable first. + */ + set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (; addr < end; addr += PAGE_SIZE) { -- cgit v1.2.3 From e5b2bb552706ca0e30795ee84caacbb37cec5705 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 13:15:06 +0200 Subject: x86: unify free_init_pages() and free_initmem() Impact: unification This patch introduces a common arch/x86/mm/init.c and moves the identical free_init_pages() and free_initmem() functions to the file. Signed-off-by: Pekka Enberg LKML-Reference: <1236078906.2675.18.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/Makefile | 2 +- arch/x86/mm/init.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/mm/init_32.c | 44 -------------------------------------------- arch/x86/mm/init_64.c | 44 -------------------------------------------- 4 files changed, 50 insertions(+), 89 deletions(-) create mode 100644 arch/x86/mm/init.c (limited to 'arch') diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 2b938a38491..08537747cb5 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,4 +1,4 @@ -obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ +obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ pat.o pgtable.o gup.o obj-$(CONFIG_SMP) += tlb.o diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c new file mode 100644 index 00000000000..ce6a722587d --- /dev/null +++ b/arch/x86/mm/init.c @@ -0,0 +1,49 @@ +#include +#include +#include +#include +#include + +void free_init_pages(char *what, unsigned long begin, unsigned long end) +{ + unsigned long addr = begin; + + if (addr >= end) + return; + + /* + * If debugging page accesses then do not free this memory but + * mark them not present - any buggy init-section access will + * create a kernel page fault: + */ +#ifdef CONFIG_DEBUG_PAGEALLOC + printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", + begin, PAGE_ALIGN(end)); + set_memory_np(begin, (end - begin) >> PAGE_SHIFT); +#else + /* + * We just marked the kernel text read only above, now that + * we are going to free part of that, we need to make that + * writeable first. 
+ */ + set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); + + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); + + for (; addr < end; addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + memset((void *)(addr & ~(PAGE_SIZE-1)), + POISON_FREE_INITMEM, PAGE_SIZE); + free_page(addr); + totalram_pages++; + } +#endif +} + +void free_initmem(void) +{ + free_init_pages("unused kernel memory", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); +} diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 1570a822c18..cd8d6732613 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -1212,50 +1212,6 @@ void mark_rodata_ro(void) } #endif -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr = begin; - - if (addr >= end) - return; - - /* - * If debugging page accesses then do not free this memory but - * mark them not present - any buggy init-section access will - * create a kernel page fault: - */ -#ifdef CONFIG_DEBUG_PAGEALLOC - printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", - begin, PAGE_ALIGN(end)); - set_memory_np(begin, (end - begin) >> PAGE_SHIFT); -#else - /* - * We just marked the kernel text read only above, now that - * we are going to free part of that, we need to make that - * writeable first. - */ - set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); - - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); - - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)(addr & ~(PAGE_SIZE-1)), - POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } -#endif -} - -void free_initmem(void) -{ - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); -} - #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 03da9030d0e..aae87456d93 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -945,50 +945,6 @@ void __init mem_init(void) initsize >> 10); } -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr = begin; - - if (addr >= end) - return; - - /* - * If debugging page accesses then do not free this memory but - * mark them not present - any buggy init-section access will - * create a kernel page fault: - */ -#ifdef CONFIG_DEBUG_PAGEALLOC - printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", - begin, PAGE_ALIGN(end)); - set_memory_np(begin, (end - begin) >> PAGE_SHIFT); -#else - /* - * We just marked the kernel text read only above, now that - * we are going to free part of that, we need to make that - * writeable first. 
- */ - set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); - - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); - - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)(addr & ~(PAGE_SIZE-1)), - POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } -#endif -} - -void free_initmem(void) -{ - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); -} - #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); -- cgit v1.2.3 From 867c5b5292583b1e474cbbcb4c77f09bfca3903c Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 3 Mar 2009 14:10:12 +0200 Subject: x86: set_highmem_pages_init() cleanup Impact: cleanup This patch moves set_highmem_pages_init() to arch/x86/mm/highmem_32.c. The declaration of the function is kept in asm/numa_32.h because asm/highmem.h is included only if CONFIG_HIGHMEM is enabled so we can't put the empty static inline function there. Signed-off-by: Pekka Enberg LKML-Reference: <1236082212.2675.24.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/numa_32.h | 6 +++++- arch/x86/mm/highmem_32.c | 34 ++++++++++++++++++++++++++++++++++ arch/x86/mm/init_32.c | 12 ------------ arch/x86/mm/numa_32.c | 26 -------------------------- 4 files changed, 39 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index e9f5db79624..a37229011b5 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h @@ -4,8 +4,12 @@ extern int pxm_to_nid(int pxm); extern void numa_remove_cpu(int cpu); -#ifdef CONFIG_NUMA +#ifdef CONFIG_HIGHMEM extern void set_highmem_pages_init(void); +#else +static inline void set_highmem_pages_init(void) +{ +} #endif #endif /* _ASM_X86_NUMA_32_H */ diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index bcc079c282d..13a823cf564 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -1,5 +1,6 @@ #include #include +#include /* for totalram_pages */ void *kmap(struct page *page) { @@ -156,3 +157,36 @@ EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(kunmap_atomic); + +#ifdef CONFIG_NUMA +void __init set_highmem_pages_init(void) +{ + struct zone *zone; + int nid; + + for_each_zone(zone) { + unsigned long zone_start_pfn, zone_end_pfn; + + if (!is_highmem(zone)) + continue; + + zone_start_pfn = zone->zone_start_pfn; + zone_end_pfn = zone_start_pfn + zone->spanned_pages; + + nid = zone_to_nid(zone); + printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", + zone->name, nid, zone_start_pfn, zone_end_pfn); + + add_highpages_with_active_regions(nid, zone_start_pfn, + zone_end_pfn); + } + totalram_pages += totalhigh_pages; +} +#else +static void __init set_highmem_pages_init(void) +{ + add_highpages_with_active_regions(0, highstart_pfn, highend_pfn); + + totalram_pages += totalhigh_pages; +} +#endif /* CONFIG_NUMA */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index cd8d6732613..0b087dcd2c1 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -467,22 +467,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, work_with_active_regions(nid, add_highpages_work_fn, &data); } -#ifndef CONFIG_NUMA -static void __init set_highmem_pages_init(void) -{ - add_highpages_with_active_regions(0, highstart_pfn, 
highend_pfn); - - totalram_pages += totalhigh_pages; -} -#endif /* !CONFIG_NUMA */ - #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { } -static inline void set_highmem_pages_init(void) -{ -} #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index d1f7439d173..a04092a8acc 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -423,32 +423,6 @@ void __init initmem_init(unsigned long start_pfn, setup_bootmem_allocator(); } -void __init set_highmem_pages_init(void) -{ -#ifdef CONFIG_HIGHMEM - struct zone *zone; - int nid; - - for_each_zone(zone) { - unsigned long zone_start_pfn, zone_end_pfn; - - if (!is_highmem(zone)) - continue; - - zone_start_pfn = zone->zone_start_pfn; - zone_end_pfn = zone_start_pfn + zone->spanned_pages; - - nid = zone_to_nid(zone); - printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", - zone->name, nid, zone_start_pfn, zone_end_pfn); - - add_highpages_with_active_regions(nid, zone_start_pfn, - zone_end_pfn); - } - totalram_pages += totalhigh_pages; -#endif -} - #ifdef CONFIG_MEMORY_HOTPLUG static int paddr_to_nid(u64 addr) { -- cgit v1.2.3 From 03787ceed8f7bf06af29f3b213017d89f8e9423d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 3 Mar 2009 15:32:24 +0100 Subject: x86: set_highmem_pages_init() cleanup, fix !CONFIG_NUMA && CONFIG_HIGHMEM=y Impact: build fix arch/x86/mm/highmem_32.c:187: error: static declaration of 'set_highmem_pages_init' follows non-static declaration arch/x86/include/asm/numa_32.h:8: error: previous declaration of 'set_highmem_pages_init' was here Signed-off-by: Pekka Enberg LKML-Reference: <1236082212.2675.24.camel@penberg-laptop> Signed-off-by: Ingo Molnar --- arch/x86/mm/highmem_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 13a823cf564..00f127c80b0 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -183,7 +183,7 @@ void __init set_highmem_pages_init(void) totalram_pages += totalhigh_pages; } #else -static void __init set_highmem_pages_init(void) +void __init set_highmem_pages_init(void) { add_highpages_with_active_regions(0, highstart_pfn, highend_pfn); -- cgit v1.2.3 From 4e8304758cc09a6097dbd2c4f44a5369e5c1edb0 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 3 Mar 2009 11:51:39 -0800 Subject: x86: remove vestigial fix_ioremap prototypes The function seems to have disappeared at some point, leaving some vestigial prototypes behind... 
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 683d0b4c00f..e5383e3d2f8 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -172,8 +172,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) extern void iounmap(volatile void __iomem *addr); -extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); - #ifdef CONFIG_X86_32 # include "io_32.h" @@ -198,7 +196,6 @@ extern void early_ioremap_reset(void); extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); extern void __iomem *early_memremap(unsigned long offset, unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); -extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); #define IO_SPACE_LIMIT 0xffff -- cgit v1.2.3 From f254f3909efaf59ca2d0f408de2d044dace60706 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 3 Mar 2009 12:02:57 -0800 Subject: x86: un-__init fill_pud/pmd/pte They are used by __set_fixmap->set_pte_vaddr_pud, which can be used by arch_setup_additional_pages(), and so is used after init. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 11981fc8570..07f44d491df 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -168,7 +168,7 @@ static __ref void *spp_getpage(void) return ptr; } -static pud_t * __init fill_pud(pgd_t *pgd, unsigned long vaddr) +static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) { if (pgd_none(*pgd)) { pud_t *pud = (pud_t *)spp_getpage(); @@ -180,7 +180,7 @@ static pud_t * __init fill_pud(pgd_t *pgd, unsigned long vaddr) return pud_offset(pgd, vaddr); } -static pmd_t * __init fill_pmd(pud_t *pud, unsigned long vaddr) +static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) { if (pud_none(*pud)) { pmd_t *pmd = (pmd_t *) spp_getpage(); @@ -192,7 +192,7 @@ static pmd_t * __init fill_pmd(pud_t *pud, unsigned long vaddr) return pmd_offset(pud, vaddr); } -static pte_t * __init fill_pte(pmd_t *pmd, unsigned long vaddr) +static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) { if (pmd_none(*pmd)) { pte_t *pte = (pte_t *) spp_getpage(); -- cgit v1.2.3 From 36e8abf3edcd2d207193ec5741d1a2a645d470a5 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 5 Mar 2009 00:16:26 -0500 Subject: [CPUFREQ] Prevent p4-clockmod from auto-binding to the ondemand governor. The latency of p4-clockmod sucks so hard that scaling on a regular basis with ondemand is a really bad idea. 
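For context, the one-liner below works because the ondemand governor refuses to manage a CPU whose driver reports a transition latency above its internal cutoff (about 10 ms in kernels of this era), so reporting one nanosecond more than that keeps ondemand from auto-binding to p4-clockmod. A minimal sketch of that governor-side gate follows; the limit macro and helper name are assumptions for illustration, not quotes of the actual ondemand source:

#include <linux/cpufreq.h>

/* assumed 10 ms cutoff; the real macro name and value may differ */
#define ONDEMAND_LATENCY_LIMIT_NS	(10 * 1000 * 1000)

static bool ondemand_would_bind(struct cpufreq_policy *policy)
{
	/* the 10000001 ns reported by the patch below fails this test by 1 ns */
	return policy->cpuinfo.transition_latency <= ONDEMAND_LATENCY_LIMIT_NS;
}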
Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 1778402305e..352cf9a4916 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -246,7 +246,10 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* assumed */ + + /* the transition latency is set to be 1 higher than the maximum + * transition latency of the ondemand governor */ + policy->cpuinfo.transition_latency = 10000001; policy->cur = stock_freq; return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); -- cgit v1.2.3 From 929ef1a1f566087e4b362e3a56c50bf9db30e3c5 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 5 Mar 2009 17:37:12 +0900 Subject: sh: urquell: Add smc91x support and update defconfig accordingly. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/boards/board-urquell.c | 34 ++ arch/sh/configs/urquell_defconfig | 926 +++++++++++++++++++++++++++++++++++--- 2 files changed, 896 insertions(+), 64 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c index d5caeabf46e..17036ce2008 100644 --- a/arch/sh/boards/board-urquell.c +++ b/arch/sh/boards/board-urquell.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,32 @@ static struct platform_device heartbeat_device = { .resource = heartbeat_resources, }; +static struct smc91x_platdata smc91x_info = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + +static struct resource smc91x_eth_resources[] = { + [0] = { + .name = "SMC91C111" , + .start = 0x05800300, + .end = 0x0580030f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 11, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device smc91x_eth_device = { + .name = "smc91x", + .num_resources = ARRAY_SIZE(smc91x_eth_resources), + .resource = smc91x_eth_resources, + .dev = { + .platform_data = &smc91x_info, + }, +}; + static struct mtd_partition nor_flash_partitions[] = { { .name = "loader", @@ -92,6 +119,7 @@ static struct platform_device nor_flash_device = { static struct platform_device *urquell_devices[] __initdata = { &heartbeat_device, + &smc91x_eth_device, &nor_flash_device, }; @@ -111,6 +139,11 @@ static void urquell_power_off(void) __raw_writew(0xa5a5, UBOARDREG(SRSTR)); } +static void __init urquell_init_irq(void) +{ + plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK); +} + /* Initialize the board */ static void __init urquell_setup(char **cmdline_p) { @@ -125,4 +158,5 @@ static void __init urquell_setup(char **cmdline_p) static struct sh_machine_vector mv_urquell __initmv = { .mv_name = "Urquell", .mv_setup = urquell_setup, + .mv_init_irq = urquell_init_irq, }; diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 94a4bdbd1bc..be726c7cdf9 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -1,12 +1,13 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.29-rc4 -# Tue Mar 3 16:20:09 2009 +# Thu Mar 5 17:28:13 2009 # CONFIG_SUPERH=y CONFIG_SUPERH32=y CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" 
CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y @@ -29,13 +30,20 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" # # General setup # -# CONFIG_EXPERIMENTAL is not set +CONFIG_EXPERIMENTAL=y CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -# CONFIG_SYSVIPC is not set -# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set # # RCU Subsystem @@ -45,45 +53,82 @@ CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_RCU is not set # CONFIG_TREE_RCU_TRACE is not set # CONFIG_PREEMPT_RCU_TRACE is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=17 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set # CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set # CONFIG_BLK_DEV_INITRD is not set -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y CONFIG_EMBEDDED=y -# CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set -# CONFIG_PRINTK is not set -# CONFIG_BUG is not set -# CONFIG_ELF_CORE is not set -# CONFIG_COMPAT_BRK is not set -# CONFIG_BASE_FULL is not set -# CONFIG_FUTEX is not set -# CONFIG_EPOLL is not set -# CONFIG_SIGNALFD is not set -# CONFIG_TIMERFD is not set -# CONFIG_EVENTFD is not set +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y CONFIG_SHMEM=y -# CONFIG_AIO is not set -# CONFIG_VM_EVENT_COUNTERS is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set # CONFIG_SLOB is not set -# CONFIG_PROFILING is not set +CONFIG_PROFILING=y +# CONFIG_OPROFILE is not set CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_CLK=y CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_BASE_SMALL=1 -# CONFIG_MODULES is not set -# CONFIG_BLOCK is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_FREEZER is not set # @@ -135,9 +180,11 @@ CONFIG_QUICKLIST=y CONFIG_MMU=y 
CONFIG_PAGE_OFFSET=0x80000000 CONFIG_MEMORY_START=0x08000000 -CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_MEMORY_SIZE=0x08000000 CONFIG_29BIT=y +# CONFIG_X2TLB is not set CONFIG_VSYSCALL=y +# CONFIG_NUMA is not set CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_DEFAULT=y @@ -158,6 +205,7 @@ CONFIG_SPARSEMEM_MANUAL=y CONFIG_SPARSEMEM=y CONFIG_HAVE_MEMORY_PRESENT=y CONFIG_SPARSEMEM_STATIC=y +# CONFIG_MEMORY_HOTPLUG is not set CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MIGRATION=y @@ -180,7 +228,7 @@ CONFIG_CACHE_WRITEBACK=y CONFIG_CPU_LITTLE_ENDIAN=y # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_SH_FPU=y -# CONFIG_SH_STORE_QUEUES is not set +CONFIG_SH_STORE_QUEUES=y CONFIG_CPU_HAS_INTEVT=y CONFIG_CPU_HAS_SR_RB=y CONFIG_CPU_HAS_FPU=y @@ -195,9 +243,9 @@ CONFIG_SH_URQUELL=y # CONFIG_SH_TMU=y CONFIG_SH_TIMER_IRQ=16 -CONFIG_SH_PCLK_FREQ=31250000 +CONFIG_SH_PCLK_FREQ=33333333 CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y +# CONFIG_NO_HZ is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y @@ -230,9 +278,12 @@ CONFIG_HZ_250=y # CONFIG_HZ_1000 is not set CONFIG_HZ=250 CONFIG_SCHED_HRTICK=y -CONFIG_PREEMPT_NONE=y +CONFIG_KEXEC=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_SECCOMP is not set +# CONFIG_PREEMPT_NONE is not set # CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set +CONFIG_PREEMPT=y CONFIG_GUSA=y # @@ -240,20 +291,116 @@ CONFIG_GUSA=y # CONFIG_ZERO_PAGE_OFFSET=0x00001000 CONFIG_BOOT_LINK_OFFSET=0x00800000 -# CONFIG_CMDLINE_BOOL is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC1, 38400 earlyprintk=serial ip=on ignore_loglevel root=/dev/nfs ip=dhcp memchunk.vpu=4m" # # Bus options # # CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set # # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set -# CONFIG_NET is not set + +# +# Power management options (EXPERIMENTAL) +# +# CONFIG_PM is not set +# CONFIG_CPU_IDLE is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_COMPAT_NET_DEV_OPS=y +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# 
CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_OLD_REGULATORY is not set +CONFIG_WIRELESS_EXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +# CONFIG_MAC80211 is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set # # Device Drivers @@ -262,25 +409,39 @@ CONFIG_BINFMT_ELF=y # # Generic Driver Options # +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_STANDALONE=y -# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set # CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set -# CONFIG_MTD_CONCAT is not set -# CONFIG_MTD_PARTITIONS is not set +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set # # User Modules And Translation Layers # -# CONFIG_MTD_CHAR is not set +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set # CONFIG_MTD_OOPS is not set # # RAM/ROM/Flash chip drivers # CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y +# CONFIG_MTD_JEDECPROBE is not set CONFIG_MTD_GEN_PROBE=y # CONFIG_MTD_CFI_ADV_OPTIONS is not set CONFIG_MTD_MAP_BANK_WIDTH_1=y @@ -294,7 +455,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_CFI_I4 is not set # CONFIG_MTD_CFI_I8 is not set # CONFIG_MTD_CFI_INTELEXT is not set -# CONFIG_MTD_CFI_AMDSTD is not set +CONFIG_MTD_CFI_AMDSTD=y # CONFIG_MTD_CFI_STAA is not set CONFIG_MTD_CFI_UTIL=y # CONFIG_MTD_RAM is not set @@ -315,6 +476,7 @@ CONFIG_MTD_PHYSMAP=y # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set # # Disk-On-Chip Device Drivers @@ -336,20 +498,176 @@ CONFIG_MTD_PHYSMAP=y # # CONFIG_MTD_UBI is not set # CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set # CONFIG_MISC_DEVICES is not set CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set # # SCSI device support # -# CONFIG_SCSI_DMA is not set +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set # CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_SATA_PMP=y +CONFIG_ATA_SFF=y +# CONFIG_SATA_MV is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_AX88796 is not set +# CONFIG_STNIC is not set +CONFIG_SMC91X=y +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set +# CONFIG_IWLWIFI_LEDS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET is not set +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set # CONFIG_PHONE is not set # # Input device support # -# CONFIG_INPUT is not set +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=m +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_SH_KEYSC is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set # # Hardware I/O ports @@ -360,8 +678,12 @@ CONFIG_HAVE_IDE=y # 
# Character devices # -# CONFIG_VT is not set -# CONFIG_DEVKMEM is not set +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set # @@ -377,15 +699,64 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set -# CONFIG_LEGACY_PTYS is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set -# CONFIG_HW_RANDOM is not set +CONFIG_HW_RANDOM=y # CONFIG_R3964 is not set -# CONFIG_I2C is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_CHARDEV is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOPCA=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SH_MOBILE is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_PCA_PLATFORM=y +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_SPI is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y +# CONFIG_GPIO_SYSFS is not set # # Memory mapped GPIO expanders: @@ -394,6 +765,9 @@ CONFIG_GPIOLIB=y # # I2C GPIO expanders: # +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set # # PCI GPIO expanders: @@ -419,9 +793,16 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set -# CONFIG_MFD_SM501 is not set +CONFIG_MFD_SM501=y +# CONFIG_MFD_SM501_GPIO is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set # CONFIG_REGULATOR is not set # @@ -432,6 +813,7 @@ CONFIG_SSB_POSSIBLE=y # Multimedia core support # # CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set # CONFIG_VIDEO_MEDIA is not set # @@ -444,15 +826,202 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set -# CONFIG_FB is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# 
CONFIG_FB_S1D13XXX is not set +CONFIG_FB_SH_MOBILE_LCDC=m +CONFIG_FB_SM501=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # # Display device support # # CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +# CONFIG_LOGO_SUPERH_CLUT224 is not set # CONFIG_SOUND is not set -# CONFIG_USB_SUPPORT is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_DEBUG is not set +# CONFIG_HIDRAW is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# Special HID drivers +# +CONFIG_HID_COMPAT=y +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_GYRATION=y +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set +CONFIG_HID_PANTHERLORD=y +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_GREENASIA_FF is not set +# CONFIG_HID_TOPSEED is not set +CONFIG_THRUSTMASTER_FF=m +CONFIG_ZEROPLUS_FF=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# + +# +# see USB_STORAGE Help for more information +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# 
CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_BERRY_CHARGE is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set +# CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -465,38 +1034,171 @@ CONFIG_SSB_POSSIBLE=y # # File systems # -# CONFIG_DNOTIFY is not set -# CONFIG_INOTIFY is not set +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + # # Pseudo filesystems # -# CONFIG_PROC_FS is not set -# CONFIG_SYSFS is not set -# CONFIG_TMPFS is not set +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLBFS is not set # CONFIG_HUGETLB_PAGE is not set -# CONFIG_MISC_FILESYSTEMS is not set -# CONFIG_NLS is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=y +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set 
+CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set # # Kernel hacking # CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set # CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set # CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_LATENCYTOP is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y @@ -504,6 +1206,7 @@ CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y # # Tracers # +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_SH_STANDARD_BIOS is not set @@ -514,21 +1217,116 @@ CONFIG_HAVE_ARCH_KGDB=y # Security options # # CONFIG_KEYS is not set +# CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -# CONFIG_CRYPTO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 
is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set # # Library routines # +CONFIG_BITREVERSE=y CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC_CCITT is not set # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set -# CONFIG_CRC32 is not set +CONFIG_CRC32=y # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set +CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y -- cgit v1.2.3 From a4b1fddcd40f10820fbc123d4d02cd62eeef476c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 5 Mar 2009 17:52:34 +0900 Subject: sh: Set a sensible default for the SH7786 pclk. Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 83aee3db54b..a4c2c845863 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -466,7 +466,8 @@ config SH_PCLK_FREQ default "33333333" if CPU_SUBTYPE_SH7770 || CPU_SUBTYPE_SH7723 || \ CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \ CPU_SUBTYPE_SH7203 || CPU_SUBTYPE_SH7206 || \ - CPU_SUBTYPE_SH7263 || CPU_SUBTYPE_MXG + CPU_SUBTYPE_SH7263 || CPU_SUBTYPE_MXG || \ + CPU_SUBTYPE_SH7786 default "60000000" if CPU_SUBTYPE_SH7751 || CPU_SUBTYPE_SH7751R default "66000000" if CPU_SUBTYPE_SH4_202 default "50000000" -- cgit v1.2.3 From 8150bc886be5ce3cc301a2baca1fcf2cf7bd7f39 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Wed, 4 Mar 2009 00:49:26 +0000 Subject: S3C24XX: Move and update IIS headers Move the IIS headers to their correct place. 
Signed-off-by: Ben Dooks Signed-off-by: Mark Brown --- arch/arm/mach-s3c2410/dma.c | 2 +- arch/arm/mach-s3c2410/include/mach/hardware.h | 3 - arch/arm/mach-s3c2410/include/mach/io.h | 2 +- arch/arm/mach-s3c2412/dma.c | 4 +- arch/arm/mach-s3c2440/dma.c | 2 +- arch/arm/mach-s3c2443/dma.c | 2 +- arch/arm/mach-shark/include/mach/io.h | 2 +- arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h | 72 +++++++++++++++++++++ arch/arm/plat-s3c24xx/clock-dclk.c | 1 + arch/arm/plat-s3c24xx/include/plat/regs-iis.h | 77 +++++++++++++++++++++++ 10 files changed, 157 insertions(+), 10 deletions(-) create mode 100644 arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h create mode 100644 arch/arm/plat-s3c24xx/include/plat/regs-iis.h (limited to 'arch') diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c index 552b4c778fd..440c014e24b 100644 --- a/arch/arm/mach-s3c2410/dma.c +++ b/arch/arm/mach-s3c2410/dma.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include static struct s3c24xx_dma_map __initdata s3c2410_dma_mappings[] = { diff --git a/arch/arm/mach-s3c2410/include/mach/hardware.h b/arch/arm/mach-s3c2410/include/mach/hardware.h index 74d5a1a4024..db72beb61d7 100644 --- a/arch/arm/mach-s3c2410/include/mach/hardware.h +++ b/arch/arm/mach-s3c2410/include/mach/hardware.h @@ -131,7 +131,4 @@ extern int s3c2412_gpio_set_sleepcfg(unsigned int pin, unsigned int state); /* machine specific hardware definitions should go after this */ -/* currently here until moved into config (todo) */ -#define CONFIG_NO_MULTIWORD_IO - #endif /* __ASM_ARCH_HARDWARE_H */ diff --git a/arch/arm/mach-s3c2410/include/mach/io.h b/arch/arm/mach-s3c2410/include/mach/io.h index 9813dbf2ae4..c477771c092 100644 --- a/arch/arm/mach-s3c2410/include/mach/io.h +++ b/arch/arm/mach-s3c2410/include/mach/io.h @@ -9,7 +9,7 @@ #ifndef __ASM_ARM_ARCH_IO_H #define __ASM_ARM_ARCH_IO_H -#include +#include #define IO_SPACE_LIMIT 0xffffffff diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c index 919856c9433..9e3478506c6 100644 --- a/arch/arm/mach-s3c2412/dma.c +++ b/arch/arm/mach-s3c2412/dma.c @@ -29,8 +29,8 @@ #include #include #include -#include -#include +#include +#include #include #define MAP(x) { (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID } diff --git a/arch/arm/mach-s3c2440/dma.c b/arch/arm/mach-s3c2440/dma.c index 5b5ee0b8f4e..69b6cf34df4 100644 --- a/arch/arm/mach-s3c2440/dma.c +++ b/arch/arm/mach-s3c2440/dma.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include static struct s3c24xx_dma_map __initdata s3c2440_dma_mappings[] = { diff --git a/arch/arm/mach-s3c2443/dma.c b/arch/arm/mach-s3c2443/dma.c index 2a58a4d5aa5..8430e582918 100644 --- a/arch/arm/mach-s3c2443/dma.c +++ b/arch/arm/mach-s3c2443/dma.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #define MAP(x) { \ diff --git a/arch/arm/mach-shark/include/mach/io.h b/arch/arm/mach-shark/include/mach/io.h index c5cee829fc8..8ca7d7f09bd 100644 --- a/arch/arm/mach-shark/include/mach/io.h +++ b/arch/arm/mach-shark/include/mach/io.h @@ -14,7 +14,7 @@ #define PCIO_BASE 0xe0000000 #define IO_SPACE_LIMIT 0xffffffff -#define __io(a) ((void __iomem *)(PCIO_BASE + (a))) +#define __io(a) __typesafe_io(PCIO_BASE + (a)) #define __mem_pci(addr) (addr) #endif diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h new file mode 100644 index 00000000000..25d4058bcfe --- /dev/null +++ 
b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h @@ -0,0 +1,72 @@ +/* linux/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h + * + * Copyright 2007 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * S3C2412 IIS register definition +*/ + +#ifndef __ASM_ARCH_REGS_S3C2412_IIS_H +#define __ASM_ARCH_REGS_S3C2412_IIS_H + +#define S3C2412_IISCON (0x00) +#define S3C2412_IISMOD (0x04) +#define S3C2412_IISFIC (0x08) +#define S3C2412_IISPSR (0x0C) +#define S3C2412_IISTXD (0x10) +#define S3C2412_IISRXD (0x14) + +#define S3C2412_IISCON_LRINDEX (1 << 11) +#define S3C2412_IISCON_TXFIFO_EMPTY (1 << 10) +#define S3C2412_IISCON_RXFIFO_EMPTY (1 << 9) +#define S3C2412_IISCON_TXFIFO_FULL (1 << 8) +#define S3C2412_IISCON_RXFIFO_FULL (1 << 7) +#define S3C2412_IISCON_TXDMA_PAUSE (1 << 6) +#define S3C2412_IISCON_RXDMA_PAUSE (1 << 5) +#define S3C2412_IISCON_TXCH_PAUSE (1 << 4) +#define S3C2412_IISCON_RXCH_PAUSE (1 << 3) +#define S3C2412_IISCON_TXDMA_ACTIVE (1 << 2) +#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1) +#define S3C2412_IISCON_IIS_ACTIVE (1 << 0) + +#define S3C2412_IISMOD_MASTER_INTERNAL (0 << 10) +#define S3C2412_IISMOD_MASTER_EXTERNAL (1 << 10) +#define S3C2412_IISMOD_SLAVE (2 << 10) +#define S3C2412_IISMOD_MASTER_MASK (3 << 10) +#define S3C2412_IISMOD_MODE_TXONLY (0 << 8) +#define S3C2412_IISMOD_MODE_RXONLY (1 << 8) +#define S3C2412_IISMOD_MODE_TXRX (2 << 8) +#define S3C2412_IISMOD_MODE_MASK (3 << 8) +#define S3C2412_IISMOD_LR_LLOW (0 << 7) +#define S3C2412_IISMOD_LR_RLOW (1 << 7) +#define S3C2412_IISMOD_SDF_IIS (0 << 5) +#define S3C2412_IISMOD_SDF_MSB (0 << 5) +#define S3C2412_IISMOD_SDF_LSB (0 << 5) +#define S3C2412_IISMOD_SDF_MASK (3 << 5) +#define S3C2412_IISMOD_RCLK_256FS (0 << 3) +#define S3C2412_IISMOD_RCLK_512FS (1 << 3) +#define S3C2412_IISMOD_RCLK_384FS (2 << 3) +#define S3C2412_IISMOD_RCLK_768FS (3 << 3) +#define S3C2412_IISMOD_RCLK_MASK (3 << 3) +#define S3C2412_IISMOD_BCLK_32FS (0 << 1) +#define S3C2412_IISMOD_BCLK_48FS (1 << 1) +#define S3C2412_IISMOD_BCLK_16FS (2 << 1) +#define S3C2412_IISMOD_BCLK_24FS (3 << 1) +#define S3C2412_IISMOD_BCLK_MASK (3 << 1) +#define S3C2412_IISMOD_8BIT (1 << 0) + +#define S3C2412_IISPSR_PSREN (1 << 15) + +#define S3C2412_IISFIC_TXFLUSH (1 << 15) +#define S3C2412_IISFIC_RXFLUSH (1 << 7) +#define S3C2412_IISFIC_TXCOUNT(x) (((x) >> 8) & 0xf) +#define S3C2412_IISFIC_RXCOUNT(x) (((x) >> 0) & 0xf) + + + +#endif /* __ASM_ARCH_REGS_S3C2412_IIS_H */ + diff --git a/arch/arm/plat-s3c24xx/clock-dclk.c b/arch/arm/plat-s3c24xx/clock-dclk.c index 5b75a797b5a..35219dcf9f0 100644 --- a/arch/arm/plat-s3c24xx/clock-dclk.c +++ b/arch/arm/plat-s3c24xx/clock-dclk.c @@ -18,6 +18,7 @@ #include #include +#include #include #include diff --git a/arch/arm/plat-s3c24xx/include/plat/regs-iis.h b/arch/arm/plat-s3c24xx/include/plat/regs-iis.h new file mode 100644 index 00000000000..a6f1d5df13b --- /dev/null +++ b/arch/arm/plat-s3c24xx/include/plat/regs-iis.h @@ -0,0 +1,77 @@ +/* arch/arm/mach-s3c2410/include/mach/regs-iis.h + * + * Copyright (c) 2003 Simtec Electronics + * http://www.simtec.co.uk/products/SWLINUX/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * S3C2410 IIS register definition +*/ + +#ifndef __ASM_ARCH_REGS_IIS_H +#define __ASM_ARCH_REGS_IIS_H + +#define S3C2410_IISCON (0x00) + +#define S3C2410_IISCON_LRINDEX (1<<8) +#define S3C2410_IISCON_TXFIFORDY (1<<7) +#define S3C2410_IISCON_RXFIFORDY (1<<6) +#define S3C2410_IISCON_TXDMAEN (1<<5) +#define S3C2410_IISCON_RXDMAEN (1<<4) +#define S3C2410_IISCON_TXIDLE (1<<3) +#define S3C2410_IISCON_RXIDLE (1<<2) +#define S3C2410_IISCON_PSCEN (1<<1) +#define S3C2410_IISCON_IISEN (1<<0) + +#define S3C2410_IISMOD (0x04) + +#define S3C2440_IISMOD_MPLL (1<<9) +#define S3C2410_IISMOD_SLAVE (1<<8) +#define S3C2410_IISMOD_NOXFER (0<<6) +#define S3C2410_IISMOD_RXMODE (1<<6) +#define S3C2410_IISMOD_TXMODE (2<<6) +#define S3C2410_IISMOD_TXRXMODE (3<<6) +#define S3C2410_IISMOD_LR_LLOW (0<<5) +#define S3C2410_IISMOD_LR_RLOW (1<<5) +#define S3C2410_IISMOD_IIS (0<<4) +#define S3C2410_IISMOD_MSB (1<<4) +#define S3C2410_IISMOD_8BIT (0<<3) +#define S3C2410_IISMOD_16BIT (1<<3) +#define S3C2410_IISMOD_BITMASK (1<<3) +#define S3C2410_IISMOD_256FS (0<<2) +#define S3C2410_IISMOD_384FS (1<<2) +#define S3C2410_IISMOD_16FS (0<<0) +#define S3C2410_IISMOD_32FS (1<<0) +#define S3C2410_IISMOD_48FS (2<<0) +#define S3C2410_IISMOD_FS_MASK (3<<0) + +#define S3C2410_IISPSR (0x08) +#define S3C2410_IISPSR_INTMASK (31<<5) +#define S3C2410_IISPSR_INTSHIFT (5) +#define S3C2410_IISPSR_EXTMASK (31<<0) +#define S3C2410_IISPSR_EXTSHFIT (0) + +#define S3C2410_IISFCON (0x0c) + +#define S3C2410_IISFCON_TXDMA (1<<15) +#define S3C2410_IISFCON_RXDMA (1<<14) +#define S3C2410_IISFCON_TXENABLE (1<<13) +#define S3C2410_IISFCON_RXENABLE (1<<12) +#define S3C2410_IISFCON_TXMASK (0x3f << 6) +#define S3C2410_IISFCON_TXSHIFT (6) +#define S3C2410_IISFCON_RXMASK (0x3f) +#define S3C2410_IISFCON_RXSHIFT (0) + +#define S3C2400_IISFCON_TXDMA (1<<11) +#define S3C2400_IISFCON_RXDMA (1<<10) +#define S3C2400_IISFCON_TXENABLE (1<<9) +#define S3C2400_IISFCON_RXENABLE (1<<8) +#define S3C2400_IISFCON_TXMASK (0x07 << 4) +#define S3C2400_IISFCON_TXSHIFT (4) +#define S3C2400_IISFCON_RXMASK (0x07) +#define S3C2400_IISFCON_RXSHIFT (0) + +#define S3C2410_IISFIFO (0x10) +#endif /* __ASM_ARCH_REGS_IIS_H */ -- cgit v1.2.3 From 899e6cf5e6d83a91d2e257f7a4e8ca98db3831cc Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Wed, 4 Mar 2009 00:49:28 +0000 Subject: S3C: Move to The file needs to be common to both ARCH_S3C2410 and ARCH_S3C64XX as they share common driver code, so move it to . Signed-off-by: Ben Dooks Signed-off-by: Mark Brown --- arch/arm/mach-s3c2410/include/mach/audio.h | 45 ------------------------------ arch/arm/plat-s3c/include/plat/audio.h | 45 ++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 45 deletions(-) delete mode 100644 arch/arm/mach-s3c2410/include/mach/audio.h create mode 100644 arch/arm/plat-s3c/include/plat/audio.h (limited to 'arch') diff --git a/arch/arm/mach-s3c2410/include/mach/audio.h b/arch/arm/mach-s3c2410/include/mach/audio.h deleted file mode 100644 index de0e8da48bc..00000000000 --- a/arch/arm/mach-s3c2410/include/mach/audio.h +++ /dev/null @@ -1,45 +0,0 @@ -/* arch/arm/mach-s3c2410/include/mach/audio.h - * - * Copyright (c) 2004-2005 Simtec Electronics - * http://www.simtec.co.uk/products/SWLINUX/ - * Ben Dooks - * - * S3C24XX - Audio platfrom_device info - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#ifndef __ASM_ARCH_AUDIO_H -#define __ASM_ARCH_AUDIO_H __FILE__ - -/* struct s3c24xx_iis_ops - * - * called from the s3c24xx audio core to deal with the architecture - * or the codec's setup and control. - * - * the pointer to itself is passed through in case the caller wants to - * embed this in an larger structure for easy reference to it's context. -*/ - -struct s3c24xx_iis_ops { - struct module *owner; - - int (*startup)(struct s3c24xx_iis_ops *me); - void (*shutdown)(struct s3c24xx_iis_ops *me); - int (*suspend)(struct s3c24xx_iis_ops *me); - int (*resume)(struct s3c24xx_iis_ops *me); - - int (*open)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); - int (*close)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); - int (*prepare)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm, struct snd_pcm_runtime *rt); -}; - -struct s3c24xx_platdata_iis { - const char *codec_clk; - struct s3c24xx_iis_ops *ops; - int (*match_dev)(struct device *dev); -}; - -#endif /* __ASM_ARCH_AUDIO_H */ diff --git a/arch/arm/plat-s3c/include/plat/audio.h b/arch/arm/plat-s3c/include/plat/audio.h new file mode 100644 index 00000000000..de0e8da48bc --- /dev/null +++ b/arch/arm/plat-s3c/include/plat/audio.h @@ -0,0 +1,45 @@ +/* arch/arm/mach-s3c2410/include/mach/audio.h + * + * Copyright (c) 2004-2005 Simtec Electronics + * http://www.simtec.co.uk/products/SWLINUX/ + * Ben Dooks + * + * S3C24XX - Audio platfrom_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __ASM_ARCH_AUDIO_H +#define __ASM_ARCH_AUDIO_H __FILE__ + +/* struct s3c24xx_iis_ops + * + * called from the s3c24xx audio core to deal with the architecture + * or the codec's setup and control. + * + * the pointer to itself is passed through in case the caller wants to + * embed this in an larger structure for easy reference to it's context. +*/ + +struct s3c24xx_iis_ops { + struct module *owner; + + int (*startup)(struct s3c24xx_iis_ops *me); + void (*shutdown)(struct s3c24xx_iis_ops *me); + int (*suspend)(struct s3c24xx_iis_ops *me); + int (*resume)(struct s3c24xx_iis_ops *me); + + int (*open)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); + int (*close)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); + int (*prepare)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm, struct snd_pcm_runtime *rt); +}; + +struct s3c24xx_platdata_iis { + const char *codec_clk; + struct s3c24xx_iis_ops *ops; + int (*match_dev)(struct device *dev); +}; + +#endif /* __ASM_ARCH_AUDIO_H */ -- cgit v1.2.3 From a6bc77241d79d39ad2ad8010a82ce7e0f437ae05 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Fri, 6 Mar 2009 05:06:27 +0000 Subject: sh: multiple vectors per irq - sh7763 Update intc tables and platform data to use one linux irq per maskable interrupt source instead of keeping the one-to-one mapping between vectors and linux irqs. This fixes potential irq masking issues for sh7763 hardware blocks such as RTC/SCIF/DMAC/GETHER/PCIC5/MMCIF/SIM/GPIO/USBF. 
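The driver-side payoff of folding several vectors into one linux irq is that a block driver only requests a single interrupt number and demultiplexes in its handler. A rough sketch of that pattern, using hypothetical names rather than the real SCIF/sh-sci driver code:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t scif_demux_irq(int irq, void *dev_id)
{
	/* read the block's status registers and branch to rx/tx/error paths */
	return IRQ_HANDLED;
}

static int scif_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* e.g. 40 for SCIF0 above */

	if (irq < 0)
		return irq;
	return request_irq(irq, scif_demux_irq, IRQF_SHARED, "scif", pdev);
}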
Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 113 +++++++++++---------------------- 1 file changed, 37 insertions(+), 76 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 3c5b629887a..14a916f0d75 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -3,7 +3,7 @@ * * Copyright (C) 2006 Paul Mundt * Copyright (C) 2007 Yoshihiro Shimoda - * Copyright (C) 2008 Nobuhiro Iwamatsu + * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -22,18 +22,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ - .start = 20, + /* Shared Period/Carry/Alarm IRQ */ .flags = IORESOURCE_IRQ, }, }; @@ -50,17 +39,17 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, + .irqs = { 40, 40, 40, 40 }, }, { .mapbase = 0xffe08000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 76, 77, 79, 78 }, + .irqs = { 76, 76, 76, 76 }, }, { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 104, 105, 107, 106 }, + .irqs = { 104, 104, 104, 104 }, }, { .flags = 0, } @@ -148,93 +137,65 @@ enum { IRL_HHLL, IRL_HHLH, IRL_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, - RTC_ATI, RTC_PRI, RTC_CUI, - WDT, TMU0, TMU1, TMU2, TMU2_TICPI, - HUDI, LCDC, - DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - DMAC0_DMINT4, DMAC0_DMINT5, - IIC0, IIC1, - CMT, - GEINT0, GEINT1, GEINT2, - HAC, - PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, - PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0, - STIF0, STIF1, - SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, - SIOF0, SIOF1, SIOF2, - USBH, USBFI0, USBFI1, - TPU, PCC, - MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY, - SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND, + RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2, + USBH, USBF, TPU, PCC, MMCIF, SIM, TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3, - SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, - GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3, + SCIF2, GPIO, /* interrupt groups */ - TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5, - SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO, + TMU012, TMU345, }; static struct intc_vect vectors[] __initdata = { - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600), INTC_VECT(LCDC, 0x620), - INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660), - INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0), - INTC_VECT(DMAC0_DMAE, 0x6c0), - INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720), - INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), - INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0), + 
INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x6c0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0), - INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920), - INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960), + INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920), + INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960), INTC_VECT(HAC, 0x980), INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), - INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0), - INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0), - INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60), - INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0), - INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0), + INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0), + INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0), INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20), - INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80), - INTC_VECT(USBFI1, 0xca0), + INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80), + INTC_VECT(USBF, 0xca0), INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0), - INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20), - INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60), - INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0), - INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), + INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0), + INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0), INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60), INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0), INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0), - INTC_VECT(SCIF2_ERI, 0xf00), INTC_VECT(SCIF2_RXI, 0xf20), - INTC_VECT(SCIF2_BRI, 0xf40), INTC_VECT(SCIF2_TXI, 0xf60), - INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0), - INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0), + INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20), + INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), }; static struct intc_group groups[] __initdata = { INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), INTC_GROUP(TMU345, TMU3, TMU4, TMU5), - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, - DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2), - INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0), - INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), - INTC_GROUP(USBF, USBFI0, USBFI1), - INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY), - INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND), - INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), - INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3), }; static struct intc_mask_reg mask_registers[] __initdata = { -- cgit v1.2.3 From 6a242909b01120f6f3d571c0b75e20ec61f0d8d3 Mon Sep 17 
00:00:00 2001 From: Tejun Heo Date: Fri, 6 Mar 2009 14:33:58 +0900 Subject: percpu: clean up percpu constants Impact: cleanup Make the following cleanups. * There isn't much arch-specific about PERCPU_MODULE_RESERVE. Always define it whether the arch overrides PERCPU_ENOUGH_ROOM or not. * blackfin overrides PERCPU_ENOUGH_ROOM to align the static area size. Do it by default. * percpu allocation sizes don't have much to do with the page size. Don't use PAGE_SHIFT in their definition. Signed-off-by: Tejun Heo Cc: Bryan Wu --- arch/blackfin/include/asm/percpu.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h index 797c0c16506..c94c7bc88c7 100644 --- a/arch/blackfin/include/asm/percpu.h +++ b/arch/blackfin/include/asm/percpu.h @@ -3,14 +3,4 @@ #include -#ifdef CONFIG_MODULES -#define PERCPU_MODULE_RESERVE 8192 -#else -#define PERCPU_MODULE_RESERVE 0 -#endif - -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) - #endif /* __ARCH_BLACKFIN_PERCPU__ */ -- cgit v1.2.3 From cafe8816b217b98dc3f268d3b77445da498beb4f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Mar 2009 14:33:59 +0900 Subject: percpu: use negative for auto for pcpu_setup_first_chunk() arguments Impact: argument semantic cleanup In pcpu_setup_first_chunk(), zero @unit_size and @dyn_size meant auto-sizing. That's okay for @unit_size, as 0 doesn't make sense there, but a dynamic reserve size of 0 is valid. Also, if an arch's @dyn_size is calculated from other parameters, it might end up passing in a 0 @dyn_size and malfunction when the size is automatically adjusted. This patch makes both @unit_size and @dyn_size ssize_t and uses -1 for auto-sizing. Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index c29f301d388..ef3a2cd3fe6 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -344,7 +344,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", pcpu4k_nr_static_pages, static_size); - ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, + ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, -1, -1, NULL, pcpu4k_populate_pte); goto out_free_ar; -- cgit v1.2.3 From 9a4f8a878b68d5a5d9ee60908a52cf6a55e1b823 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Mar 2009 14:33:59 +0900 Subject: x86: make embedding percpu allocator return excessive free space Impact: reduce unnecessary memory usage on certain configurations The embedding percpu allocator allocates unit_size * smp_num_possible_cpus() bytes consecutively and uses it for the first chunk. However, if the static area is small, this can result in excessive preallocated free space in the first chunk due to the PCPU_MIN_UNIT_SIZE restriction. This patch makes the embedding percpu allocator preallocate only what's necessary as described by PERCPU_DYNAMIC_RESERVE and return the leftover to the bootmem allocator.
Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 44 +++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ef3a2cd3fe6..38e2b2a470a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -241,24 +241,31 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) * Embedding allocator * * The first chunk is sized to just contain the static area plus - * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using - * bootmem allocator and used as-is without being mapped into vmalloc - * area. This enables the first chunk to piggy back on the linear - * physical PMD mapping and doesn't add any additional pressure to - * TLB. + * module and dynamic reserves, and allocated as a contiguous area + * using bootmem allocator and used as-is without being mapped into + * vmalloc area. This enables the first chunk to piggy back on the + * linear physical PMD mapping and doesn't add any additional pressure + * to TLB. Note that if the needed size is smaller than the minimum + * unit size, the leftover is returned to the bootmem allocator. */ static void *pcpue_ptr __initdata; +static size_t pcpue_size __initdata; static size_t pcpue_unit_size __initdata; static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) { - return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size - + ((size_t)pageno << PAGE_SHIFT)); + size_t off = (size_t)pageno << PAGE_SHIFT; + + if (off >= pcpue_size) + return NULL; + + return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off); } static ssize_t __init setup_pcpu_embed(size_t static_size) { unsigned int cpu; + size_t dyn_size; /* * If large page isn't supported, there's no benefit in doing @@ -269,25 +276,30 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) return -EINVAL; /* allocate and copy */ - pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); - pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE); + pcpue_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); + pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); + dyn_size = pcpue_size - static_size; + pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, PAGE_SIZE); if (!pcpue_ptr) return -ENOMEM; - for_each_possible_cpu(cpu) - memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load, - static_size); + for_each_possible_cpu(cpu) { + void *ptr = pcpue_ptr + cpu * pcpue_unit_size; + + free_bootmem(__pa(ptr + pcpue_size), + pcpue_unit_size - pcpue_size); + memcpy(ptr, __per_cpu_load, static_size); + } /* we're ready, commit */ pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", - pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size); + pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); return pcpu_setup_first_chunk(pcpue_get_page, static_size, - pcpue_unit_size, - pcpue_unit_size - static_size, pcpue_ptr, - NULL); + pcpue_unit_size, dyn_size, + pcpue_ptr, NULL); } /* -- cgit v1.2.3 From edcb463997ed7b2ffa3bac76e3e75957318f2e01 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Mar 2009 14:33:59 +0900 Subject: percpu, module: implement reserved allocation and use it for module percpu variables Impact: add reserved allocation functionality and use it for module percpu variables This patch implements reserved allocation from the first chunk. 
When setting up the first chunk, the arch can ask to set aside a certain number of bytes right after the core static area; this region is available only through a separate reserved allocator. This will be used primarily for module static percpu variables on architectures with limited relocation range to ensure that the module percpu symbols are inside the relocatable range. If a reserved area is requested, the first chunk becomes reserved and isn't available for regular allocation. If the first chunk also includes a piggy-backed dynamic allocation area, a separate chunk mapping the same region is created to serve dynamic allocation. The first one is called the static first chunk and the second the dynamic first chunk. Although they share the page map, their different area map initializations guarantee they serve disjoint areas according to their purposes. If the arch doesn't set up a reserved area, reserved allocation is handled like any other allocation. Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 38e2b2a470a..dd4eabc747c 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -217,7 +217,7 @@ proceed: pr_info("PERCPU: Remapped at %p with large pages, static data " "%zu bytes\n", vm.addr, static_size); - ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE, + ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 0, PMD_SIZE, pcpur_size - static_size, vm.addr, NULL); goto out_free_ar; @@ -297,7 +297,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); - return pcpu_setup_first_chunk(pcpue_get_page, static_size, + return pcpu_setup_first_chunk(pcpue_get_page, static_size, 0, pcpue_unit_size, dyn_size, pcpue_ptr, NULL); } @@ -356,8 +356,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", pcpu4k_nr_static_pages, static_size); - ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, -1, -1, NULL, - pcpu4k_populate_pte); + ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, -1, -1, + NULL, pcpu4k_populate_pte); goto out_free_ar; enomem: -- cgit v1.2.3 From 6b19b0c2400437a3c10059ede0e59b517092e1bd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Mar 2009 14:33:59 +0900 Subject: x86, percpu: setup reserved percpu area for x86_64 Impact: fix relocation overflow during module load x86_64 uses 32bit relocations for symbol access, so static percpu symbols, whether in the core kernel or in modules, must be within 2GB of the percpu segment base, which the dynamic percpu allocator doesn't guarantee. This patch makes x86_64 reserve PERCPU_MODULE_RESERVE bytes in the first chunk so that module percpu areas are always allocated from the first chunk, which is always inside the relocatable range. This problem exists for any percpu allocator but is easily triggered when using the embedding allocator because the second chunk is located beyond 2GB on it. This patch also changes the meaning of PERCPU_DYNAMIC_RESERVE such that it only indicates the size of the area to reserve for dynamic allocation, as static and dynamic areas can be separate.
The new PERCPU_DYNAMIC_RESERVE is increased by 4k for both 32 and 64 bits, as the reserved area separation eats away some allocatable space and having slightly more headroom (currently between 4 and 8k after a minimal boot, sans module area) makes sense for common-case performance. x86_32 can address anywhere from anywhere and doesn't need the reservation. Mike Galbraith reported the problem first and bisected it to the embedding percpu allocator commit. Signed-off-by: Tejun Heo Reported-by: Mike Galbraith Reported-by: Jaswinder Singh Rajput --- arch/x86/kernel/setup_percpu.c | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index dd4eabc747c..efa615f2bf4 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -42,6 +42,19 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { }; EXPORT_SYMBOL(__per_cpu_offset); +/* + * On x86_64 symbols referenced from code should be reachable using + * 32bit relocations. Reserve space for static percpu variables in + * modules so that they are always served from the first chunk which + * is located at the percpu segment base. On x86_32, anything can + * address anywhere. No need to reserve space in the first chunk. + */ +#ifdef CONFIG_X86_64 +#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE +#else +#define PERCPU_FIRST_CHUNK_RESERVE 0 +#endif + /** * pcpu_need_numa - determine percpu allocation needs to consider NUMA * @@ -141,7 +154,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) { static struct vm_struct vm; pg_data_t *last; - size_t ptrs_size; + size_t ptrs_size, dyn_size; unsigned int cpu; ssize_t ret; @@ -169,12 +182,14 @@ proceed: * Currently supports only single page. Supporting multiple * pages won't be too difficult if it ever becomes necessary.
*/ - pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); + pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + + PERCPU_DYNAMIC_RESERVE); if (pcpur_size > PMD_SIZE) { pr_warning("PERCPU: static data is larger than large page, " "can't use large page\n"); return -EINVAL; } + dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; /* allocate pointer array and alloc large pages */ ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); @@ -217,8 +232,9 @@ proceed: pr_info("PERCPU: Remapped at %p with large pages, static data " "%zu bytes\n", vm.addr, static_size); - ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 0, PMD_SIZE, - pcpur_size - static_size, vm.addr, NULL); + ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, + PERCPU_FIRST_CHUNK_RESERVE, + PMD_SIZE, dyn_size, vm.addr, NULL); goto out_free_ar; enomem: @@ -276,9 +292,10 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) return -EINVAL; /* allocate and copy */ - pcpue_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); + pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + + PERCPU_DYNAMIC_RESERVE); pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); - dyn_size = pcpue_size - static_size; + dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, PAGE_SIZE); @@ -297,7 +314,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); - return pcpu_setup_first_chunk(pcpue_get_page, static_size, 0, + return pcpu_setup_first_chunk(pcpue_get_page, static_size, + PERCPU_FIRST_CHUNK_RESERVE, pcpue_unit_size, dyn_size, pcpue_ptr, NULL); } @@ -356,8 +374,9 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", pcpu4k_nr_static_pages, static_size); - ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, -1, -1, - NULL, pcpu4k_populate_pte); + ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, + PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL, + pcpu4k_populate_pte); goto out_free_ar; enomem: -- cgit v1.2.3 From 075901af281b2afb47b1423ac488e713844db396 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Fri, 6 Mar 2009 14:37:34 +0900 Subject: sh: Restore RTC IRQ setting for SH7763 setup. This was accidentally dropped in the multiple vectors per irq conversion. Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 14a916f0d75..bdf0f61ae1e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -23,6 +23,7 @@ static struct resource rtc_resources[] = { }, [1] = { /* Shared Period/Carry/Alarm IRQ */ + .start = 20, .flags = IORESOURCE_IRQ, }, }; -- cgit v1.2.3 From bb943a286c6f2993a737cc44c170d8b26e72c8ff Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 17:58:51 +0900 Subject: sh: multiple vectors per irq - sh7203. Follow the conversions as per the other subtypes. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 213 +++++++++++---------------------- 1 file changed, 73 insertions(+), 140 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index e98dc445035..18d127ca0e6 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -1,7 +1,7 @@ /* * SH7203 and SH7263 Setup * - * Copyright (C) 2007 Paul Mundt + * Copyright (C) 2007 - 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -18,50 +18,31 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, - DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI, - DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI, - DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI, - DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI, + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, USB, LCDC, CMT0, CMT1, BSC, WDT, - MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, - MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F, - MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U, - MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, - MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V, - MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V, + + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V, + ADC_ADI, - IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI, - IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI, - IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI, - IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI, - SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, - SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, - SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, - SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, - SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI, - SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI, + + IIC30, IIC31, IIC32, IIC33, + SCIF0, SCIF1, SCIF2, SCIF3, + + SSU0, SSU1, + SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII, /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */ ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY, - FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - - SDHI3, SDHI0, SDHI1, - - RTC_ARM, RTC_PRD, RTC_CUP, - RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE, - RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE, + FLCTL, SDHI3, SDHI0, SDHI1, RTC, RCAN0, RCAN1, SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI, /* interrupt groups */ - PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, - MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, - MTU3_ABCD, MTU4_ABCD, - IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3, - SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC + PINT, ROMDEC, SDHI, SRC }; static struct intc_vect vectors[] __initdata = { @@ -73,68 +54,68 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), - INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109), - INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113), - INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117), - INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121), - INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125), - INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129), - INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133), - 
INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137), + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141), INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143), INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145), - INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147), - INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149), - INTC_IRQ(MTU2_TCI0V, 150), - INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152), - INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154), - INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156), - INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158), - INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160), - INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162), - INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164), + INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147), + INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149), + INTC_IRQ(MTU0_VEF, 150), + INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152), + INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154), + INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156), + INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158), + INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160), + INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162), + INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164), INTC_IRQ(MTU2_TCI3V, 165), - INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167), - INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169), + INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167), + INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169), INTC_IRQ(MTU2_TCI4V, 170), INTC_IRQ(ADC_ADI, 171), - INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173), - INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175), - INTC_IRQ(IIC30_TEI, 176), - INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178), - INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180), - INTC_IRQ(IIC31_TEI, 181), - INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183), - INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185), - INTC_IRQ(IIC32_TEI, 186), - INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188), - INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190), - INTC_IRQ(IIC33_TEI, 191), - INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193), - INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195), - INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197), - INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199), - INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201), - INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203), - INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205), - INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207), - INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209), - INTC_IRQ(SSU0_SSTXI, 210), - INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212), - INTC_IRQ(SSU1_SSTXI, 213), + INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173), + INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175), + INTC_IRQ(IIC30, 176), + INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178), + INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180), + INTC_IRQ(IIC31, 181), + INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183), + INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185), + INTC_IRQ(IIC32, 186), + INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188), + INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190), + INTC_IRQ(IIC33, 191), + INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193), + INTC_IRQ(SCIF0, 
194), INTC_IRQ(SCIF0, 195), + INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197), + INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199), + INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201), + INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203), + INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205), + INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207), + INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209), + INTC_IRQ(SSU0, 210), + INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212), + INTC_IRQ(SSU1, 213), INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215), INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217), - INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225), - INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227), - INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232), - INTC_IRQ(RTC_CUP, 233), - INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235), - INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237), - INTC_IRQ(RCAN0_SLE, 238), - INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240), - INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242), - INTC_IRQ(RCAN1_SLE, 243), + INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225), + INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227), + INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232), + INTC_IRQ(RTC, 233), + INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235), + INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237), + INTC_IRQ(RCAN0, 238), + INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240), + INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242), + INTC_IRQ(RCAN1, 243), /* SH7263-specific trash */ #ifdef CONFIG_CPU_SUBTYPE_SH7263 @@ -154,44 +135,6 @@ static struct intc_vect vectors[] __initdata = { static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), - INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI), - INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI), - INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI), - INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI), - INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI), - INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI), - INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI), - INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI), - INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D), - INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F), - INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B), - INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U), - INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B), - INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U), - INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D), - INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D), - INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, - IIC30_TEI), - INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, - IIC31_TEI), - INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, - IIC32_TEI), - INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, - IIC33_TEI), - INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), - INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), - INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), - INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI), - INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI), - INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, - FLCTL_FLTREQ1I), - INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP), - INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, - RCAN0_SLE), - INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, - RCAN1_SLE), - #ifdef CONFIG_CPU_SUBTYPE_SH7263 INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, 
ROMDEC_IBUF, ROMDEC_IREADY), @@ -242,22 +185,22 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 193, 194, 195, 192 }, + .irqs = { 192, 192, 192, 192 }, }, { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 197, 198, 199, 196 }, + .irqs = { 196, 196, 196, 196 }, }, { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 201, 202, 203, 200 }, + .irqs = { 200, 200, 200, 200 }, }, { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 205, 206, 207, 204 }, + .irqs = { 204, 204, 204, 204 }, }, { .flags = 0, } @@ -278,17 +221,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 232, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 233, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ + /* Shared Period/Carry/Alarm IRQ */ .start = 231, .flags = IORESOURCE_IRQ, }, -- cgit v1.2.3 From e45efe68d11e9f3c836b019a32b879a26a2b9b33 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 18:02:33 +0900 Subject: sh: multiple vectors per irq - sh7263. Convert over the SH7263 IRQ groups as well. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 18d127ca0e6..820dfb2e865 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -34,15 +34,11 @@ enum { SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII, /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */ - ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF, - ROMDEC_IREADY, - - FLCTL, SDHI3, SDHI0, SDHI1, RTC, RCAN0, RCAN1, - - SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI, + ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1, + SRC, IEBI, /* interrupt groups */ - PINT, ROMDEC, SDHI, SRC + PINT, }; static struct intc_vect vectors[] __initdata = { @@ -119,14 +115,15 @@ static struct intc_vect vectors[] __initdata = { /* SH7263-specific trash */ #ifdef CONFIG_CPU_SUBTYPE_SH7263 - INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219), - INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221), - INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223), + INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219), + INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221), + INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223), - INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230), + INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229), + INTC_IRQ(SDHI, 230), - INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245), - INTC_IRQ(SRC_IDEI, 246), + INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245), + INTC_IRQ(SRC, 246), INTC_IRQ(IEBI, 247), #endif @@ -135,12 +132,6 @@ static struct intc_vect vectors[] __initdata = { static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), -#ifdef CONFIG_CPU_SUBTYPE_SH7263 - INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, - ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY), - INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1), - INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI), -#endif }; static struct intc_prio_reg prio_registers[] __initdata = { -- cgit v1.2.3 From d55eedd57d05dbb00e4b66d1c01ea6fac0274c38 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 18:21:38 +0900 Subject: sh: multiple vectors per irq - 
sh7201. Follow the conversions as per the other subtypes. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 224 ++++++++++++--------------------- 1 file changed, 80 insertions(+), 144 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index 0631e421c02..00f42f9e3f5 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -2,6 +2,7 @@ * SH7201 setup * * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk + * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -18,57 +19,32 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + ADC_ADI, - MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, - MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F, - MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U, - MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, - MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V, - MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V, - MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W, - RTC_ARM, RTC_PRD, RTC_CUP, - WDT, - IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI, - IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI, - IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI, + + MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU, + MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V, + + RTC, WDT, + + IIC30, IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1, DMAC2_DMINT2, DMAC3_DMINT3, - SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, - SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, - SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, - SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, - SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI, - SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI, - SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI, - SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI, + SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, - RCAN0_ERS, RCAN0_OVR, - RCAN0_SLE, - RCAN0_RM0, RCAN0_RM1, - - RCAN1_ERS, RCAN1_OVR, - RCAN1_SLE, - RCAN1_RM0, RCAN1_RM1, + RCAN0, RCAN1, SSI0_SSII, SSI1_SSII, - TMR0_CMIA0, TMR0_CMIB0, TMR0_OVI0, - TMR1_CMIA1, TMR1_CMIB1, TMR1_OVI1, + TMR0, TMR1, /* interrupt groups */ - - IRQ, PINT, ADC, - MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU, - MTU23_ABCD, MTU24_ABCD, MTU25_UVW, - RTC, IIC30, IIC31, IIC32, - SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, - RCAN0, RCAN1, TMR0, TMR1 - + PINT, }; static struct intc_vect vectors[] __initdata = { @@ -76,6 +52,7 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), @@ -83,123 +60,92 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(ADC_ADI, 92), - INTC_IRQ(MTU2_TGI0A, 108), INTC_IRQ(MTU2_TGI0B, 109), - INTC_IRQ(MTU2_TGI0C, 110), INTC_IRQ(MTU2_TGI0D, 111), - INTC_IRQ(MTU2_TCI0V, 112), - INTC_IRQ(MTU2_TGI0E, 113), INTC_IRQ(MTU2_TGI0F, 114), + INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109), + INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111), + + INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113), + INTC_IRQ(MTU20_VEF, 114), 
+ + INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117), + INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121), - INTC_IRQ(MTU2_TGI1A, 116), INTC_IRQ(MTU2_TGI1B, 117), - INTC_IRQ(MTU2_TCI1V, 120), INTC_IRQ(MTU2_TCI1U, 121), + INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125), + INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129), - INTC_IRQ(MTU2_TGI2A, 124), INTC_IRQ(MTU2_TGI2B, 125), - INTC_IRQ(MTU2_TCI2V, 128), INTC_IRQ(MTU2_TCI2U, 129), + INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133), + INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135), - INTC_IRQ(MTU2_TGI3A, 132), INTC_IRQ(MTU2_TGI3B, 133), - INTC_IRQ(MTU2_TGI3C, 134), INTC_IRQ(MTU2_TGI3D, 135), INTC_IRQ(MTU2_TCI3V, 136), - INTC_IRQ(MTU2_TGI4A, 140), INTC_IRQ(MTU2_TGI4B, 141), - INTC_IRQ(MTU2_TGI4C, 142), INTC_IRQ(MTU2_TGI4D, 143), + INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141), + INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143), + INTC_IRQ(MTU2_TCI4V, 144), - INTC_IRQ(MTU2_TGI5U, 148), INTC_IRQ(MTU2_TGI5V, 149), - INTC_IRQ(MTU2_TGI5W, 150), + INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149), + INTC_IRQ(MTU25_UVW, 150), + + INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153), + INTC_IRQ(RTC, 154), - INTC_IRQ(RTC_ARM, 152), INTC_IRQ(RTC_PRD, 153), - INTC_IRQ(RTC_CUP, 154), INTC_IRQ(WDT, 156), + INTC_IRQ(WDT, 156), - INTC_IRQ(IIC30_STPI, 157), INTC_IRQ(IIC30_NAKI, 158), - INTC_IRQ(IIC30_RXI, 159), INTC_IRQ(IIC30_TXI, 160), - INTC_IRQ(IIC30_TEI, 161), + INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158), + INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160), + INTC_IRQ(IIC30, 161), - INTC_IRQ(IIC31_STPI, 164), INTC_IRQ(IIC31_NAKI, 165), - INTC_IRQ(IIC31_RXI, 166), INTC_IRQ(IIC31_TXI, 167), - INTC_IRQ(IIC31_TEI, 168), + INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165), + INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167), + INTC_IRQ(IIC31, 168), - INTC_IRQ(IIC32_STPI, 170), INTC_IRQ(IIC32_NAKI, 171), - INTC_IRQ(IIC32_RXI, 172), INTC_IRQ(IIC32_TXI, 173), - INTC_IRQ(IIC32_TEI, 174), + INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171), + INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173), + INTC_IRQ(IIC32, 174), INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177), INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179), - INTC_IRQ(SCIF0_BRI, 180), INTC_IRQ(SCIF0_ERI, 181), - INTC_IRQ(SCIF0_RXI, 182), INTC_IRQ(SCIF0_TXI, 183), - INTC_IRQ(SCIF1_BRI, 184), INTC_IRQ(SCIF1_ERI, 185), - INTC_IRQ(SCIF1_RXI, 186), INTC_IRQ(SCIF1_TXI, 187), - INTC_IRQ(SCIF2_BRI, 188), INTC_IRQ(SCIF2_ERI, 189), - INTC_IRQ(SCIF2_RXI, 190), INTC_IRQ(SCIF2_TXI, 191), - INTC_IRQ(SCIF3_BRI, 192), INTC_IRQ(SCIF3_ERI, 193), - INTC_IRQ(SCIF3_RXI, 194), INTC_IRQ(SCIF3_TXI, 195), - INTC_IRQ(SCIF4_BRI, 196), INTC_IRQ(SCIF4_ERI, 197), - INTC_IRQ(SCIF4_RXI, 198), INTC_IRQ(SCIF4_TXI, 199), - INTC_IRQ(SCIF5_BRI, 200), INTC_IRQ(SCIF5_ERI, 201), - INTC_IRQ(SCIF5_RXI, 202), INTC_IRQ(SCIF5_TXI, 203), - INTC_IRQ(SCIF6_BRI, 204), INTC_IRQ(SCIF6_ERI, 205), - INTC_IRQ(SCIF6_RXI, 206), INTC_IRQ(SCIF6_TXI, 207), - INTC_IRQ(SCIF7_BRI, 208), INTC_IRQ(SCIF7_ERI, 209), - INTC_IRQ(SCIF7_RXI, 210), INTC_IRQ(SCIF7_TXI, 211), + INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181), + INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183), + INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185), + INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187), + INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189), + INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191), + INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193), + INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195), + INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197), + INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199), + INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201), + INTC_IRQ(SCIF5, 
202), INTC_IRQ(SCIF5, 203), + INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205), + INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207), + INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209), + INTC_IRQ(SCIF7, 210), INTC_IRQ(SCIF7, 211), INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216), INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218), INTC_IRQ(DMAC7_DMINT7, 219), - INTC_IRQ(RCAN0_ERS, 228), INTC_IRQ(RCAN0_OVR, 229), - INTC_IRQ(RCAN0_SLE, 230), - INTC_IRQ(RCAN0_RM0, 231), INTC_IRQ(RCAN0_RM1, 232), + INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229), + INTC_IRQ(RCAN0, 230), + INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232), - INTC_IRQ(RCAN1_ERS, 234), INTC_IRQ(RCAN1_OVR, 235), - INTC_IRQ(RCAN1_SLE, 236), - INTC_IRQ(RCAN1_RM0, 237), INTC_IRQ(RCAN1_RM1, 238), + INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235), + INTC_IRQ(RCAN1, 236), + INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238), INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245), - INTC_IRQ(TMR0_CMIA0, 246), INTC_IRQ(TMR0_CMIB0, 247), - INTC_IRQ(TMR0_OVI0, 248), - - INTC_IRQ(TMR1_CMIA1, 252), INTC_IRQ(TMR1_CMIB1, 253), - INTC_IRQ(TMR1_OVI1, 254), + INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247), + INTC_IRQ(TMR0, 248), + INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253), + INTC_IRQ(TMR1, 254), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), - INTC_GROUP(MTU20_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D), - INTC_GROUP(MTU20_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F), - - INTC_GROUP(MTU21_AB, MTU2_TGI1A, MTU2_TGI1B), - INTC_GROUP(MTU21_VU, MTU2_TCI1V, MTU2_TCI1U), - INTC_GROUP(MTU22_AB, MTU2_TGI2A, MTU2_TGI2B), - INTC_GROUP(MTU22_VU, MTU2_TCI2V, MTU2_TCI2U), - INTC_GROUP(MTU23_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D), - INTC_GROUP(MTU24_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D), - INTC_GROUP(MTU25_UVW, MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W), - INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP ), - - INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, - IIC30_TEI), - INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, - IIC31_TEI), - INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, - IIC32_TEI), - - INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), - INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), - INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), - INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI), - INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI), - INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI), - INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI), - - INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, - RCAN0_SLE), - INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, - RCAN1_SLE), - - INTC_GROUP(TMR0, TMR0_CMIA0, TMR0_CMIB0, TMR0_OVI0), - INTC_GROUP(TMR1, TMR1_CMIA1, TMR1_CMIB1, TMR1_OVI1), }; static struct intc_prio_reg prio_registers[] __initdata = { @@ -212,7 +158,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } }, { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } }, - { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0 , SCIF1 } }, + { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } }, { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } }, { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, 
DMAC0_DMINTA, DMAC4_DMINT4 } }, { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } }, @@ -234,42 +180,42 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 181, 182, 183, 180} + .irqs = { 180, 180, 180, 180 } }, { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 185, 186, 187, 184} + .irqs = { 184, 184, 184, 184 } }, { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 189, 186, 187, 188} + .irqs = { 188, 188, 188, 188 } }, { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 193, 194, 195, 192} + .irqs = { 192, 192, 192, 192 } }, { .mapbase = 0xfffea000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 196, 198, 199, 196} + .irqs = { 196, 196, 196, 196 } }, { .mapbase = 0xfffea800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 201, 202, 203, 200} + .irqs = { 200, 200, 200, 200 } }, { .mapbase = 0xfffeb000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 205, 206, 207, 204} + .irqs = { 204, 204, 204, 204 } }, { .mapbase = 0xfffeb800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 209, 210, 211, 208} + .irqs = { 208, 208, 208, 208 } }, { .flags = 0, } @@ -290,17 +236,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 153, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 154, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ + /* Shared Period/Carry/Alarm IRQ */ .start = 152, .flags = IORESOURCE_IRQ, }, -- cgit v1.2.3 From f858abbecd0a3ec0ab3e3678612d626a0bd49686 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 18:34:15 +0900 Subject: sh: multiple vectors per irq - sh7206. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 154 +++++++++++++-------------------- 1 file changed, 58 insertions(+), 96 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index e6d4ec445dd..c46a8355726 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -2,6 +2,7 @@ * SH7206 Setup * * Copyright (C) 2006 Yoshinori Sato + * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -19,34 +20,23 @@ enum { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, ADC_ADI0, ADC_ADI1, - DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI, - DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI, - DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI, - DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI, + + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, + + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S, + IIC3, + CMT0, CMT1, BSC, WDT, - MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, - MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F, - MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U, - MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, - MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V, - MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V, - MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W, - POE2_OEI1, POE2_OEI2, - MTU2S_TGI3A, MTU2S_TGI3B, MTU2S_TGI3C, MTU2S_TGI3D, MTU2S_TCI3V, - MTU2S_TGI4A, MTU2S_TGI4B, MTU2S_TGI4C, MTU2S_TGI4D, MTU2S_TCI4V, - MTU2S_TGI5U, MTU2S_TGI5V, MTU2S_TGI5W, + + MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V, + POE2_OEI3, - IIC3_STPI, IIC3_NAKI, IIC3_RXI, IIC3_TXI, IIC3_TEI, - SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, - SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, - SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, - SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, + + SCIF0, SCIF1, SCIF2, SCIF3, /* interrupt groups */ - PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, - MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, - MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S, - IIC3, SCIF0, SCIF1, SCIF2, SCIF3, + PINT, }; static struct intc_vect vectors[] __initdata = { @@ -59,86 +49,58 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96), - INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109), - INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113), - INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117), - INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121), - INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125), - INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129), - INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133), - INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137), + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144), INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152), - INTC_IRQ(MTU2_TGI0A, 156), INTC_IRQ(MTU2_TGI0B, 157), - INTC_IRQ(MTU2_TGI0C, 158), INTC_IRQ(MTU2_TGI0D, 159), - INTC_IRQ(MTU2_TCI0V, 160), - INTC_IRQ(MTU2_TGI0E, 161), INTC_IRQ(MTU2_TGI0F, 162), - INTC_IRQ(MTU2_TGI1A, 164), INTC_IRQ(MTU2_TGI1B, 165), - INTC_IRQ(MTU2_TCI1V, 168), INTC_IRQ(MTU2_TCI1U, 169), - INTC_IRQ(MTU2_TGI2A, 172), INTC_IRQ(MTU2_TGI2B, 173), - INTC_IRQ(MTU2_TCI2V, 176), INTC_IRQ(MTU2_TCI2U, 177), - INTC_IRQ(MTU2_TGI3A, 180), INTC_IRQ(MTU2_TGI3B, 181), - INTC_IRQ(MTU2_TGI3C, 182), INTC_IRQ(MTU2_TGI3D, 183), + INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157), + INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159), + 
INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161), + INTC_IRQ(MTU0_VEF, 162), + INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165), + INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169), + INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173), + INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177), + INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181), + INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183), INTC_IRQ(MTU2_TCI3V, 184), - INTC_IRQ(MTU2_TGI4A, 188), INTC_IRQ(MTU2_TGI4B, 189), - INTC_IRQ(MTU2_TGI4C, 190), INTC_IRQ(MTU2_TGI4D, 191), + INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189), + INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191), INTC_IRQ(MTU2_TCI4V, 192), - INTC_IRQ(MTU2_TGI5U, 196), INTC_IRQ(MTU2_TGI5V, 197), - INTC_IRQ(MTU2_TGI5W, 198), - INTC_IRQ(POE2_OEI1, 200), INTC_IRQ(POE2_OEI2, 201), - INTC_IRQ(MTU2S_TGI3A, 204), INTC_IRQ(MTU2S_TGI3B, 205), - INTC_IRQ(MTU2S_TGI3C, 206), INTC_IRQ(MTU2S_TGI3D, 207), + INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197), + INTC_IRQ(MTU5, 198), + INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201), + INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205), + INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207), INTC_IRQ(MTU2S_TCI3V, 208), - INTC_IRQ(MTU2S_TGI4A, 212), INTC_IRQ(MTU2S_TGI4B, 213), - INTC_IRQ(MTU2S_TGI4C, 214), INTC_IRQ(MTU2S_TGI4D, 215), + INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213), + INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215), INTC_IRQ(MTU2S_TCI4V, 216), - INTC_IRQ(MTU2S_TGI5U, 220), INTC_IRQ(MTU2S_TGI5V, 221), - INTC_IRQ(MTU2S_TGI5W, 222), + INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221), + INTC_IRQ(MTU5S, 222), INTC_IRQ(POE2_OEI3, 224), - INTC_IRQ(IIC3_STPI, 228), INTC_IRQ(IIC3_NAKI, 229), - INTC_IRQ(IIC3_RXI, 230), INTC_IRQ(IIC3_TXI, 231), - INTC_IRQ(IIC3_TEI, 232), - INTC_IRQ(SCIF0_BRI, 240), INTC_IRQ(SCIF0_ERI, 241), - INTC_IRQ(SCIF0_RXI, 242), INTC_IRQ(SCIF0_TXI, 243), - INTC_IRQ(SCIF1_BRI, 244), INTC_IRQ(SCIF1_ERI, 245), - INTC_IRQ(SCIF1_RXI, 246), INTC_IRQ(SCIF1_TXI, 247), - INTC_IRQ(SCIF2_BRI, 248), INTC_IRQ(SCIF2_ERI, 249), - INTC_IRQ(SCIF2_RXI, 250), INTC_IRQ(SCIF2_TXI, 251), - INTC_IRQ(SCIF3_BRI, 252), INTC_IRQ(SCIF3_ERI, 253), - INTC_IRQ(SCIF3_RXI, 254), INTC_IRQ(SCIF3_TXI, 255), + INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229), + INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231), + INTC_IRQ(IIC3, 232), + INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241), + INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243), + INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245), + INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247), + INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249), + INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251), + INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253), + INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), - INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI), - INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI), - INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI), - INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI), - INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI), - INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI), - INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI), - INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI), - INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D), - INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F), - INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B), - INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U), - INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B), - INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U), - INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D), - INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, 
MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D), - INTC_GROUP(MTU5, MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W), - INTC_GROUP(POE2_12, POE2_OEI1, POE2_OEI2), - INTC_GROUP(MTU3S_ABCD, MTU2S_TGI3A, MTU2S_TGI3B, - MTU2S_TGI3C, MTU2S_TGI3D), - INTC_GROUP(MTU4S_ABCD, MTU2S_TGI4A, MTU2S_TGI4B, - MTU2S_TGI4C, MTU2S_TGI4D), - INTC_GROUP(MTU5S, MTU2S_TGI5U, MTU2S_TGI5V, MTU2S_TGI5W), - INTC_GROUP(IIC3, IIC3_STPI, IIC3_NAKI, IIC3_RXI, IIC3_TXI, IIC3_TEI), - INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), - INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), - INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), }; static struct intc_prio_reg prio_registers[] __initdata = { @@ -174,22 +136,22 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 241, 242, 243, 240 }, + .irqs = { 240, 240, 240, 240 }, }, { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 245, 246, 247, 244 }, + .irqs = { 244, 244, 244, 244 }, }, { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 249, 250, 251, 248 }, + .irqs = { 248, 248, 248, 248 }, }, { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 253, 254, 255, 252 }, + .irqs = { 252, 252, 252, 252 }, }, { .flags = 0, } -- cgit v1.2.3 From 5dece2bbda728fbf6a70e48be782c24c427072b0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:19:31 +0900 Subject: sh: multiple vectors per irq - sh7619. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 56e5878e551..0e32d8e448c 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -2,6 +2,7 @@ * SH7619 Setup * * Copyright (C) 2006 Yoshinori Sato + * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -18,15 +19,10 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, WDT, EDMAC, CMT0, CMT1, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, - SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, + SCIF0, SCIF1, SCIF2, HIF_HIFI, HIF_HIFBI, DMAC0, DMAC1, DMAC2, DMAC3, SIOF, - - /* interrupt groups */ - SCIF0, SCIF1, SCIF2, }; static struct intc_vect vectors[] __initdata = { @@ -36,24 +32,18 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83), INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85), INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87), - INTC_IRQ(SCIF0_ERI, 88), INTC_IRQ(SCIF0_RXI, 89), - INTC_IRQ(SCIF0_BRI, 90), INTC_IRQ(SCIF0_TXI, 91), - INTC_IRQ(SCIF1_ERI, 92), INTC_IRQ(SCIF1_RXI, 93), - INTC_IRQ(SCIF1_BRI, 94), INTC_IRQ(SCIF1_TXI, 95), - INTC_IRQ(SCIF2_ERI, 96), INTC_IRQ(SCIF2_RXI, 97), - INTC_IRQ(SCIF2_BRI, 98), INTC_IRQ(SCIF2_TXI, 99), + INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89), + INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91), + INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93), + INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95), + INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97), + INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99), INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101), INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105), INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107), INTC_IRQ(SIOF, 108), }; -static struct intc_group groups[] __initdata = { - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), - INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), -}; - static struct intc_prio_reg prio_registers[] __initdata = { { 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, @@ -64,7 +54,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } }, }; -static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, NULL, prio_registers, NULL); static struct plat_sci_port sci_platform_data[] = { @@ -72,17 +62,17 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xf8400000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 88, 89, 91, 90}, + .irqs = { 88, 88, 88, 88 }, }, { .mapbase = 0xf8410000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 92, 93, 95, 94}, + .irqs = { 92, 92, 92, 92 }, }, { .mapbase = 0xf8420000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 96, 97, 99, 98}, + .irqs = { 96, 96, 96, 96 }, }, { .flags = 0, } -- cgit v1.2.3 From 053bfc5360c106528bbb57464ad2cbf4a2af8133 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:19:54 +0900 Subject: sh: multiple vectors per irq - mxg. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2a/setup-mxg.c | 65 +++++++++++++------------------------ 1 file changed, 23 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index e611d79fac4..844293723cf 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -1,7 +1,7 @@ /* * Renesas MX-G (R8A03022BG) Setup * - * Copyright (C) 2008 Paul Mundt + * Copyright (C) 2008, 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -20,23 +20,15 @@ enum { IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, - SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1, - SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, - SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, + SCIF0, SCIF1, - MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, - MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F, - MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U, - MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, - MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V, - MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V, - MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W, + MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5 + MTU2_TGI3B, MTU2_TGI3C, /* interrupt groups */ - PINT, SCIF0, SCIF1, - MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5 + PINT, }; static struct intc_vect vectors[] __initdata = { @@ -59,47 +51,36 @@ static struct intc_vect vectors[] __initdata = { INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99), INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101), - INTC_IRQ(SCIF0_RXI, 220), INTC_IRQ(SCIF0_TXI, 221), - INTC_IRQ(SCIF0_BRI, 222), INTC_IRQ(SCIF0_ERI, 223), - INTC_IRQ(SCIF1_RXI, 224), INTC_IRQ(SCIF1_TXI, 225), - INTC_IRQ(SCIF1_BRI, 226), INTC_IRQ(SCIF1_ERI, 227), + INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221), + INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223), + INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225), + INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227), - INTC_IRQ(MTU2_TGI0A, 228), INTC_IRQ(MTU2_TGI0B, 229), - INTC_IRQ(MTU2_TGI0C, 230), INTC_IRQ(MTU2_TGI0D, 231), - INTC_IRQ(MTU2_TCI0V, 232), INTC_IRQ(MTU2_TGI0E, 233), + INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229), + INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231), + INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233), - INTC_IRQ(MTU2_TGI0F, 234), INTC_IRQ(MTU2_TGI1A, 235), - INTC_IRQ(MTU2_TGI1B, 236), INTC_IRQ(MTU2_TCI1V, 237), - INTC_IRQ(MTU2_TCI1U, 238), INTC_IRQ(MTU2_TGI2A, 239), + INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235), + INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237), + INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239), - INTC_IRQ(MTU2_TGI2B, 240), INTC_IRQ(MTU2_TCI2V, 241), - INTC_IRQ(MTU2_TCI2U, 242), INTC_IRQ(MTU2_TGI3A, 243), + INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241), + INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243), INTC_IRQ(MTU2_TGI3B, 244), INTC_IRQ(MTU2_TGI3C, 245), - INTC_IRQ(MTU2_TGI3D, 246), INTC_IRQ(MTU2_TCI3V, 247), - INTC_IRQ(MTU2_TGI4A, 248), INTC_IRQ(MTU2_TGI4B, 249), - INTC_IRQ(MTU2_TGI4C, 250), INTC_IRQ(MTU2_TGI4D, 251), + INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247), + INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249), + INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251), - INTC_IRQ(MTU2_TCI4V, 252), INTC_IRQ(MTU2_TGI5U, 253), - INTC_IRQ(MTU2_TGI5V, 254), INTC_IRQ(MTU2_TGI5W, 255), + INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253), + INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), - INTC_GROUP(MTU2_GROUP1, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, - MTU2_TCI0V, MTU2_TGI0E), - INTC_GROUP(MTU2_GROUP2, MTU2_TGI0F, MTU2_TGI1A, MTU2_TGI1B, - MTU2_TCI1V, MTU2_TCI1U, MTU2_TGI2A), - INTC_GROUP(MTU2_GROUP3, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, - MTU2_TGI3A), - INTC_GROUP(MTU2_GROUP4, MTU2_TGI3D, MTU2_TCI3V, MTU2_TGI4A, - MTU2_TGI4B, 
MTU2_TGI4C, MTU2_TGI4D), - INTC_GROUP(MTU2_GROUP5, MTU2_TCI4V, MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W), - INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), }; static struct intc_prio_reg prio_registers[] __initdata = { @@ -137,7 +118,7 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xff804000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 223, 220, 221, 222 }, + .irqs = { 220, 220, 220, 220 }, }, { .flags = 0, } -- cgit v1.2.3 From 592acbda89f40a93f7d71564cdf1da83229d226e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:20:14 +0900 Subject: sh: multiple vectors per irq - sh770x. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 68 +++++++++++------------------------ 1 file changed, 21 insertions(+), 47 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 93c55e2ed95..a74f960b5e7 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -2,6 +2,7 @@ * SH3 Setup code for SH7706, SH7707, SH7708, SH7709 * * Copyright (C) 2007 Magnus Damm + * Copyright (C) 2009 Paul Mundt * * Based on setup-sh7709.c * @@ -24,46 +25,37 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, PINT07, PINT815, - DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, - SCI_ERI, SCI_RXI, SCI_TXI, SCI_TEI, - ADC_ADI, + DMAC, SCIF0, SCIF2, SCI, ADC_ADI, LCDC, PCC0, PCC1, - TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, - RTC_ATI, RTC_PRI, RTC_CUI, - WDT, - REF_RCMI, REF_ROVI, - - /* interrupt groups */ - RTC, REF, TMU2, DMAC, SCI, SCIF2, SCIF0, + TMU0, TMU1, TMU2, + RTC, WDT, REF, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), - INTC_VECT(SCI_ERI, 0x4e0), INTC_VECT(SCI_RXI, 0x500), - INTC_VECT(SCI_TXI, 0x520), INTC_VECT(SCI_TEI, 0x540), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCI, 0x4e0), INTC_VECT(SCI, 0x500), + INTC_VECT(SCI, 0x520), INTC_VECT(SCI, 0x540), INTC_VECT(WDT, 0x560), - INTC_VECT(REF_RCMI, 0x580), - INTC_VECT(REF_ROVI, 0x5a0), + INTC_VECT(REF, 0x580), + INTC_VECT(REF, 0x5a0), #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7709) /* IRQ0->5 are handled in setup-sh3.c */ - INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), - INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), + INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820), + INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860), INTC_VECT(ADC_ADI, 0x980), - INTC_VECT(SCIF2_ERI, 0x900), INTC_VECT(SCIF2_RXI, 0x920), - INTC_VECT(SCIF2_BRI, 0x940), INTC_VECT(SCIF2_TXI, 0x960), + INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920), + INTC_VECT(SCIF2, 0x940), INTC_VECT(SCIF2, 0x960), #endif #if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7709) INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720), - INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), - INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0), #endif #if 
defined(CONFIG_CPU_SUBTYPE_SH7707) INTC_VECT(LCDC, 0x9a0), @@ -71,16 +63,6 @@ static struct intc_vect vectors[] __initdata = { #endif }; -static struct intc_group groups[] __initdata = { - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), - INTC_GROUP(REF, REF_RCMI, REF_ROVI), - INTC_GROUP(DMAC, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3), - INTC_GROUP(SCI, SCI_ERI, SCI_RXI, SCI_TXI, SCI_TEI), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), -}; - static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } }, @@ -101,7 +83,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { #endif }; -static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, NULL, NULL, prio_registers, NULL); static struct resource rtc_resources[] = { @@ -111,14 +93,6 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { .start = 20, .flags = IORESOURCE_IRQ, }, @@ -136,7 +110,7 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xfffffe80, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCI, - .irqs = { 23, 24, 25, 0 }, + .irqs = { 23, 23, 23, 0 }, }, #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ @@ -145,7 +119,7 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xa4000150, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 56, 57, 59, 58 }, + .irqs = { 56, 56, 56, 56 }, }, #endif #if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ @@ -154,7 +128,7 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xa4000140, .flags = UPF_BOOT_AUTOCONF, .type = PORT_IRDA, - .irqs = { 52, 53, 55, 54 }, + .irqs = { 52, 52, 52, 52 }, }, #endif { -- cgit v1.2.3 From 0caedb02c4a02b6fd0f1a62dfeb917353d3c6162 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:20:32 +0900 Subject: sh: multiple vectors per irq - sh7705. 
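The conversions in this series (sh7619, mxg, sh770x, sh7705, sh7710, sh7720) all follow the same mechanical pattern: every hardware vector belonging to a peripheral is registered against a single enum ID, the INTC_GROUP() indirection goes away, and the per-event IRQ numbers in the SCI platform data collapse to one shared IRQ per port. The serial driver then has to tell the individual events apart from the peripheral's own status flags rather than from distinct vectors; the sh-sci driver handles this in its muxed-interrupt path. A hypothetical C sketch of that demultiplexing step (the register offset and flag names below are illustrative only, not the real sh-sci code):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/io.h>

/* Illustration only: the offset and flag names are made up. */
#define SCIF_STATUS	0x10		/* hypothetical status register */
#define STATUS_ERR	(1 << 7)	/* hypothetical error/break flag */
#define STATUS_RX	(1 << 1)	/* hypothetical receive-data flag */
#define STATUS_TX	(1 << 5)	/* hypothetical transmit-empty flag */

static irqreturn_t scif_muxed_irq(int irq, void *dev_id)
{
	void __iomem *base = dev_id;
	unsigned short status = readw(base + SCIF_STATUS);

	if (status & STATUS_ERR)
		pr_debug("scif: error/break event\n");
	if (status & STATUS_RX)
		pr_debug("scif: receive FIFO has data\n");
	if (status & STATUS_TX)
		pr_debug("scif: transmit FIFO wants data\n");

	return IRQ_HANDLED;
}

/*
 * With the collapsed tables, a port registers this single handler once
 * on its muxed IRQ (irqs[0] in the platform data) instead of hooking
 * four separate per-event vectors.
 */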
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/setup-sh7705.c | 61 +++++++++++------------------------ 1 file changed, 19 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 6468ae86b94..63b67badd67 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -1,7 +1,7 @@ /* * SH7705 Setup * - * Copyright (C) 2006, 2007 Paul Mundt + * Copyright (C) 2006 - 2009 Paul Mundt * Copyright (C) 2007 Nobuhiro Iwamatsu * * This file is subject to the terms and conditions of the GNU General Public @@ -21,51 +21,36 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, PINT07, PINT815, - DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3, - SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, - SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, - ADC_ADI, - USB_USI0, USB_USI1, + + DMAC, SCIF0, SCIF2, ADC_ADI, USB, + TPU0, TPU1, TPU2, TPU3, - TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, - RTC_ATI, RTC_PRI, RTC_CUI, - WDT, - REF_RCMI, + TMU0, TMU1, TMU2, - /* interrupt groups */ - RTC, TMU2, DMAC, USB, SCIF2, SCIF0, + RTC, WDT, REF_RCMI, }; static struct intc_vect vectors[] __initdata = { /* IRQ0->5 are handled in setup-sh3.c */ INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720), - INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), - INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), - INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), - INTC_VECT(SCIF0_TXI, 0x8e0), - INTC_VECT(SCIF2_ERI, 0x900), INTC_VECT(SCIF2_RXI, 0x920), - INTC_VECT(SCIF2_TXI, 0x960), + INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820), + INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8e0), + INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920), + INTC_VECT(SCIF2, 0x960), INTC_VECT(ADC_ADI, 0x980), - INTC_VECT(USB_USI0, 0xa20), INTC_VECT(USB_USI1, 0xa40), + INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40), INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20), INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580), }; -static struct intc_group groups[] __initdata = { - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), - INTC_GROUP(DMAC, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3), - INTC_GROUP(USB, USB_USI0, USB_USI1), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), - INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), -}; - static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } }, @@ -78,7 +63,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { }; -static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL, NULL, prio_registers, NULL); static struct plat_sci_port sci_platform_data[] = { @@ -86,12 +71,12 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xa4410000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 56, 57, 59 }, + .irqs = { 56, 56, 56 }, }, { .mapbase = 0xa4400000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 
52, 53, 55 }, + .irqs = { 52, 52, 52 }, }, { .flags = 0, } @@ -115,14 +100,6 @@ static struct resource rtc_resources[] = { .start = 20, .flags = IORESOURCE_IRQ, }, - [2] = { - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [3] = { - .start = 22, - .flags = IORESOURCE_IRQ, - }, }; static struct sh_rtc_platform_info rtc_info = { -- cgit v1.2.3 From 56d604defa91684509ac842317ea14501f224299 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:20:48 +0900 Subject: sh: multiple vectors per irq - sh7710. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/setup-sh7710.c | 69 +++++++++++------------------------ 1 file changed, 21 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index 77eee481de4..335098b66e2 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -1,7 +1,7 @@ /* * SH3 Setup code for SH7710, SH7712 * - * Copyright (C) 2006, 2007 Paul Mundt + * Copyright (C) 2006 - 2009 Paul Mundt * Copyright (C) 2007 Nobuhiro Iwamatsu * * This file is subject to the terms and conditions of the GNU General Public @@ -20,59 +20,40 @@ enum { /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, - DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3, - SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, - SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, - DMAC_DEI4, DMAC_DEI5, - IPSEC, + DMAC1, SCIF0, SCIF1, DMAC2, IPSEC, EDMAC0, EDMAC1, EDMAC2, - SIOF0_ERI, SIOF0_TXI, SIOF0_RXI, SIOF0_CCI, - SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI, - TMU0, TMU1, TMU2, - RTC_ATI, RTC_PRI, RTC_CUI, - WDT, - REF, + SIOF0, SIOF1, - /* interrupt groups */ - RTC, DMAC1, SCIF0, SCIF1, DMAC2, SIOF0, SIOF1, + TMU0, TMU1, TMU2, + RTC, WDT, REF, }; static struct intc_vect vectors[] __initdata = { /* IRQ0->5 are handled in setup-sh3.c */ - INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), - INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), - INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), - INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0), - INTC_VECT(SCIF1_ERI, 0x900), INTC_VECT(SCIF1_RXI, 0x920), - INTC_VECT(SCIF1_BRI, 0x940), INTC_VECT(SCIF1_TXI, 0x960), - INTC_VECT(DMAC_DEI4, 0xb80), INTC_VECT(DMAC_DEI5, 0xba0), + INTC_VECT(DMAC1, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC1, 0x840), INTC_VECT(DMAC1, 0x860), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0), + INTC_VECT(SCIF1, 0x900), INTC_VECT(SCIF1, 0x920), + INTC_VECT(SCIF1, 0x940), INTC_VECT(SCIF1, 0x960), + INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0), #ifdef CONFIG_CPU_SUBTYPE_SH7710 INTC_VECT(IPSEC, 0xbe0), #endif INTC_VECT(EDMAC0, 0xc00), INTC_VECT(EDMAC1, 0xc20), INTC_VECT(EDMAC2, 0xc40), - INTC_VECT(SIOF0_ERI, 0xe00), INTC_VECT(SIOF0_TXI, 0xe20), - INTC_VECT(SIOF0_RXI, 0xe40), INTC_VECT(SIOF0_CCI, 0xe60), - INTC_VECT(SIOF1_ERI, 0xe80), INTC_VECT(SIOF1_TXI, 0xea0), - INTC_VECT(SIOF1_RXI, 0xec0), INTC_VECT(SIOF1_CCI, 0xee0), + INTC_VECT(SIOF0, 0xe00), INTC_VECT(SIOF0, 0xe20), + INTC_VECT(SIOF0, 0xe40), INTC_VECT(SIOF0, 0xe60), + INTC_VECT(SIOF1, 0xe80), INTC_VECT(SIOF1, 0xea0), + INTC_VECT(SIOF1, 0xec0), INTC_VECT(SIOF1, 0xee0), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), INTC_VECT(WDT, 0x560), INTC_VECT(REF, 0x580), }; -static struct intc_group groups[] 
__initdata = { - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(DMAC1, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3), - INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), - INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), - INTC_GROUP(DMAC2, DMAC_DEI4, DMAC_DEI5), - INTC_GROUP(SIOF0, SIOF0_ERI, SIOF0_TXI, SIOF0_RXI, SIOF0_CCI), - INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI), -}; - static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } }, @@ -85,7 +66,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } }, }; -static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, NULL, NULL, prio_registers, NULL); static struct resource rtc_resources[] = { @@ -98,14 +79,6 @@ static struct resource rtc_resources[] = { .start = 20, .flags = IORESOURCE_IRQ, }, - [2] = { - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [3] = { - .start = 22, - .flags = IORESOURCE_IRQ, - }, }; static struct sh_rtc_platform_info rtc_info = { @@ -127,12 +100,12 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xa4400000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 52, 53, 55, 54 }, + .irqs = { 52, 52, 52, 52 }, }, { .mapbase = 0xa4410000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 56, 57, 59, 58 }, + .irqs = { 56, 56, 56, 56 }, }, { .flags = 0, -- cgit v1.2.3 From edab56f4c90f45ba4e61d06d3fc9658da4e94bde Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 6 Mar 2009 19:21:02 +0900 Subject: sh: multiple vectors per irq - sh7720. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/setup-sh7720.c | 68 ++++++++++++----------------------- 1 file changed, 23 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index f807a21b066..003874a2fd2 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -2,6 +2,7 @@ * SH7720 Setup * * Copyright (C) 2007 Markus Brunner, Mark Jonas + * Copyright (C) 2009 Paul Mundt * * Based on arch/sh/kernel/cpu/sh4/setup-sh7750.c: * @@ -26,17 +27,7 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ + /* Shared Period/Carry/Alarm IRQ */ .start = 20, .flags = IORESOURCE_IRQ, }, @@ -150,62 +141,49 @@ enum { UNUSED = 0, /* interrupt sources */ - TMU0, TMU1, TMU2, RTC_ATI, RTC_PRI, RTC_CUI, - WDT, REF_RCMI, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND, + TMU0, TMU1, TMU2, RTC, + WDT, REF_RCMI, SIM, IRQ0, IRQ1, IRQ2, IRQ3, USBF_SPD, TMU_SUNI, IRQ5, IRQ4, - DMAC1_DEI0, DMAC1_DEI1, DMAC1_DEI2, DMAC1_DEI3, LCDC, SSL, - ADC, DMAC2_DEI4, DMAC2_DEI5, USBFI0, USBFI1, CMT, + DMAC1, LCDC, SSL, + ADC, DMAC2, USBFI, CMT, SCIF0, SCIF1, - PINT07, PINT815, TPU0, TPU1, TPU2, TPU3, IIC, - SIOF0, SIOF1, MMCI0, MMCI1, MMCI2, MMCI3, PCC, + PINT07, PINT815, TPU, IIC, + SIOF0, SIOF1, MMC, PCC, USBHI, AFEIF, H_UDI, - /* interrupt groups */ - TMU, RTC, SIM, DMAC1, USBFI, DMAC2, USB, TPU, MMC, }; static struct intc_vect vectors[] __initdata = { /* IRQ0->5 are handled in setup-sh3.c */ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2, 0x440), INTC_VECT(RTC_ATI, 
0x480), - INTC_VECT(RTC_PRI, 0x4a0), INTC_VECT(RTC_CUI, 0x4c0), - INTC_VECT(SIM_ERI, 0x4e0), INTC_VECT(SIM_RXI, 0x500), - INTC_VECT(SIM_TXI, 0x520), INTC_VECT(SIM_TEND, 0x540), + INTC_VECT(TMU2, 0x440), INTC_VECT(RTC, 0x480), + INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0), + INTC_VECT(SIM, 0x4e0), INTC_VECT(SIM, 0x500), + INTC_VECT(SIM, 0x520), INTC_VECT(SIM, 0x540), INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580), /* H_UDI cannot be masked */ INTC_VECT(TMU_SUNI, 0x6c0), - INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800), - INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840), - INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900), + INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1, 0x800), + INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC1, 0x840), + INTC_VECT(DMAC1, 0x860), INTC_VECT(LCDC, 0x900), #if defined(CONFIG_CPU_SUBTYPE_SH7720) INTC_VECT(SSL, 0x980), #endif - INTC_VECT(USBFI0, 0xa20), INTC_VECT(USBFI1, 0xa40), + INTC_VECT(USBFI, 0xa20), INTC_VECT(USBFI, 0xa40), INTC_VECT(USBHI, 0xa60), - INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0), + INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0), INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80), INTC_VECT(PINT815, 0xca0), INTC_VECT(SIOF0, 0xd00), - INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU0, 0xd80), - INTC_VECT(TPU1, 0xda0), INTC_VECT(TPU2, 0xdc0), - INTC_VECT(TPU3, 0xde0), INTC_VECT(IIC, 0xe00), - INTC_VECT(MMCI0, 0xe80), INTC_VECT(MMCI1, 0xea0), - INTC_VECT(MMCI2, 0xec0), INTC_VECT(MMCI3, 0xee0), + INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU, 0xd80), + INTC_VECT(TPU, 0xda0), INTC_VECT(TPU, 0xdc0), + INTC_VECT(TPU, 0xde0), INTC_VECT(IIC, 0xe00), + INTC_VECT(MMC, 0xe80), INTC_VECT(MMC, 0xea0), + INTC_VECT(MMC, 0xec0), INTC_VECT(MMC, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(PCC, 0xf60), INTC_VECT(AFEIF, 0xfe0), }; -static struct intc_group groups[] __initdata = { - INTC_GROUP(TMU, TMU0, TMU1, TMU2), - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND), - INTC_GROUP(DMAC1, DMAC1_DEI0, DMAC1_DEI1, DMAC1_DEI2, DMAC1_DEI3), - INTC_GROUP(USBFI, USBFI0, USBFI1), - INTC_GROUP(DMAC2, DMAC2_DEI4, DMAC2_DEI5), - INTC_GROUP(TPU, TPU0, TPU1, TPU2, TPU3), - INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3), -}; - static struct intc_prio_reg prio_registers[] __initdata = { { 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } }, @@ -219,7 +197,7 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xA4080008UL, 0, 16, 4, /* IPRJ */ { 0, USBHI, 0, AFEIF } }, }; -static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups, +static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, NULL, NULL, prio_registers, NULL); void __init plat_irq_setup(void) -- cgit v1.2.3 From f8cf8176c7fc2c790e900595755b93e30633754d Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Wed, 4 Mar 2009 00:49:31 +0000 Subject: ASoC: Add s3c64xx-i2s support Add the initial code to support the S3C64XX I2S hardware using the s3c-i2s-v2 core code. 
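The two IMS definitions added below select the root clock feeding the S3C64xx IIS block: either PCLK or the output of the system clock mux. A minimal sketch of how a driver might program that field, assuming regs is the ioremap()ed base of the IIS block and that S3C2412_IISMOD is the mode-register offset defined elsewhere in this header (the real selection logic lives in the s3c-i2s-v2 / s3c64xx-i2s driver code on the ASoC side):

#include <linux/io.h>
#include <linux/types.h>

static void s3c64xx_iis_select_sysmux(void __iomem *regs, bool use_sysmux)
{
	u32 iismod = readl(regs + S3C2412_IISMOD);

	/* The existing MASTER/SLAVE definitions show a two-bit field at
	 * bit 10, so clear both bits before setting the IMS value. */
	iismod &= ~(3 << 10);
	iismod |= use_sysmux ? S3C64XX_IISMOD_IMS_SYSMUX
			     : S3C64XX_IISMOD_IMS_PCLK;

	writel(iismod, regs + S3C2412_IISMOD);
}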
Signed-off-by: Ben Dooks Signed-off-by: Mark Brown --- arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h index 25d4058bcfe..a5600b381d4 100644 --- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h +++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h @@ -33,6 +33,9 @@ #define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1) #define S3C2412_IISCON_IIS_ACTIVE (1 << 0) +#define S3C64XX_IISMOD_IMS_PCLK (0 << 10) +#define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10) + #define S3C2412_IISMOD_MASTER_INTERNAL (0 << 10) #define S3C2412_IISMOD_MASTER_EXTERNAL (1 << 10) #define S3C2412_IISMOD_SLAVE (2 << 10) -- cgit v1.2.3 From 20a41eac4fbaa22d051d0fbaeaf3315d2d8c4860 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 4 Mar 2009 21:16:57 +0100 Subject: ASoC: Fix name of register bit in pxa-ssp A bit in PXA's SSCR0 register was erroneously named ADC but its name is in fact ACS (audio clock select). Signed-off-by: Daniel Mack Signed-off-by: Mark Brown --- arch/arm/mach-pxa/include/mach/regs-ssp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-pxa/include/mach/regs-ssp.h b/arch/arm/mach-pxa/include/mach/regs-ssp.h index 3c04cde2cf1..cacdcae451e 100644 --- a/arch/arm/mach-pxa/include/mach/regs-ssp.h +++ b/arch/arm/mach-pxa/include/mach/regs-ssp.h @@ -47,7 +47,7 @@ #define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */ #define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */ #define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ -#define SSCR0_ADC (1 << 30) /* Audio clock select */ +#define SSCR0_ACS (1 << 30) /* Audio clock select */ #define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ #endif -- cgit v1.2.3 From b0c5033f02182d1e9634edc737df88b82264e820 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 5 Mar 2009 14:21:26 +0100 Subject: ASoC: add two more bitfields for PXA SSP Add two more bitfields for the PSP register. As they seem to exist for PXA3xx only, define them conditionally. Signed-off-by: Daniel Mack Signed-off-by: Mark Brown --- arch/arm/mach-pxa/include/mach/regs-ssp.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-pxa/include/mach/regs-ssp.h b/arch/arm/mach-pxa/include/mach/regs-ssp.h index cacdcae451e..f43905a2773 100644 --- a/arch/arm/mach-pxa/include/mach/regs-ssp.h +++ b/arch/arm/mach-pxa/include/mach/regs-ssp.h @@ -106,6 +106,11 @@ #define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ #define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ +#if defined(CONFIG_PXA3xx) +#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */ +#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */ +#endif + #define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ #define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ #define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ -- cgit v1.2.3 From 3a450de1365d20afde406f0d9b2931a5e4a4fd6a Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Fri, 6 Mar 2009 17:30:56 -0600 Subject: x86: UV: remove uv_flush_tlb_others() WARN_ON In uv_flush_tlb_others() (arch/x86/kernel/tlb_uv.c), the "WARN_ON(!in_atomic())" fails if CONFIG_PREEMPT is not enabled. And CONFIG_PREEMPT is not enabled by default in the distribution that most UV owners will use. 
We could #ifdef CONFIG_PREEMPT the warning, but that is not good form. And there seems to be no suitable fix to in_atomic() when CONFIG_PREMPT is not on. As Ingo commented: > and we have no proper primitive to test for atomicity. (mainly > because we dont know about atomicity on a non-preempt kernel) So we drop the WARN_ON. Signed-off-by: Cliff Wickman Signed-off-by: Tejun Heo Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_uv.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index f04549afcfe..d038b9c45cf 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -314,8 +314,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, int locals = 0; struct bau_desc *bau_desc; - WARN_ON(!in_atomic()); - cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); uv_cpu = uv_blade_processor_id(); -- cgit v1.2.3 From eac84739721857f4d5be3d9127f4644f16a9bea4 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 9 Mar 2009 17:47:13 +0000 Subject: ASoC: Fix Samsung S3C2412_IISMOD_SDF_{MSB,LSB} definitions The definitions of S3C2412_IISMOD_SDF_MSB and S3C2412_IISMOD_SDF_LSB are incorrect, being the same S3C2412_IISMOD_SDF_IIS which is the only correct one in this series. Signed-off-by: Ben Dooks Signed-off-by: Mark Brown --- arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h index a5600b381d4..0fad7571030 100644 --- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h +++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h @@ -47,8 +47,8 @@ #define S3C2412_IISMOD_LR_LLOW (0 << 7) #define S3C2412_IISMOD_LR_RLOW (1 << 7) #define S3C2412_IISMOD_SDF_IIS (0 << 5) -#define S3C2412_IISMOD_SDF_MSB (0 << 5) -#define S3C2412_IISMOD_SDF_LSB (0 << 5) +#define S3C2412_IISMOD_SDF_MSB (1 << 5) +#define S3C2412_IISMOD_SDF_LSB (2 << 5) #define S3C2412_IISMOD_SDF_MASK (3 << 5) #define S3C2412_IISMOD_RCLK_256FS (0 << 3) #define S3C2412_IISMOD_RCLK_512FS (1 << 3) -- cgit v1.2.3 From 2ef7f0dab6b3d171b6aff00a47077385ae3155b5 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 6 Mar 2009 09:47:02 +0000 Subject: sh: hibernation support Add Suspend-to-disk / swsusp / CONFIG_HIBERNATION support to the SuperH architecture. To suspend, use "swapon /dev/sda2; echo disk > /sys/power/state" To resume, pass "resume=/dev/sda2" on the kernel command line. The patch "pm: rework includes, remove arch ifdefs V2" is needed to allow the generic swsusp code to build properly. Hibernation is not enabled with this patch though, a patch setting ARCH_HIBERNATION_POSSIBLE will be submitted later. 
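For readers who would rather not trace the SH assembly, the page-copy loop at the top of swsusp_arch_resume() in the new swsusp.S does, in rough C terms, what the sketch below shows: walk restore_pblist and copy each saved page back over its original location (the struct pbe fields match the PBE_* asm-offsets added here). It remains in assembly because, roughly speaking, it restores pages that may include the kernel's live stack and data, so it runs with a private stack area and restores the saved registers afterwards.

#include <linux/suspend.h>	/* struct pbe, restore_pblist */
#include <linux/string.h>
#include <asm/page.h>

/* Illustration only -- a C rendering of the assembly copy loop. */
static void swsusp_copy_pages_sketch(void)
{
	struct pbe *pbe;

	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
}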
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/sections.h | 1 + arch/sh/include/asm/suspend.h | 13 ++++ arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/asm-offsets.c | 8 +++ arch/sh/kernel/cpu/sh3/Makefile | 2 + arch/sh/kernel/cpu/sh3/entry.S | 22 ++++-- arch/sh/kernel/cpu/sh3/swsusp.S | 147 ++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4/Makefile | 1 + arch/sh/kernel/swsusp.c | 38 +++++++++++ 9 files changed, 228 insertions(+), 5 deletions(-) create mode 100644 arch/sh/include/asm/suspend.h create mode 100644 arch/sh/kernel/cpu/sh3/swsusp.S create mode 100644 arch/sh/kernel/swsusp.c (limited to 'arch') diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 8f8f4ad400d..01a4076a371 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -3,6 +3,7 @@ #include +extern void __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char _ebss[]; diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h new file mode 100644 index 00000000000..5d6d8aba568 --- /dev/null +++ b/arch/sh/include/asm/suspend.h @@ -0,0 +1,13 @@ +#ifndef _ASM_SH_SUSPEND_H +#define _ASM_SH_SUSPEND_H + +static inline int arch_prepare_suspend(void) { return 0; } + +#include + +struct swsusp_arch_regs { + struct pt_regs user_regs; + unsigned long bank1_regs[8]; +}; + +#endif /* _ASM_SH_SUSPEND_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 2e1b86e16ab..82a3a150c00 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -30,5 +30,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o +obj-$(CONFIG_HIBERNATION) += swsusp.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index 57cf0e0680f..99aceb28ee2 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c @@ -12,8 +12,10 @@ #include #include #include +#include #include +#include int main(void) { @@ -25,5 +27,11 @@ int main(void) DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); +#ifdef CONFIG_HIBERNATION + DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); + DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address)); + DEFINE(PBE_NEXT, offsetof(struct pbe, next)); + DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs)); +#endif return 0; } diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile index e07c69e16d9..ecab274141a 100644 --- a/arch/sh/kernel/cpu/sh3/Makefile +++ b/arch/sh/kernel/cpu/sh3/Makefile @@ -4,6 +4,8 @@ obj-y := ex.o probe.o entry.o setup-sh3.o +obj-$(CONFIG_HIBERNATION) += swsusp.o + # CPU subtype setup obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index c4829d6dee5..fba6ac20bb1 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -216,7 +216,7 @@ ENTRY(sh_bios_handler) ! r9 trashed ! BL=0 on entry, on exit BL=1 (depending on r8). -restore_regs: +ENTRY(restore_regs) mov.l @r15+, r0 mov.l @r15+, r1 mov.l @r15+, r2 @@ -362,8 +362,10 @@ general_exception: nop ! Save registers / Switch to bank 0 + mov.l k4, k2 ! keep vector in k2 + mov.l 1f, k4 ! 
SR bits to clear in k4 bsr save_regs ! needs original pr value in k3 - mov k4, k2 ! keep vector in k2 + nop bra handle_exception_special nop @@ -471,6 +473,7 @@ handle_exception: ! Save registers / Switch to bank 0 mov.l 5f, k2 ! vector register address + mov.l 1f, k4 ! SR bits to clear in k4 bsr save_regs ! needs original pr value in k3 mov.l @k2, k2 ! read out vector and keep in k2 @@ -495,10 +498,10 @@ handle_exception_special: ! k0 contains original stack pointer* ! k1 trashed ! k3 passes original pr* -! k4 trashed +! k4 passes SR bitmask ! BL=1 on entry, on exit BL=0. -save_regs: +ENTRY(save_regs) mov #-1, r1 mov.l k1, @-r15 ! set TRA (default: -1) sts.l macl, @-r15 @@ -518,8 +521,16 @@ save_regs: mov.l r8, @-r15 mov.l 0f, k3 ! SR bits to set in k3 - mov.l 1f, k4 ! SR bits to clear in k4 + ! fall-through + +! save_low_regs() +! - modify SR for bank switch +! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack +! k3 passes bits to set in SR +! k4 passes bits to clear in SR + +ENTRY(save_low_regs) stc sr, r8 or k3, r8 and k4, r8 @@ -565,6 +576,7 @@ ENTRY(handle_interrupt) PREF(k0) ! Save registers / Switch to bank 0 + mov.l 1f, k4 ! SR bits to clear in k4 bsr save_regs ! needs original pr value in k3 mov #-1, k2 ! default vector kept in k2 diff --git a/arch/sh/kernel/cpu/sh3/swsusp.S b/arch/sh/kernel/cpu/sh3/swsusp.S new file mode 100644 index 00000000000..01145426a2b --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/swsusp.S @@ -0,0 +1,147 @@ +/* + * arch/sh/kernel/cpu/sh3/swsusp.S + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +#define k0 r0 +#define k1 r1 +#define k2 r2 +#define k3 r3 +#define k4 r4 + +! swsusp_arch_resume() +! - copy restore_pblist pages +! - restore registers from swsusp_arch_regs_cpu0 + +ENTRY(swsusp_arch_resume) + mov.l 1f, r15 + mov.l 2f, r4 + mov.l @r4, r4 + +swsusp_copy_loop: + mov r4, r0 + cmp/eq #0, r0 + bt swsusp_restore_regs + + mov.l @(PBE_ADDRESS, r4), r2 + mov.l @(PBE_ORIG_ADDRESS, r4), r5 + + mov #(PAGE_SIZE >> 10), r3 + shll8 r3 + shlr2 r3 /* PAGE_SIZE / 16 */ +swsusp_copy_page: + dt r3 + mov.l @r2+,r1 /* 16n+0 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+4 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+8 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+12 */ + mov.l r1,@r5 + bf/s swsusp_copy_page + add #4,r5 + + bra swsusp_copy_loop + mov.l @(PBE_NEXT, r4), r4 + +swsusp_restore_regs: + ! BL=0: R7->R0 is bank0 + mov.l 3f, r8 + mov.l 4f, r5 + jsr @r5 + nop + + ! BL=1: R7->R0 is bank1 + lds k2, pr + ldc k3, ssr + + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + rte + nop + ! BL=0: R7->R0 is bank0 + + .align 2 +1: .long swsusp_arch_regs_cpu0 +2: .long restore_pblist +3: .long 0x20000000 ! RB=1 +4: .long restore_regs + +! swsusp_arch_suspend() +! - prepare pc for resume, return from function without swsusp_save on resume +! - save registers in swsusp_arch_regs_cpu0 +! - call swsusp_save write suspend image + +ENTRY(swsusp_arch_suspend) + sts pr, r0 ! save pr in r0 + mov r15, r2 ! save sp in r2 + mov r8, r5 ! save r8 in r5 + stc sr, r1 + ldc r1, ssr ! save sr in ssr + mov.l 1f, r1 + ldc r1, spc ! setup pc value for resuming + mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack + mov.l 6f, r3 + add r3, r15 ! 
save from top of structure + + ! BL=0: R7->R0 is bank0 + mov.l 2f, r3 ! get new SR value for bank1 + mov #0, r4 + mov.l 7f, r1 + jsr @r1 ! switch to bank1 and save bank1 r7->r0 + not r4, r4 + + ! BL=1: R7->R0 is bank1 + stc r2_bank, k0 ! fetch old sp from r2_bank0 + mov.l 3f, k4 ! SR bits to clear in k4 + mov.l 8f, k1 + jsr @k1 ! switch to bank0 and save all regs + stc r0_bank, k3 ! fetch old pr from r0_bank0 + + ! BL=0: R7->R0 is bank0 + mov r2, r15 ! restore old sp + mov r5, r8 ! restore old r8 + stc ssr, r1 + ldc r1, sr ! restore old sr + lds r0, pr ! restore old pr + mov.l 4f, r0 + jmp @r0 + nop + +swsusp_call_save: + mov r2, r15 ! restore old sp + mov r5, r8 ! restore old r8 + lds r0, pr ! restore old pr + rts + mov #0, r0 + + .align 2 +1: .long swsusp_call_save +2: .long 0x20000000 ! RB=1 +3: .long 0xdfffffff ! RB=0 +4: .long swsusp_save +5: .long swsusp_arch_regs_cpu0 +6: .long SWSUSP_ARCH_REGS_SIZE +7: .long save_low_regs +8: .long save_regs diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index d608557c7a3..203b18347b8 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile @@ -5,6 +5,7 @@ obj-y := probe.o common.o common-y += $(addprefix ../sh3/, entry.o ex.o) +obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o) obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o obj-$(CONFIG_SH_STORE_QUEUES) += sq.o diff --git a/arch/sh/kernel/swsusp.c b/arch/sh/kernel/swsusp.c new file mode 100644 index 00000000000..12b64a0f2f0 --- /dev/null +++ b/arch/sh/kernel/swsusp.c @@ -0,0 +1,38 @@ +/* + * swsusp.c - SuperH hibernation support + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct swsusp_arch_regs swsusp_arch_regs_cpu0; + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; + unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; + + return (pfn >= begin_pfn) && (pfn < end_pfn); +} + +void save_processor_state(void) +{ + init_fpu(current); +} + +void restore_processor_state(void) +{ + local_flush_tlb_all(); +} -- cgit v1.2.3 From 508407149a7f927c4b65a20e0a08a2a94dc769c6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 10 Mar 2009 06:13:22 +0000 Subject: sh: Show sleep state with Migo-R LEDs If CONFIG_PM is set, let Migo-R LEDs show sleep states. D11 will show STATUS0 and D12 PDSTATUS. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-migor/setup.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 28e56c5809a..bc35b4cae6b 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -450,6 +450,14 @@ static struct spi_board_info migor_spi_devices[] = { static int __init migor_devices_setup(void) { + +#ifdef CONFIG_PM + /* Let D11 LED show STATUS0 */ + gpio_request(GPIO_FN_STATUS0, NULL); + + /* Lit D12 LED show PDSTATUS */ + gpio_request(GPIO_FN_PDSTATUS, NULL); +#else /* Lit D11 LED */ gpio_request(GPIO_PTJ7, NULL); gpio_direction_output(GPIO_PTJ7, 1); @@ -459,6 +467,7 @@ static int __init migor_devices_setup(void) gpio_request(GPIO_PTJ5, NULL); gpio_direction_output(GPIO_PTJ5, 1); gpio_export(GPIO_PTJ5, 0); +#endif /* SMC91C111 - Enable IRQ0, Setup CS4 for 16-bit fast access */ gpio_request(GPIO_FN_IRQ0, NULL); -- cgit v1.2.3 From 2f47f44790a9c8fc43e515df3c6be19a35ee5de5 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 10 Mar 2009 15:49:54 +0900 Subject: sh: Support fixed 32-bit PMB mappings from bootloader. This provides a method for supporting fixed PMB mappings inherited from the bootloader, as an alternative to the dynamic PMB mapping currently used by the kernel. In the future these methods will be combined. P1/P2 area is handled like a regular 29-bit physical address, and local bus device are assigned P3 area addresses. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/boot/Makefile | 20 ++++++++++-------- arch/sh/include/asm/addrspace.h | 4 ++-- arch/sh/include/asm/io.h | 4 ++-- arch/sh/include/asm/page.h | 7 ++++++- arch/sh/kernel/vmlinux_32.lds.S | 5 ++++- arch/sh/mm/Kconfig | 29 +++++++++++++++++++++++++- arch/sh/mm/Makefile_32 | 1 + arch/sh/mm/ioremap_32.c | 6 ++++-- arch/sh/mm/pmb-fixed.c | 45 +++++++++++++++++++++++++++++++++++++++++ 9 files changed, 104 insertions(+), 17 deletions(-) create mode 100644 arch/sh/mm/pmb-fixed.c (limited to 'arch') diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index c16ccd4bfa1..95483d16125 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -33,20 +33,24 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ -ifeq ($(CONFIG_32BIT),y) -KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ - $$[$(CONFIG_PAGE_OFFSET) + \ - $(CONFIG_ZERO_PAGE_OFFSET)]') -else +KERNEL_MEMORY := 0x00000000 +ifeq ($(CONFIG_PMB_FIXED),y) +KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ + $$[$(CONFIG_MEMORY_START) & 0x1fffffff]') +endif +ifeq ($(CONFIG_29BIT),y) +KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ + $$[$(CONFIG_MEMORY_START)]') +endif + KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ - $(CONFIG_MEMORY_START) + \ + $(KERNEL_MEMORY) + \ $(CONFIG_ZERO_PAGE_OFFSET)]') -endif KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ - $(CONFIG_MEMORY_START) + \ + $(KERNEL_MEMORY) + \ $(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]') quiet_cmd_uimage = UIMAGE $@ diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 36736c7e93d..80d40813e05 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -31,7 +31,7 @@ /* Returns the physical address of a PnSEG (n=1,2) address */ #define PHYSADDR(a) (((unsigned long)(a)) & 
0x1fffffff) -#ifdef CONFIG_29BIT +#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) /* * Map an address to a certain privileged segment */ @@ -43,7 +43,7 @@ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) #define P4SEGADDR(a) \ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) -#endif /* 29BIT */ +#endif /* 29BIT || PMB_FIXED */ #endif /* P1SEG */ /* Check if an address can be reached in 29 bits */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 61f6dae4053..0454f8d6805 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr); static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { -#ifdef CONFIG_SUPERH32 +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) unsigned long last_addr = offset + size - 1; #endif void __iomem *ret; @@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) if (ret) return ret; -#ifdef CONFIG_SUPERH32 +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 5871d78e47e..9c6d21ec024 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -129,7 +129,12 @@ typedef struct page *pgtable_t; * is not visible (it is part of the PMB mapping) and so needs to be * added or subtracted as required. */ -#ifdef CONFIG_32BIT +#if defined(CONFIG_PMB_FIXED) +/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ +#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) +#define __pa(x) ((unsigned long)(x) - PMB_OFFSET) +#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET)) +#elif defined(CONFIG_32BIT) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) #else diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S index 7b4b82bd115..d0b2a715cd1 100644 --- a/arch/sh/kernel/vmlinux_32.lds.S +++ b/arch/sh/kernel/vmlinux_32.lds.S @@ -15,7 +15,10 @@ OUTPUT_ARCH(sh) ENTRY(_start) SECTIONS { -#ifdef CONFIG_32BIT +#ifdef CONFIG_PMB_FIXED + . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) + + CONFIG_ZERO_PAGE_OFFSET; +#elif defined(CONFIG_32BIT) . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET; #else . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET; diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 555ec9714b9..10c24356d2d 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -57,7 +57,7 @@ config 32BIT bool default y if CPU_SH5 -config PMB +config PMB_ENABLE bool "Support 32-bit physical addressing through PMB" depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) select 32BIT @@ -67,6 +67,33 @@ config PMB 32-bits through the SH-4A PMB. If this is not set, legacy 29-bit physical addressing will be used. +choice + prompt "PMB handling type" + depends on PMB_ENABLE + default PMB_FIXED + +config PMB + bool "PMB" + depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) + select 32BIT + help + If you say Y here, physical addressing will be extended to + 32-bits through the SH-4A PMB. If this is not set, legacy + 29-bit physical addressing will be used. 
+ +config PMB_FIXED + bool "fixed PMB" + depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \ + CPU_SUBTYPE_SH7785) + select 32BIT + help + If this option is enabled, fixed PMB mappings are inherited + from the boot loader, and the kernel does not attempt dynamic + management. This is the closest to legacy 29-bit physical mode, + and allows systems to support up to 512MiB of system memory. + +endchoice + config X2TLB bool "Enable extended TLB mode" depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index cb2f3f29959..469ff167245 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -35,6 +35,7 @@ endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PMB) += pmb.o +obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o obj-$(CONFIG_NUMA) += numa.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c index 8dc77026a0b..60cc486d2c2 100644 --- a/arch/sh/mm/ioremap_32.c +++ b/arch/sh/mm/ioremap_32.c @@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr)) return (void __iomem *)phys_addr; +#if !defined(CONFIG_PMB_FIXED) /* * Don't allow anybody to remap normal RAM that we're using.. */ if (phys_addr < virt_to_phys(high_memory)) return NULL; +#endif /* * Mappings have to be page-aligned @@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; -#ifdef CONFIG_32BIT +#ifdef CONFIG_PMB /* * First try to remap through the PMB once a valid VMA has been * established. Smaller allocations (or the rest of the size @@ -122,7 +124,7 @@ void __iounmap(void __iomem *addr) if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr)) return; -#ifdef CONFIG_32BIT +#ifdef CONFIG_PMB /* * Purge any PMB entries that may have been established for this * mapping, then proceed with conventional VMA teardown. diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c new file mode 100644 index 00000000000..43c8eac4d8a --- /dev/null +++ b/arch/sh/mm/pmb-fixed.c @@ -0,0 +1,45 @@ +/* + * arch/sh/mm/fixed_pmb.c + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +static int __uses_jump_to_uncached fixed_pmb_init(void) +{ + int i; + unsigned long addr, data; + + jump_to_uncached(); + + for (i = 0; i < PMB_ENTRY_MAX; i++) { + addr = PMB_DATA + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + if (!(data & PMB_V)) + continue; + + if (data & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data &= ~PMB_WT; +#else + data &= ~(PMB_C | PMB_WT); +#endif + } + ctrl_outl(data, addr); + } + + back_to_cached(); + + return 0; +} +arch_initcall(fixed_pmb_init); -- cgit v1.2.3 From df4d4f1a47ed6080c7b4010149126bd1013dc3f1 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 10 Mar 2009 15:50:44 +0900 Subject: sh: sh7785lcr: Updates for fixed PMB. Add a new defconfig for SH7785LCR in 32-bit mode, and update the power off code to avoid 29-bit assumptions. 
Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/boards/Kconfig | 2 +- arch/sh/boards/board-sh7785lcr.c | 10 +- arch/sh/configs/sh7785lcr_32bit_defconfig | 1553 +++++++++++++++++++++++++++++ 3 files changed, 1563 insertions(+), 2 deletions(-) create mode 100644 arch/sh/configs/sh7785lcr_32bit_defconfig (limited to 'arch') diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 694abecf602..bd3569a9934 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -155,7 +155,7 @@ config SH_SH7785LCR config SH_SH7785LCR_29BIT_PHYSMAPS bool "SH7785LCR 29bit physmaps" - depends on SH_SH7785LCR + depends on SH_SH7785LCR && 29BIT default y help This board has 2 physical memory maps. It can be changed with diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index 38a64968d7b..6746a5fba3a 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -275,7 +275,15 @@ void __init init_sh7785lcr_IRQ(void) static void sh7785lcr_power_off(void) { - ctrl_outb(0x01, P2SEGADDR(PLD_POFCR)); + unsigned char *p; + + p = ioremap(PLD_POFCR, PLD_POFCR + 1); + if (!p) { + printk(KERN_ERR "%s: ioremap error.\n", __func__); + return; + } + *p = 0x01; + iounmap(p); } /* Initialize the board */ diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig new file mode 100644 index 00000000000..54e1dee8e24 --- /dev/null +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -0,0 +1,1553 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.29-rc4 +# Fri Feb 20 18:25:29 2009 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +# CONFIG_GENERIC_GPIO is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_SYS_SUPPORTS_NUMA=y +CONFIG_SYS_SUPPORTS_PCI=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_IO_TRAPPED=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +# CONFIG_BLK_DEV_INITRD is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +# 
CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH4=y +CONFIG_CPU_SH4A=y +CONFIG_CPU_SHX2=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +CONFIG_CPU_SUBTYPE_SH7785=y +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x40000000 +CONFIG_MEMORY_SIZE=0x20000000 +# CONFIG_29BIT is not set +CONFIG_32BIT=y +CONFIG_PMB_ENABLE=y +# CONFIG_PMB is not set +CONFIG_PMB_FIXED=y +# CONFIG_X2TLB is not set +CONFIG_VSYSCALL=y +# CONFIG_NUMA is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=2 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set 
+# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +# CONFIG_DISCONTIGMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_STATIC=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_UNEVICTABLE_LRU=y + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +CONFIG_SH_STORE_QUEUES=y +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_PTEA=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +# CONFIG_SH_HIGHLANDER is not set +CONFIG_SH_SH7785LCR=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=28 +CONFIG_SH_PCLK_FREQ=50000000 +CONFIG_TICK_ONESHOT=y +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +CONFIG_HEARTBEAT=y +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_SECCOMP is not set +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_GUSA=y + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_AUTO=y +CONFIG_PCI_AUTO_UPDATE_RESOURCES=y +# CONFIG_PCIEPORTBUS is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +CONFIG_PCI_LEGACY=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCCARD is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +# CONFIG_PM is not set +# CONFIG_CPU_IDLE is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_COMPAT_NET_DEV_OPS=y +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set 
+CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_OLD_REGULATORY is not set +CONFIG_WIRELESS_EXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +# CONFIG_MAC80211 is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash 
memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_QINFO_PROBE is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set +# CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_SATA_PMP=y +# CONFIG_SATA_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y +# CONFIG_SATA_SVW is not set +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SX4 is not set +CONFIG_SATA_SIL=y +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CS5520 is not set +# CONFIG_PATA_CS5530 is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_SC1200 is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set +# 
CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_SCH is not set +# CONFIG_MD is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# Enable only one of the two stacks, unless you know what you are doing +# +# CONFIG_FIREWIRE is not set +# CONFIG_IEEE1394 is not set +# CONFIG_I2O is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +# CONFIG_ARCNET is not set +# CONFIG_NET_ETHERNET is not set +CONFIG_MII=y +CONFIG_NETDEV_1000=y +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_E1000E is not set +# CONFIG_IP1000 is not set +# CONFIG_IGB is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_R8169=y +# CONFIG_SIS190 is not set +# CONFIG_SKGE is not set +# CONFIG_SKY2 is not set +# CONFIG_VIA_VELOCITY is not set +# CONFIG_TIGON3 is not set +# CONFIG_BNX2 is not set +# CONFIG_QLA3XXX is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_JME is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_TR is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set +# CONFIG_IWLWIFI_LEDS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET is not set +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NET_FC is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=m +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SH_KEYSC is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=6 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# 
CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_CHARDEV is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOPCA=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SH_MOBILE is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Graphics adapter I2C/DDC channel drivers +# +# CONFIG_I2C_VOODOO3 is not set + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_PCA_PLATFORM=y +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +CONFIG_MFD_SM501=y +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not 
set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +CONFIG_FB_SH_MOBILE_LCDC=m +CONFIG_FB_SM501=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +# CONFIG_LOGO_SUPERH_CLUT224 is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_DEBUG is not set +# CONFIG_HIDRAW is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# Special HID drivers +# +CONFIG_HID_COMPAT=y +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_GYRATION=y +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set +CONFIG_HID_PANTHERLORD=y +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_GREENASIA_FF is not set +# CONFIG_HID_TOPSEED is not set +CONFIG_THRUSTMASTER_FF=m +CONFIG_ZEROPLUS_FF=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y +CONFIG_USB_ARCH_HAS_EHCI=y +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_EHCI_HCD=m +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +CONFIG_USB_OHCI_HCD=m +# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set +# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +CONFIG_USB_R8A66597_HCD=y +# CONFIG_USB_WHCI_HCD is not set +# CONFIG_USB_HWA_HCD is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# + +# +# see USB_STORAGE Help 
for more information +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_BERRY_CHARGE is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +CONFIG_USB_TEST=m +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set +# CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +CONFIG_RTC_DRV_RS5C372=y +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_SH is not set +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +# 
CONFIG_BTRFS_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=y +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +# 
CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_DEBUG_PREEMPT=y +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set +# CONFIG_DEBUG_BOOTMEM is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_4KSTACKS is not set +# CONFIG_IRQSTACKS is not set +# CONFIG_DUMP_CODE is not set +# CONFIG_SH_NO_BSS_INIT is not set +# CONFIG_MORE_COMPILE_OPTIONS is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# 
CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y -- cgit v1.2.3 From 8ffe31334262108be343d92e81649f9dc3efe826 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 10 Mar 2009 15:51:49 +0900 Subject: sh: pci-sh7780: fix pci memory address for fixed PMB Fix the problem that cannot work a PCI device when 32-bit physical address mode. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/drivers/pci/pci-sh7780.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c index 078dc44d6b0..773d575a04b 100644 --- a/arch/sh/drivers/pci/pci-sh7780.c +++ b/arch/sh/drivers/pci/pci-sh7780.c @@ -127,8 +127,8 @@ int __init sh7780_pcic_init(struct sh4_pci_address_map *map) pci_write_reg(word, SH4_PCILSR0); pci_write_reg(0x00000001, SH4_PCILSR1); /* Set the values on window 0 PCI config registers */ - word = (CONFIG_MEMORY_SIZE > 0x08000000) ? 0x10000000 : 0x08000000; - pci_write_reg(word | 0xa0000000, SH4_PCILAR0); + word = CONFIG_MEMORY_START | (CONFIG_MEMORY_SIZE - 0x01000000); + pci_write_reg(word, SH4_PCILAR0); pci_write_reg(word, SH7780_PCIMBAR0); /* Set the values on window 1 PCI config registers */ pci_write_reg(0x00000000, SH4_PCILAR1); -- cgit v1.2.3 From dc65a977ce59f2c3171ea451b90c7e5effb6737e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 10 Mar 2009 16:26:29 +0900 Subject: sh: Define ARCH_SHMOBILE for SH-Mobile CPUs. 
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index a4c2c845863..adffbf4048b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -179,6 +179,9 @@ config CPU_SHX2 config CPU_SHX3 bool +config ARCH_SHMOBILE + bool + choice prompt "Processor sub-type selection" @@ -330,6 +333,7 @@ config CPU_SUBTYPE_SH7723 bool "Support SH7723 processor" select CPU_SH4A select CPU_SHX2 + select ARCH_SHMOBILE select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_CMT help @@ -377,12 +381,14 @@ config CPU_SUBTYPE_SHX3 config CPU_SUBTYPE_SH7343 bool "Support SH7343 processor" select CPU_SH4AL_DSP + select ARCH_SHMOBILE select SYS_SUPPORTS_CMT config CPU_SUBTYPE_SH7722 bool "Support SH7722 processor" select CPU_SH4AL_DSP select CPU_SHX2 + select ARCH_SHMOBILE select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_CMT @@ -391,6 +397,7 @@ config CPU_SUBTYPE_SH7366 bool "Support SH7366 processor" select CPU_SH4AL_DSP select CPU_SHX2 + select ARCH_SHMOBILE select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_CMT -- cgit v1.2.3 From 19390c4d03688b9940a1836f06b76ec622b9cd6f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 10 Mar 2009 16:27:48 +0900 Subject: linker script: define __per_cpu_load on all SMP capable archs Impact: __per_cpu_load available on all SMP capable archs Percpu now requires three symbols to be defined - __per_cpu_load, __per_cpu_start and __per_cpu_end. There were three archs which didn't have it. Update them as follows. * powerpc: can use generic PERCPU() macro. Compile tested for powerpc32, compile/boot tested for powerpc64. * ia64: can use generic PERCPU_VADDR() macro. __phys_per_cpu_start is identical to __per_cpu_load. Compile tested and symbol table looks identical after the change except for the additional __per_cpu_load. * arm: added explicit __per_cpu_load definition. Currently uses unified .init output section so can't use the generic macro. Dunno whether the unified .init ouput section is required by arch peculiarity so I left it alone. Please break it up and use PERCPU() if possible. Signed-off-by: Tejun Heo Cc: Benjamin Herrenschmidt Cc: Pat Gefre Cc: Russell King --- arch/arm/kernel/vmlinux.lds.S | 1 + arch/ia64/kernel/vmlinux.lds.S | 12 ++---------- arch/powerpc/kernel/vmlinux.lds.S | 9 +-------- 3 files changed, 4 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 85598f7da40..1602373e539 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -64,6 +64,7 @@ SECTIONS __initramfs_end = .; #endif . = ALIGN(4096); + __per_cpu_load = .; __per_cpu_start = .; *(.data.percpu.page_aligned) *(.data.percpu) diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index f45e4e508ec..3765efc5f96 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -213,17 +213,9 @@ SECTIONS { *(.data.cacheline_aligned) } /* Per-cpu data: */ - percpu : { } :percpu . = ALIGN(PERCPU_PAGE_SIZE); - __phys_per_cpu_start = .; - .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) - { - __per_cpu_start = .; - *(.data.percpu.page_aligned) - *(.data.percpu) - *(.data.percpu.shared_aligned) - __per_cpu_end = .; - } + PERCPU_VADDR(PERCPU_ADDR, :percpu) + __phys_per_cpu_start = __per_cpu_load; . 
= __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits * into percpu page size */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 295ccc5e86b..67f07f45338 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -181,14 +181,7 @@ SECTIONS __initramfs_end = .; } #endif - . = ALIGN(PAGE_SIZE); - .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { - __per_cpu_start = .; - *(.data.percpu.page_aligned) - *(.data.percpu) - *(.data.percpu.shared_aligned) - __per_cpu_end = .; - } + PERCPU(PAGE_SIZE) . = ALIGN(8); .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { -- cgit v1.2.3 From e01009833e22dc87075d770554b34d797843ed23 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 10 Mar 2009 16:27:48 +0900 Subject: percpu: make x86 addr <-> pcpu ptr conversion macros generic Impact: generic addr <-> pcpu ptr conversion macros There's nothing arch specific about x86 __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr(). With proper __per_cpu_load and __per_cpu_start defined, they'll do the right thing regardless of actual layout. Move these macros from arch/x86/include/asm/percpu.h to mm/percpu.c and allow archs to override it as necessary. Signed-off-by: Tejun Heo --- arch/x86/include/asm/percpu.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 8f1d2fbec1d..aee103b26d0 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -43,14 +43,6 @@ #else /* ...!ASSEMBLY */ #include -#include - -#define __addr_to_pcpu_ptr(addr) \ - (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ - + (unsigned long)__per_cpu_start) -#define __pcpu_ptr_to_addr(ptr) \ - (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ - - (unsigned long)__per_cpu_start) #ifdef CONFIG_SMP #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x -- cgit v1.2.3 From 6074d5b0a319fe8400ff079a3c289406ca024321 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 10 Mar 2009 16:27:48 +0900 Subject: percpu: more flexibility for @dyn_size of pcpu_setup_first_chunk() Impact: cleanup, more flexibility for first chunk init Non-negative @dyn_size used to be allowed iff @unit_size wasn't auto. This restriction stemmed from implementation detail and made things a bit less intuitive. This patch allows @dyn_size to be specified regardless of @unit_size and swaps the positions of @dyn_size and @unit_size so that the parameter order makes more sense (static, reserved and dyn sizes followed by enclosing unit_size). While at it, add @unit_size >= PCPU_MIN_UNIT_SIZE sanity check. 
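For illustration, a minimal sketch of a caller after this change, using the reordered arguments exactly as the x86 remap hunk below does (static, reserved and dynamic sizes first, then the enclosing unit size):

    /* sketch: argument order after this patch, values taken from the x86 remap path below */
    ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
                                 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
                                 PMD_SIZE, vm.addr, NULL);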
Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index efa615f2bf4..e41c51f6ada 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -233,8 +233,8 @@ proceed: "%zu bytes\n", vm.addr, static_size); ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, - PMD_SIZE, dyn_size, vm.addr, NULL); + PERCPU_FIRST_CHUNK_RESERVE, dyn_size, + PMD_SIZE, vm.addr, NULL); goto out_free_ar; enomem: @@ -315,9 +315,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); return pcpu_setup_first_chunk(pcpue_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, - pcpue_unit_size, dyn_size, - pcpue_ptr, NULL); + PERCPU_FIRST_CHUNK_RESERVE, dyn_size, + pcpue_unit_size, pcpue_ptr, NULL); } /* @@ -375,8 +374,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) pcpu4k_nr_static_pages, static_size); ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL, - pcpu4k_populate_pte); + PERCPU_FIRST_CHUNK_RESERVE, -1, + -1, NULL, pcpu4k_populate_pte); goto out_free_ar; enomem: -- cgit v1.2.3 From 66c3a75772247c31feabefb724e082220a1ab060 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 10 Mar 2009 16:27:48 +0900 Subject: percpu: generalize embedding first chunk setup helper Impact: code reorganization Separate out embedding first chunk setup helper from x86 embedding first chunk allocator and put it in mm/percpu.c. This will be used by the default percpu first chunk allocator and possibly by other archs. Signed-off-by: Tejun Heo --- arch/x86/kernel/setup_percpu.c | 54 +++++------------------------------------- 1 file changed, 6 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index e41c51f6ada..400331b50a5 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -257,31 +257,13 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) * Embedding allocator * * The first chunk is sized to just contain the static area plus - * module and dynamic reserves, and allocated as a contiguous area - * using bootmem allocator and used as-is without being mapped into - * vmalloc area. This enables the first chunk to piggy back on the - * linear physical PMD mapping and doesn't add any additional pressure - * to TLB. Note that if the needed size is smaller than the minimum - * unit size, the leftover is returned to the bootmem allocator. + * module and dynamic reserves and embedded into linear physical + * mapping so that it can use PMD mapping without additional TLB + * pressure. 
*/ -static void *pcpue_ptr __initdata; -static size_t pcpue_size __initdata; -static size_t pcpue_unit_size __initdata; - -static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) -{ - size_t off = (size_t)pageno << PAGE_SHIFT; - - if (off >= pcpue_size) - return NULL; - - return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off); -} - static ssize_t __init setup_pcpu_embed(size_t static_size) { - unsigned int cpu; - size_t dyn_size; + size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; /* * If large page isn't supported, there's no benefit in doing @@ -291,32 +273,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size) if (!cpu_has_pse || pcpu_need_numa()) return -EINVAL; - /* allocate and copy */ - pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + - PERCPU_DYNAMIC_RESERVE); - pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); - dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; - - pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, - PAGE_SIZE); - if (!pcpue_ptr) - return -ENOMEM; - - for_each_possible_cpu(cpu) { - void *ptr = pcpue_ptr + cpu * pcpue_unit_size; - - free_bootmem(__pa(ptr + pcpue_size), - pcpue_unit_size - pcpue_size); - memcpy(ptr, __per_cpu_load, static_size); - } - - /* we're ready, commit */ - pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", - pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); - - return pcpu_setup_first_chunk(pcpue_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, dyn_size, - pcpue_unit_size, pcpue_ptr, NULL); + return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, + reserve - PERCPU_FIRST_CHUNK_RESERVE, -1); } /* -- cgit v1.2.3 From ae68df5635a191c7edb75f5c1c1406353cb24a9f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 10 Mar 2009 17:00:48 +0900 Subject: sh: Generate uImage by default on Urquell board. Signed-off-by: Paul Mundt --- arch/sh/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 4067b0d9287..bece1f7535f 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -80,6 +80,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \ defaultimage-$(CONFIG_SUPERH32) := zImage defaultimage-$(CONFIG_SH_SH7785LCR) := uImage defaultimage-$(CONFIG_SH_RSK) := uImage +defaultimage-$(CONFIG_SH_URQUELL) := uImage defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux -- cgit v1.2.3 From 71b973a42c5456824c8712e00659d9616d395919 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Tue, 10 Mar 2009 17:26:49 +0900 Subject: sh: dma-sh updates for multi IRQ and new SH-4A CPUs. This adds DMA support for newer SH-4A CPUs, particularly SH7763/64/80/85. This also enables multi IRQ support for platforms that have multiple vectors bound to the same IRQ source. 
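As a hedged illustration of the multi IRQ case: on the SH-4A parts listed above several DMTE vectors share one IRQ line, so the transfer-end handler is registered shared and relies on returning IRQ_NONE when its channel did not raise the interrupt. A minimal sketch, mirroring the sh_dmac_request_dma() hunk in the diff below:

    /* sketch: shared registration used when DMAC_IRQ_MULTI is defined */
    err = request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
                      chan->dev_id, chan);
    if (err)
            return err;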
Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/drivers/dma/Kconfig | 8 +- arch/sh/drivers/dma/dma-sh.c | 172 +++++++++++++++++++++---------- arch/sh/drivers/dma/dma-sh.h | 75 -------------- arch/sh/include/asm/dma-sh.h | 117 +++++++++++++++++++++ arch/sh/include/asm/dma.h | 4 +- arch/sh/include/cpu-sh3/cpu/dma.h | 17 ++- arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | 94 +++++++++++++++++ arch/sh/include/cpu-sh4/cpu/dma-sh7780.h | 39 ------- arch/sh/include/cpu-sh4/cpu/dma.h | 30 +++--- 9 files changed, 352 insertions(+), 204 deletions(-) delete mode 100644 arch/sh/drivers/dma/dma-sh.h create mode 100644 arch/sh/include/asm/dma-sh.h create mode 100644 arch/sh/include/cpu-sh4/cpu/dma-sh4a.h delete mode 100644 arch/sh/include/cpu-sh4/cpu/dma-sh7780.h (limited to 'arch') diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index 01936368b8b..57d95fce046 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -12,10 +12,10 @@ config SH_DMA config NR_ONCHIP_DMA_CHANNELS int depends on SH_DMA - default "6" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721 - default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R - default "12" if CPU_SUBTYPE_SH7780 - default "4" + default "4" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || CPU_SUBTYPE_SH7750S + default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R || CPU_SUBTYPE_SH7760 + default "12" if CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 + default "6" help This allows you to specify the number of channels that the on-chip DMAC supports. This will be 4 for SH7750/SH7751 and 8 for the diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 50887a592dd..ab7b18dcbab 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -17,28 +17,23 @@ #include #include #include -#include "dma-sh.h" - -static int dmte_irq_map[] = { - DMTE0_IRQ, - DMTE1_IRQ, - DMTE2_IRQ, - DMTE3_IRQ, -#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) - DMTE4_IRQ, - DMTE5_IRQ, +#include + +#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7764) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) +#define DMAC_IRQ_MULTI 1 #endif -#if defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) - DMTE6_IRQ, - DMTE7_IRQ, + +#if defined(DMAE1_IRQ) +#define NR_DMAE 2 +#else +#define NR_DMAE 1 #endif + +static const char *dmae_name[] = { + "DMAC Address Error0", "DMAC Address Error1" }; static inline unsigned int get_dmte_irq(unsigned int chan) @@ -46,7 +41,14 @@ static inline unsigned int get_dmte_irq(unsigned int chan) unsigned int irq = 0; if (chan < ARRAY_SIZE(dmte_irq_map)) irq = dmte_irq_map[chan]; + +#if defined(DMAC_IRQ_MULTI) + if (irq > DMTE6_IRQ) + return DMTE6_IRQ; + return DMTE0_IRQ; +#else return irq; +#endif } /* @@ -59,7 +61,7 @@ static inline unsigned int get_dmte_irq(unsigned int chan) */ static inline unsigned int calc_xmit_shift(struct dma_channel *chan) { - u32 chcr = ctrl_inl(CHCR[chan->chan]); + u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR); return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT]; } @@ -75,13 +77,13 @@ static irqreturn_t dma_tei(int irq, void *dev_id) struct dma_channel *chan = dev_id; u32 
chcr; - chcr = ctrl_inl(CHCR[chan->chan]); + chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR); if (!(chcr & CHCR_TE)) return IRQ_NONE; chcr &= ~(CHCR_IE | CHCR_DE); - ctrl_outl(chcr, CHCR[chan->chan]); + ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR)); wake_up(&chan->wait_queue); @@ -94,7 +96,12 @@ static int sh_dmac_request_dma(struct dma_channel *chan) return 0; return request_irq(get_dmte_irq(chan->chan), dma_tei, - IRQF_DISABLED, chan->dev_id, chan); +#if defined(DMAC_IRQ_MULTI) + IRQF_SHARED, +#else + IRQF_DISABLED, +#endif + chan->dev_id, chan); } static void sh_dmac_free_dma(struct dma_channel *chan) @@ -115,7 +122,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) chan->flags &= ~DMA_TEI_CAPABLE; } - ctrl_outl(chcr, CHCR[chan->chan]); + ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR)); chan->flags |= DMA_CONFIGURED; return 0; @@ -126,13 +133,13 @@ static void sh_dmac_enable_dma(struct dma_channel *chan) int irq; u32 chcr; - chcr = ctrl_inl(CHCR[chan->chan]); + chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR); chcr |= CHCR_DE; if (chan->flags & DMA_TEI_CAPABLE) chcr |= CHCR_IE; - ctrl_outl(chcr, CHCR[chan->chan]); + ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR)); if (chan->flags & DMA_TEI_CAPABLE) { irq = get_dmte_irq(chan->chan); @@ -150,9 +157,9 @@ static void sh_dmac_disable_dma(struct dma_channel *chan) disable_irq(irq); } - chcr = ctrl_inl(CHCR[chan->chan]); + chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR); chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); - ctrl_outl(chcr, CHCR[chan->chan]); + ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR)); } static int sh_dmac_xfer_dma(struct dma_channel *chan) @@ -183,12 +190,13 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan) */ if (chan->sar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) - ctrl_outl(chan->sar, SAR[chan->chan]); + ctrl_outl(chan->sar, (dma_base_addr[chan->chan]+SAR)); if (chan->dar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) - ctrl_outl(chan->dar, DAR[chan->chan]); + ctrl_outl(chan->dar, (dma_base_addr[chan->chan] + DAR)); - ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]); + ctrl_outl(chan->count >> calc_xmit_shift(chan), + (dma_base_addr[chan->chan] + TCR)); sh_dmac_enable_dma(chan); @@ -197,36 +205,26 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan) static int sh_dmac_get_dma_residue(struct dma_channel *chan) { - if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE)) + if (!(ctrl_inl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE)) return 0; - return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan); + return ctrl_inl(dma_base_addr[chan->chan] + TCR) + << calc_xmit_shift(chan); } -#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) -#define dmaor_read_reg() ctrl_inw(DMAOR) -#define dmaor_write_reg(data) ctrl_outw(data, DMAOR) -#else -#define dmaor_read_reg() ctrl_inl(DMAOR) -#define dmaor_write_reg(data) ctrl_outl(data, DMAOR) -#endif - -static inline int dmaor_reset(void) +static inline int dmaor_reset(int no) { - unsigned long dmaor = dmaor_read_reg(); + unsigned long dmaor = dmaor_read_reg(no); /* Try to clear the error flags first, incase they are set */ dmaor &= ~(DMAOR_NMIF | DMAOR_AE); - dmaor_write_reg(dmaor); + dmaor_write_reg(no, dmaor); dmaor |= DMAOR_INIT; - dmaor_write_reg(dmaor); + dmaor_write_reg(no, dmaor); /* See if we got an error again */ - if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) { + if 
((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) { printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); return -EINVAL; } @@ -237,10 +235,33 @@ static inline int dmaor_reset(void) #if defined(CONFIG_CPU_SH4) static irqreturn_t dma_err(int irq, void *dummy) { - dmaor_reset(); +#if defined(DMAC_IRQ_MULTI) + int cnt = 0; + switch (irq) { +#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) + case DMTE6_IRQ: + cnt++; +#endif + case DMTE0_IRQ: + if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { + disable_irq(irq); + /* DMA multi and error IRQ */ + return IRQ_HANDLED; + } + default: + return IRQ_NONE; + } +#else + dmaor_reset(0); +#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) + dmaor_reset(1); +#endif disable_irq(irq); return IRQ_HANDLED; +#endif } #endif @@ -259,24 +280,57 @@ static struct dma_info sh_dmac_info = { .flags = DMAC_CHANNELS_TEI_CAPABLE, }; +static unsigned int get_dma_error_irq(int n) +{ +#if defined(DMAC_IRQ_MULTI) + return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6); +#else + return (n == 0) ? DMAE0_IRQ : +#if defined(DMAE1_IRQ) + DMAE1_IRQ; +#else + -1; +#endif +#endif +} + static int __init sh_dmac_init(void) { struct dma_info *info = &sh_dmac_info; int i; #ifdef CONFIG_CPU_SH4 - i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0); - if (unlikely(i < 0)) - return i; + int n; + + for (n = 0; n < NR_DMAE; n++) { + i = request_irq(get_dma_error_irq(n), dma_err, +#if defined(DMAC_IRQ_MULTI) + IRQF_SHARED, +#else + IRQF_DISABLED, #endif + dmae_name[n], (void *)dmae_name[n]); + if (unlikely(i < 0)) { + printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); + return i; + } + } +#endif /* CONFIG_CPU_SH4 */ /* * Initialize DMAOR, and clean up any error flags that may have * been set. */ - i = dmaor_reset(); + i = dmaor_reset(0); + if (unlikely(i != 0)) + return i; +#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) + i = dmaor_reset(1); if (unlikely(i != 0)) return i; +#endif return register_dmac(info); } @@ -284,8 +338,12 @@ static int __init sh_dmac_init(void) static void __exit sh_dmac_exit(void) { #ifdef CONFIG_CPU_SH4 - free_irq(DMAE_IRQ, 0); -#endif + int n; + + for (n = 0; n < NR_DMAE; n++) { + free_irq(get_dma_error_irq(n), (void *)dmae_name[n]); + } +#endif /* CONFIG_CPU_SH4 */ unregister_dmac(&sh_dmac_info); } diff --git a/arch/sh/drivers/dma/dma-sh.h b/arch/sh/drivers/dma/dma-sh.h deleted file mode 100644 index 05fecd5428e..00000000000 --- a/arch/sh/drivers/dma/dma-sh.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * arch/sh/drivers/dma/dma-sh.h - * - * Copyright (C) 2000 Takashi YOSHII - * Copyright (C) 2003 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#ifndef __DMA_SH_H -#define __DMA_SH_H - -#include - -/* Definitions for the SuperH DMAC */ -#define REQ_L 0x00000000 -#define REQ_E 0x00080000 -#define RACK_H 0x00000000 -#define RACK_L 0x00040000 -#define ACK_R 0x00000000 -#define ACK_W 0x00020000 -#define ACK_H 0x00000000 -#define ACK_L 0x00010000 -#define DM_INC 0x00004000 -#define DM_DEC 0x00008000 -#define SM_INC 0x00001000 -#define SM_DEC 0x00002000 -#define RS_IN 0x00000200 -#define RS_OUT 0x00000300 -#define TS_BLK 0x00000040 -#define TM_BUR 0x00000020 -#define CHCR_DE 0x00000001 -#define CHCR_TE 0x00000002 -#define CHCR_IE 0x00000004 - -/* DMAOR definitions */ -#define DMAOR_AE 0x00000004 -#define DMAOR_NMIF 0x00000002 -#define DMAOR_DME 0x00000001 - -/* - * Define the default configuration for dual address memory-memory transfer. - * The 0x400 value represents auto-request, external->external. - */ -#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32) - -#define MAX_DMAC_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) - -/* - * Subtypes that have fewer channels than this simply need to change - * CONFIG_NR_ONCHIP_DMA_CHANNELS. Likewise, subtypes with a larger number - * of channels should expand on this. - * - * For most subtypes we can easily figure these values out with some - * basic calculation, unfortunately on other subtypes these are more - * scattered, so we just leave it unrolled for simplicity. - */ -#define SAR ((unsigned long[]){SH_DMAC_BASE + 0x00, SH_DMAC_BASE + 0x10, \ - SH_DMAC_BASE + 0x20, SH_DMAC_BASE + 0x30, \ - SH_DMAC_BASE + 0x50, SH_DMAC_BASE + 0x60}) -#define DAR ((unsigned long[]){SH_DMAC_BASE + 0x04, SH_DMAC_BASE + 0x14, \ - SH_DMAC_BASE + 0x24, SH_DMAC_BASE + 0x34, \ - SH_DMAC_BASE + 0x54, SH_DMAC_BASE + 0x64}) -#define DMATCR ((unsigned long[]){SH_DMAC_BASE + 0x08, SH_DMAC_BASE + 0x18, \ - SH_DMAC_BASE + 0x28, SH_DMAC_BASE + 0x38, \ - SH_DMAC_BASE + 0x58, SH_DMAC_BASE + 0x68}) -#define CHCR ((unsigned long[]){SH_DMAC_BASE + 0x0c, SH_DMAC_BASE + 0x1c, \ - SH_DMAC_BASE + 0x2c, SH_DMAC_BASE + 0x3c, \ - SH_DMAC_BASE + 0x5c, SH_DMAC_BASE + 0x6c}) - -#define DMAOR (SH_DMAC_BASE + 0x40) - -#endif /* __DMA_SH_H */ - diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h new file mode 100644 index 00000000000..e873ecaa506 --- /dev/null +++ b/arch/sh/include/asm/dma-sh.h @@ -0,0 +1,117 @@ +/* + * arch/sh/include/asm/dma-sh.h + * + * Copyright (C) 2000 Takashi YOSHII + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __DMA_SH_H +#define __DMA_SH_H + +#include + +/* DMAOR contorl: The DMAOR access size is different by CPU.*/ +#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) +#define dmaor_read_reg(n) \ + (n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \ + : ctrl_inw(SH_DMAC_BASE0 + DMAOR)) +#define dmaor_write_reg(n, data) \ + (n ? 
ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \ + : ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)) +#else /* Other CPU */ +#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR) +#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR) +#endif + +static int dmte_irq_map[] __maybe_unused = { +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4) + DMTE0_IRQ, + DMTE0_IRQ + 1, + DMTE0_IRQ + 2, + DMTE0_IRQ + 3, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6) + DMTE4_IRQ, + DMTE4_IRQ + 1, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8) + DMTE6_IRQ, + DMTE6_IRQ + 1, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12) + DMTE8_IRQ, + DMTE9_IRQ, + DMTE10_IRQ, + DMTE11_IRQ, +#endif +}; + +/* Definitions for the SuperH DMAC */ +#define REQ_L 0x00000000 +#define REQ_E 0x00080000 +#define RACK_H 0x00000000 +#define RACK_L 0x00040000 +#define ACK_R 0x00000000 +#define ACK_W 0x00020000 +#define ACK_H 0x00000000 +#define ACK_L 0x00010000 +#define DM_INC 0x00004000 +#define DM_DEC 0x00008000 +#define SM_INC 0x00001000 +#define SM_DEC 0x00002000 +#define RS_IN 0x00000200 +#define RS_OUT 0x00000300 +#define TS_BLK 0x00000040 +#define TM_BUR 0x00000020 +#define CHCR_DE 0x00000001 +#define CHCR_TE 0x00000002 +#define CHCR_IE 0x00000004 + +/* DMAOR definitions */ +#define DMAOR_AE 0x00000004 +#define DMAOR_NMIF 0x00000002 +#define DMAOR_DME 0x00000001 + +/* + * Define the default configuration for dual address memory-memory transfer. + * The 0x400 value represents auto-request, external->external. + */ +#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32) + +/* DMA base address */ +static u32 dma_base_addr[] __maybe_unused = { +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4) + SH_DMAC_BASE0 + 0x00, /* channel 0 */ + SH_DMAC_BASE0 + 0x10, + SH_DMAC_BASE0 + 0x20, + SH_DMAC_BASE0 + 0x30, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6) + SH_DMAC_BASE0 + 0x50, + SH_DMAC_BASE0 + 0x60, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8) + SH_DMAC_BASE1 + 0x00, + SH_DMAC_BASE1 + 0x10, +#endif +#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12) + SH_DMAC_BASE1 + 0x20, + SH_DMAC_BASE1 + 0x30, + SH_DMAC_BASE1 + 0x50, + SH_DMAC_BASE1 + 0x60, /* channel 11 */ +#endif +}; + +/* DMA register */ +#define SAR 0x00 +#define DAR 0x04 +#define TCR 0x08 +#define CHCR 0x0C +#define DMAOR 0x40 + +#endif /* __DMA_SH_H */ diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index beca7128e2a..6bd17847387 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h @@ -25,9 +25,9 @@ #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x10000000) #ifdef CONFIG_NR_DMA_CHANNELS -# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) #else -# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) #endif /* diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h index 6813c3220a1..0ea15f3f236 100644 --- a/arch/sh/include/cpu-sh3/cpu/dma.h +++ b/arch/sh/include/cpu-sh3/cpu/dma.h @@ -1,22 +1,17 @@ #ifndef __ASM_CPU_SH3_DMA_H #define __ASM_CPU_SH3_DMA_H - #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) -#define SH_DMAC_BASE 0xa4010020 -#else -#define SH_DMAC_BASE 0xa4000020 + defined(CONFIG_CPU_SUBTYPE_SH7721) || \ + defined(CONFIG_CPU_SUBTYPE_SH7710) || \ + defined(CONFIG_CPU_SUBTYPE_SH7712) +#define SH_DMAC_BASE0 0xa4010020 +#else /* SH7705/06/07/09 */ +#define SH_DMAC_BASE0 0xa4000020 #endif -#if defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7709) #define DMTE0_IRQ 48 
-#define DMTE1_IRQ 49 -#define DMTE2_IRQ 50 -#define DMTE3_IRQ 51 #define DMTE4_IRQ 76 -#define DMTE5_IRQ 77 -#endif /* Definitions for the SuperH DMAC */ #define TM_BURST 0x00000020 diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h new file mode 100644 index 00000000000..0ed5178fed6 --- /dev/null +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h @@ -0,0 +1,94 @@ +#ifndef __ASM_SH_CPU_SH4_DMA_SH7780_H +#define __ASM_SH_CPU_SH4_DMA_SH7780_H + +#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ + defined(CONFIG_CPU_SUBTYPE_SH7722) || \ + defined(CONFIG_CPU_SUBTYPE_SH7730) +#define DMTE0_IRQ 48 +#define DMTE4_IRQ 76 +#define DMAE0_IRQ 78 /* DMA Error IRQ*/ +#define SH_DMAC_BASE0 0xFE008020 +#define SH_DMARS_BASE 0xFE009000 +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7764) +#define DMTE0_IRQ 34 +#define DMTE4_IRQ 44 +#define DMAE0_IRQ 38 +#define SH_DMAC_BASE0 0xFF608020 +#define SH_DMARS_BASE 0xFF609000 +#elif defined(CONFIG_CPU_SUBTYPE_SH7723) +#define DMTE0_IRQ 48 /* DMAC0A*/ +#define DMTE4_IRQ 40 /* DMAC0B */ +#define DMTE6_IRQ 42 +#define DMTE8_IRQ 76 /* DMAC1A */ +#define DMTE9_IRQ 77 +#define DMTE10_IRQ 72 /* DMAC1B */ +#define DMTE11_IRQ 73 +#define DMAE0_IRQ 78 /* DMA Error IRQ*/ +#define DMAE1_IRQ 74 /* DMA Error IRQ*/ +#define SH_DMAC_BASE0 0xFE008020 +#define SH_DMAC_BASE1 0xFDC08020 +#define SH_DMARS_BASE 0xFDC09000 +#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#define DMTE0_IRQ 34 +#define DMTE4_IRQ 44 +#define DMTE6_IRQ 46 +#define DMTE8_IRQ 92 +#define DMTE9_IRQ 93 +#define DMTE10_IRQ 94 +#define DMTE11_IRQ 95 +#define DMAE0_IRQ 38 /* DMA Error IRQ */ +#define SH_DMAC_BASE0 0xFC808020 +#define SH_DMAC_BASE1 0xFC818020 +#define SH_DMARS_BASE 0xFC809000 +#else /* SH7785 */ +#define DMTE0_IRQ 33 +#define DMTE4_IRQ 37 +#define DMTE6_IRQ 52 +#define DMTE8_IRQ 54 +#define DMTE9_IRQ 55 +#define DMTE10_IRQ 56 +#define DMTE11_IRQ 57 +#define DMAE0_IRQ 39 /* DMA Error IRQ0 */ +#define DMAE1_IRQ 58 /* DMA Error IRQ1 */ +#define SH_DMAC_BASE0 0xFC808020 +#define SH_DMAC_BASE1 0xFCC08020 +#define SH_DMARS_BASE 0xFC809000 +#endif + +#define REQ_HE 0x000000C0 +#define REQ_H 0x00000080 +#define REQ_LE 0x00000040 +#define TM_BURST 0x0000020 +#define TS_8 0x00000000 +#define TS_16 0x00000008 +#define TS_32 0x00000010 +#define TS_16BLK 0x00000018 +#define TS_32BLK 0x00100000 + +/* + * The SuperH DMAC supports a number of transmit sizes, we list them here, + * with their respective values as they appear in the CHCR registers. + * + * Defaults to a 64-bit transfer size. + */ +enum { + XMIT_SZ_8BIT, + XMIT_SZ_16BIT, + XMIT_SZ_32BIT, + XMIT_SZ_128BIT, + XMIT_SZ_256BIT, +}; + +/* + * The DMA count is defined as the number of bytes to transfer. 
+ */ +static unsigned int ts_shift[] __maybe_unused = { + [XMIT_SZ_8BIT] = 0, + [XMIT_SZ_16BIT] = 1, + [XMIT_SZ_32BIT] = 2, + [XMIT_SZ_128BIT] = 4, + [XMIT_SZ_256BIT] = 5, +}; + +#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh7780.h b/arch/sh/include/cpu-sh4/cpu/dma-sh7780.h deleted file mode 100644 index 71b426a6e48..00000000000 --- a/arch/sh/include/cpu-sh4/cpu/dma-sh7780.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __ASM_SH_CPU_SH4_DMA_SH7780_H -#define __ASM_SH_CPU_SH4_DMA_SH7780_H - -#define REQ_HE 0x000000C0 -#define REQ_H 0x00000080 -#define REQ_LE 0x00000040 -#define TM_BURST 0x0000020 -#define TS_8 0x00000000 -#define TS_16 0x00000008 -#define TS_32 0x00000010 -#define TS_16BLK 0x00000018 -#define TS_32BLK 0x00100000 - -/* - * The SuperH DMAC supports a number of transmit sizes, we list them here, - * with their respective values as they appear in the CHCR registers. - * - * Defaults to a 64-bit transfer size. - */ -enum { - XMIT_SZ_8BIT, - XMIT_SZ_16BIT, - XMIT_SZ_32BIT, - XMIT_SZ_128BIT, - XMIT_SZ_256BIT, -}; - -/* - * The DMA count is defined as the number of bytes to transfer. - */ -static unsigned int ts_shift[] __maybe_unused = { - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - [XMIT_SZ_128BIT] = 4, - [XMIT_SZ_256BIT] = 5, -}; - -#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h index 235b7cd1fc9..bcb30246e85 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma.h +++ b/arch/sh/include/cpu-sh4/cpu/dma.h @@ -1,31 +1,29 @@ #ifndef __ASM_CPU_SH4_DMA_H #define __ASM_CPU_SH4_DMA_H -#define DMAOR_INIT ( 0x8000 | DMAOR_DME ) - /* SH7751/7760/7780 DMA IRQ sources */ -#define DMTE0_IRQ 34 -#define DMTE1_IRQ 35 -#define DMTE2_IRQ 36 -#define DMTE3_IRQ 37 -#define DMTE4_IRQ 44 -#define DMTE5_IRQ 45 -#define DMTE6_IRQ 46 -#define DMTE7_IRQ 47 -#define DMAE_IRQ 38 #ifdef CONFIG_CPU_SH4A -#define SH_DMAC_BASE 0xfc808020 +#define DMAOR_INIT (DMAOR_DME) #define CHCR_TS_MASK 0x18 #define CHCR_TS_SHIFT 3 -#include -#else -#define SH_DMAC_BASE 0xffa00000 +#include +#else /* CONFIG_CPU_SH4A */ +/* + * SH7750/SH7751/SH7760 + */ +#define DMTE0_IRQ 34 +#define DMTE4_IRQ 44 +#define DMTE6_IRQ 46 +#define DMAE0_IRQ 38 +#define DMAOR_INIT (0x8000|DMAOR_DME) +#define SH_DMAC_BASE0 0xffa00000 +#define SH_DMAC_BASE1 0xffa00070 /* Definitions for the SuperH DMAC */ -#define TM_BURST 0x0000080 +#define TM_BURST 0x00000080 #define TS_8 0x00000010 #define TS_16 0x00000020 #define TS_32 0x00000030 -- cgit v1.2.3 From f24ade3a3332811a512ed3b6c6aa69486719b1d8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 10 Mar 2009 19:02:30 +0100 Subject: x86, sched_clock(): mark variables read-mostly Impact: micro-optimization There's a number of variables in the sched_clock() path that are in .data/.bss - but not marked __read_mostly. This creates the danger of accidental false cacheline sharing with some other, write-often variable. So mark them __read_mostly. 
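As an aside, and purely for illustration (this sketch is not part of the patch above): the __read_mostly annotation only places a variable in a dedicated linker section, so that rarely written data does not end up on the same cache line as write-often data. A minimal user-space approximation, with the section name chosen here as an assumption rather than the kernel's exact one:

/* Stand-in for the kernel's __read_mostly (really defined in <linux/cache.h>);
 * the section name below is an assumption used only for this demo. */
#include <stdio.h>

#define demo_read_mostly __attribute__((__section__(".data.read_mostly")))

static unsigned int demo_read_mostly demo_tsc_khz = 2400000; /* written once at boot */
static unsigned long demo_counter;                           /* written on every call */

int main(void)
{
	demo_counter++;	/* hot write path stays on its own cache line */
	printf("khz=%u count=%lu\n", demo_tsc_khz, demo_counter);
	return 0;
}

With the annotation in place, write-often data elsewhere in .data can no longer keep invalidating the cache line that holds the mostly-read variables.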
Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e5816863..9c8b71531ca 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -17,20 +17,21 @@ #include #include -unsigned int cpu_khz; /* TSC clocks / usec, not used here */ +unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); -unsigned int tsc_khz; + +unsigned int __read_mostly tsc_khz; EXPORT_SYMBOL(tsc_khz); /* * TSC can be unstable due to cpufreq or due to unsynced TSCs */ -static int tsc_unstable; +static int __read_mostly tsc_unstable; /* native_sched_clock() is called before tsc_init(), so we must start with the TSC soft disabled to prevent erroneous rdtsc usage on !cpu_has_tsc processors */ -static int tsc_disabled = -1; +static int __read_mostly tsc_disabled = -1; static int tsc_clocksource_reliable; /* -- cgit v1.2.3 From f455dfb106916d855d59686fe16575c2ceb2cb2a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 Mar 2009 19:51:07 +0000 Subject: ASoC: Fix up merge with the ARM tree The same change has been made with the final lines in slightly differnet orders. Signed-off-by: Mark Brown --- arch/arm/mach-shark/include/mach/io.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mach-shark/include/mach/io.h b/arch/arm/mach-shark/include/mach/io.h index 8ca7d7f09bd..568daea93fb 100644 --- a/arch/arm/mach-shark/include/mach/io.h +++ b/arch/arm/mach-shark/include/mach/io.h @@ -15,6 +15,7 @@ #define IO_SPACE_LIMIT 0xffffffff #define __io(a) __typesafe_io(PCIO_BASE + (a)) + #define __mem_pci(addr) (addr) #endif -- cgit v1.2.3 From d680c76eccd9222031ee30dcee5fdedba2467610 Mon Sep 17 00:00:00 2001 From: Francesco VIRLINZI Date: Wed, 11 Mar 2009 07:40:54 +0000 Subject: sh: clkfwk: add clk_set_parent/clk_get_parent This patch adds the clk_set_parent/clk_get_parent routines to the sh clock framework. Signed-off-by: Francesco Virlinzi Signed-off-by: Paul Mundt --- arch/sh/include/asm/clock.h | 1 + arch/sh/kernel/cpu/clock.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch') diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h index f9c88583d90..2f6c9627bc1 100644 --- a/arch/sh/include/asm/clock.h +++ b/arch/sh/include/asm/clock.h @@ -15,6 +15,7 @@ struct clk_ops { void (*disable)(struct clk *clk); void (*recalc)(struct clk *clk); int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + int (*set_parent)(struct clk *clk, struct clk *parent); long (*round_rate)(struct clk *clk, unsigned long rate); }; diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index 7b17137536d..332a1798547 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -239,6 +239,35 @@ void clk_recalc_rate(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_recalc_rate); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + int ret = -EINVAL; + struct clk *old; + + if (!parent || !clk) + return ret; + + old = clk->parent; + if (likely(clk->ops && clk->ops->set_parent)) { + unsigned long flags; + spin_lock_irqsave(&clock_lock, flags); + ret = clk->ops->set_parent(clk, parent); + spin_unlock_irqrestore(&clock_lock, flags); + clk->parent = (ret ? 
old : parent); + } + + if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) + propagate_rate(clk); + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + return clk->parent; +} +EXPORT_SYMBOL_GPL(clk_get_parent); + long clk_round_rate(struct clk *clk, unsigned long rate) { if (likely(clk->ops && clk->ops->round_rate)) { -- cgit v1.2.3 From 4a55026fd7a08074676e87932578ff9e327e82a3 Mon Sep 17 00:00:00 2001 From: Francesco VIRLINZI Date: Wed, 11 Mar 2009 07:42:05 +0000 Subject: sh: clkfwk: Add resume from hibernation support. This patch adds PM support to the clock framework. With this, resume from hibernation is properly supported. Signed-off-by: Francesco Virlinzi Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/clock.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index 332a1798547..3209a8740fa 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include #include @@ -358,6 +360,68 @@ static int show_clocks(char *buf, char **start, off_t off, return p - buf; } +#ifdef CONFIG_PM +static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) +{ + static pm_message_t prev_state; + struct clk *clkp; + + switch (state.event) { + case PM_EVENT_ON: + /* Resumeing from hibernation */ + if (prev_state.event == PM_EVENT_FREEZE) { + list_for_each_entry(clkp, &clock_list, node) + if (likely(clkp->ops)) { + if (likely(clkp->ops->set_parent)) + clkp->ops->set_parent(clkp, + clkp->parent); + if (likely(clkp->ops->set_rate)) + clkp->ops->set_rate(clkp, + clkp->rate, NO_CHANGE); + else if (likely(clkp->ops->recalc)) + clkp->ops->recalc(clkp); + } + } + break; + case PM_EVENT_FREEZE: + break; + case PM_EVENT_SUSPEND: + break; + } + + prev_state = state; + return 0; +} + +static int clks_sysdev_resume(struct sys_device *dev) +{ + return clks_sysdev_suspend(dev, PMSG_ON); +} + +static struct sysdev_class clks_sysdev_class = { + .name = "clks", +}; + +static struct sysdev_driver clks_sysdev_driver = { + .suspend = clks_sysdev_suspend, + .resume = clks_sysdev_resume, +}; + +static struct sys_device clks_sysdev_dev = { + .cls = &clks_sysdev_class, +}; + +static int __init clk_sysdev_init(void) +{ + sysdev_class_register(&clks_sysdev_class); + sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver); + sysdev_register(&clks_sysdev_dev); + + return 0; +} +subsys_initcall(clk_sysdev_init); +#endif + int __init clk_init(void) { int i, ret = 0; -- cgit v1.2.3 From 600fa578a95f65bc1f2a03210d3d418747024b43 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 11 Mar 2009 08:14:26 +0000 Subject: sh: improve sh7785lcr power off code Improve the sh7785lcr power off implementation to never return. It takes some time before the board is actually powered off, just hang after asking the harware to power down. This removes the serial port garbage printout. 
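As an aside, a minimal sketch (not taken from the patch below) of the "never return from power off" pattern the message describes; request_board_power_down() is a hypothetical placeholder for the board-specific register write, while set_bl_bit() and cpu_relax() are the SH kernel helpers the real code uses:

/* Sketch only: assumes the usual SH board-file includes are already present.
 * request_board_power_down() is hypothetical; the real sh7785lcr code pokes
 * a board register (see the diff that follows) before spinning forever. */
static void example_power_off(void)
{
	request_board_power_down();	/* ask the hardware to cut power */

	set_bl_bit();			/* block further interrupts on SH */
	while (1)
		cpu_relax();		/* power drops a moment later; never return */
}

Because the handler never returns, nothing after the power-down request can reach the serial console, which is what removes the garbage printout.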
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/board-sh7785lcr.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index 6746a5fba3a..94c0296bc35 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -284,6 +284,9 @@ static void sh7785lcr_power_off(void) } *p = 0x01; iounmap(p); + set_bl_bit(); + while (1) + cpu_relax(); } /* Initialize the board */ -- cgit v1.2.3 From a2b03461cb072eb6097a55ec0289294b09382284 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 11 Mar 2009 11:02:33 +0000 Subject: [ARM] Revert extraneous changes from the S3C audio header move These changes were included in the S3C audio header move but are not directly related to it. Signed-off-by: Mark Brown --- arch/arm/mach-s3c2410/include/mach/hardware.h | 3 +++ arch/arm/mach-shark/include/mach/io.h | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-s3c2410/include/mach/hardware.h b/arch/arm/mach-s3c2410/include/mach/hardware.h index db72beb61d7..74d5a1a4024 100644 --- a/arch/arm/mach-s3c2410/include/mach/hardware.h +++ b/arch/arm/mach-s3c2410/include/mach/hardware.h @@ -131,4 +131,7 @@ extern int s3c2412_gpio_set_sleepcfg(unsigned int pin, unsigned int state); /* machine specific hardware definitions should go after this */ +/* currently here until moved into config (todo) */ +#define CONFIG_NO_MULTIWORD_IO + #endif /* __ASM_ARCH_HARDWARE_H */ diff --git a/arch/arm/mach-shark/include/mach/io.h b/arch/arm/mach-shark/include/mach/io.h index 8ca7d7f09bd..c5cee829fc8 100644 --- a/arch/arm/mach-shark/include/mach/io.h +++ b/arch/arm/mach-shark/include/mach/io.h @@ -14,7 +14,7 @@ #define PCIO_BASE 0xe0000000 #define IO_SPACE_LIMIT 0xffffffff -#define __io(a) __typesafe_io(PCIO_BASE + (a)) +#define __io(a) ((void __iomem *)(PCIO_BASE + (a))) #define __mem_pci(addr) (addr) #endif -- cgit v1.2.3 From 603b6fd5b8d313a109d3739d8706ee51962ff402 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 11 Mar 2009 18:28:24 +0000 Subject: [ARM] Revert futher extraneous changes from the S3C header move Can't see any immediate need for these; build tested. Signed-off-by: Mark Brown --- arch/arm/mach-s3c2410/include/mach/io.h | 2 +- arch/arm/plat-s3c24xx/clock-dclk.c | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-s3c2410/include/mach/io.h b/arch/arm/mach-s3c2410/include/mach/io.h index c477771c092..9813dbf2ae4 100644 --- a/arch/arm/mach-s3c2410/include/mach/io.h +++ b/arch/arm/mach-s3c2410/include/mach/io.h @@ -9,7 +9,7 @@ #ifndef __ASM_ARM_ARCH_IO_H #define __ASM_ARM_ARCH_IO_H -#include +#include #define IO_SPACE_LIMIT 0xffffffff diff --git a/arch/arm/plat-s3c24xx/clock-dclk.c b/arch/arm/plat-s3c24xx/clock-dclk.c index 35219dcf9f0..5b75a797b5a 100644 --- a/arch/arm/plat-s3c24xx/clock-dclk.c +++ b/arch/arm/plat-s3c24xx/clock-dclk.c @@ -18,7 +18,6 @@ #include #include -#include #include #include -- cgit v1.2.3 From f507cd22035fdadd5dbb476dd05e9e7ee21c3b84 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 6 Mar 2009 02:54:09 +0000 Subject: ps3/block: Replace mtd/ps3vram by block/ps3vram Convert the PS3 Video RAM Storage Driver from an MTD driver to a plain block device driver. The ps3vram driver exposes unused video RAM on the PS3 as a block device suitable for storage or swap. Fast data transfer is achieved using a local cache in system RAM and DMA transfers via the GPU. 
The new driver is ca. 50% faster for reading, and ca. 10% for writing. Signed-off-by: Geert Uytterhoeven Acked-by: Geoff Levand Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/ps3/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 920cf7a454b..740ef56a155 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -128,6 +128,13 @@ config PS3_FLASH be disabled on the kernel command line using "ps3flash=off", to not allocate this fixed buffer. +config PS3_VRAM + tristate "PS3 Video RAM Storage Driver" + depends on FB_PS3=y && BLOCK && m + help + This driver allows you to use excess PS3 video RAM as volatile + storage or system swap. + config PS3_LPM tristate "PS3 Logical Performance Monitor support" depends on PPC_PS3 -- cgit v1.2.3 From f9847004bf94ba5457738cb5e0f868821864ff50 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 17:52:16 +1000 Subject: m68k: merge the non-MMU and MMU versions of param.h It is trivial to merge the non-MMU and MMU versions of param.h. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of param.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/param.h | 25 +++++++++++++++++++++++-- arch/m68k/include/asm/param_mm.h | 22 ---------------------- arch/m68k/include/asm/param_no.h | 22 ---------------------- 3 files changed, 23 insertions(+), 46 deletions(-) delete mode 100644 arch/m68k/include/asm/param_mm.h delete mode 100644 arch/m68k/include/asm/param_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/param.h b/arch/m68k/include/asm/param.h index 40d1112a458..85c41b75aa7 100644 --- a/arch/m68k/include/asm/param.h +++ b/arch/m68k/include/asm/param.h @@ -1,5 +1,26 @@ +#ifndef _M68K_PARAM_H +#define _M68K_PARAM_H + +#ifdef __KERNEL__ +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ +#endif + +#ifndef HZ +#define HZ 100 +#endif + #ifdef __uClinux__ -#include "param_no.h" +#define EXEC_PAGESIZE 4096 #else -#include "param_mm.h" +#define EXEC_PAGESIZE 8192 +#endif + +#ifndef NOGROUP +#define NOGROUP (-1) #endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _M68K_PARAM_H */ diff --git a/arch/m68k/include/asm/param_mm.h b/arch/m68k/include/asm/param_mm.h deleted file mode 100644 index 536a2788835..00000000000 --- a/arch/m68k/include/asm/param_mm.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68K_PARAM_H -#define _M68K_PARAM_H - -#ifdef __KERNEL__ -# define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. 
some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#endif - -#ifndef HZ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 8192 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif /* _M68K_PARAM_H */ diff --git a/arch/m68k/include/asm/param_no.h b/arch/m68k/include/asm/param_no.h deleted file mode 100644 index 6044397adb6..00000000000 --- a/arch/m68k/include/asm/param_no.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68KNOMMU_PARAM_H -#define _M68KNOMMU_PARAM_H - -#ifdef __KERNEL__ -#define HZ CONFIG_HZ -#define USER_HZ HZ -#define CLOCKS_PER_SEC (USER_HZ) -#endif - -#ifndef HZ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif /* _M68KNOMMU_PARAM_H */ -- cgit v1.2.3 From 9863a0babc2f0d920dfb4ddecf4e5b5342cb986b Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 17:54:49 +1000 Subject: m68k: merge the non-MMU and MMU versions of swab.h It is trivial to merge the non-MMU and MMU versions of swab.h. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of swab.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/swab.h | 30 ++++++++++++++++++++++++++---- arch/m68k/include/asm/swab_mm.h | 16 ---------------- arch/m68k/include/asm/swab_no.h | 24 ------------------------ 3 files changed, 26 insertions(+), 44 deletions(-) delete mode 100644 arch/m68k/include/asm/swab_mm.h delete mode 100644 arch/m68k/include/asm/swab_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/swab.h b/arch/m68k/include/asm/swab.h index 7d7dde1c73e..9e3054ea59e 100644 --- a/arch/m68k/include/asm/swab.h +++ b/arch/m68k/include/asm/swab.h @@ -1,5 +1,27 @@ -#ifdef __uClinux__ -#include "swab_no.h" -#else -#include "swab_mm.h" +#ifndef _M68K_SWAB_H +#define _M68K_SWAB_H + +#include +#include + +#define __SWAB_64_THRU_32__ + +#if defined (__mcfisaaplus__) || defined (__mcfisac__) +static inline __attribute_const__ __u32 __arch_swab32(__u32 val) +{ + __asm__("byterev %0" : "=d" (val) : "0" (val)); + return val; +} + +#define __arch_swab32 __arch_swab32 +#elif !defined(__uClinux__) + +static inline __attribute_const__ __u32 __arch_swab32(__u32 val) +{ + __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val)); + return val; +} +#define __arch_swab32 __arch_swab32 #endif + +#endif /* _M68K_SWAB_H */ diff --git a/arch/m68k/include/asm/swab_mm.h b/arch/m68k/include/asm/swab_mm.h deleted file mode 100644 index 7221e306682..00000000000 --- a/arch/m68k/include/asm/swab_mm.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _M68K_SWAB_H -#define _M68K_SWAB_H - -#include -#include - -#define __SWAB_64_THRU_32__ - -static inline __attribute_const__ __u32 __arch_swab32(__u32 val) -{ - __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val)); - return val; -} -#define __arch_swab32 __arch_swab32 - -#endif /* _M68K_SWAB_H */ diff --git a/arch/m68k/include/asm/swab_no.h b/arch/m68k/include/asm/swab_no.h deleted file mode 100644 index e582257db30..00000000000 --- a/arch/m68k/include/asm/swab_no.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _M68KNOMMU_SWAB_H -#define _M68KNOMMU_SWAB_H - -#include - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __SWAB_64_THRU_32__ -#endif - -#if defined (__mcfisaaplus__) || defined (__mcfisac__) -static inline __attribute_const__ __u32 __arch_swab32(__u32 
val) -{ - asm( - "byterev %0" - : "=d" (val) - : "0" (val) - ); - return val; -} - -#define __arch_swab32 __arch_swab32 -#endif - -#endif /* _M68KNOMMU_SWAB_H */ -- cgit v1.2.3 From a05ef46731a1953590062e09a92c2c83ed32a086 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 18:00:25 +1000 Subject: m68k: merge the non-MMU and MMU versions of sigcontext.h It is trivial to merge the non-MMU and MMU versions of sigcontext.h. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of sigconext.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/sigcontext.h | 25 ++++++++++++++++++++++--- arch/m68k/include/asm/sigcontext_mm.h | 19 ------------------- arch/m68k/include/asm/sigcontext_no.h | 17 ----------------- 3 files changed, 22 insertions(+), 39 deletions(-) delete mode 100644 arch/m68k/include/asm/sigcontext_mm.h delete mode 100644 arch/m68k/include/asm/sigcontext_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h index bff6d40345a..523db2a51cf 100644 --- a/arch/m68k/include/asm/sigcontext.h +++ b/arch/m68k/include/asm/sigcontext.h @@ -1,5 +1,24 @@ +#ifndef _ASM_M68k_SIGCONTEXT_H +#define _ASM_M68k_SIGCONTEXT_H + +struct sigcontext { + unsigned long sc_mask; /* old sigmask */ + unsigned long sc_usp; /* old user stack pointer */ + unsigned long sc_d0; + unsigned long sc_d1; + unsigned long sc_a0; + unsigned long sc_a1; #ifdef __uClinux__ -#include "sigcontext_no.h" -#else -#include "sigcontext_mm.h" + unsigned long sc_a5; +#endif + unsigned short sc_sr; + unsigned long sc_pc; + unsigned short sc_formatvec; +#ifndef __uClinux__ + unsigned long sc_fpregs[2*3]; /* room for two fp registers */ + unsigned long sc_fpcntl[3]; + unsigned char sc_fpstate[216]; +#endif +}; + #endif diff --git a/arch/m68k/include/asm/sigcontext_mm.h b/arch/m68k/include/asm/sigcontext_mm.h deleted file mode 100644 index 64fbe34cf26..00000000000 --- a/arch/m68k/include/asm/sigcontext_mm.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_M68k_SIGCONTEXT_H -#define _ASM_M68k_SIGCONTEXT_H - -struct sigcontext { - unsigned long sc_mask; /* old sigmask */ - unsigned long sc_usp; /* old user stack pointer */ - unsigned long sc_d0; - unsigned long sc_d1; - unsigned long sc_a0; - unsigned long sc_a1; - unsigned short sc_sr; - unsigned long sc_pc; - unsigned short sc_formatvec; - unsigned long sc_fpregs[2*3]; /* room for two fp registers */ - unsigned long sc_fpcntl[3]; - unsigned char sc_fpstate[216]; -}; - -#endif diff --git a/arch/m68k/include/asm/sigcontext_no.h b/arch/m68k/include/asm/sigcontext_no.h deleted file mode 100644 index 36c293fc133..00000000000 --- a/arch/m68k/include/asm/sigcontext_no.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _ASM_M68KNOMMU_SIGCONTEXT_H -#define _ASM_M68KNOMMU_SIGCONTEXT_H - -struct sigcontext { - unsigned long sc_mask; /* old sigmask */ - unsigned long sc_usp; /* old user stack pointer */ - unsigned long sc_d0; - unsigned long sc_d1; - unsigned long sc_a0; - unsigned long sc_a1; - unsigned long sc_a5; - unsigned short sc_sr; - unsigned long sc_pc; - unsigned short sc_formatvec; -}; - -#endif -- cgit v1.2.3 From a6bc3262c561780d2a6587aa3d5715b1e7d8fa13 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 16 Mar 2009 18:52:56 +1100 Subject: sparseirq, powerpc/cell: fix unused variable warning in interrupt.c This new compiler warning: arch/powerpc/platforms/cell/interrupt.c: In function 'handle_iic_irq': arch/powerpc/platforms/cell/interrupt.c:240: warning: 
unused variable 'cpu' Triggers because the local variable 'cpu' became unused due to commit: dee4102: sparseirq: use kstat_irqs_cpu instead Remove the variable. Signed-off-by: Stephen Rothwell Cc: Yinghai Lu Cc: ppc-dev LKML-Reference: <20090316185256.4a160374.sfr@canb.auug.org.au> Signed-off-by: Ingo Molnar --- arch/powerpc/platforms/cell/interrupt.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 1f0d774ad92..882e47080e7 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -237,8 +237,6 @@ extern int noirqdebug; static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) { - const unsigned int cpu = smp_processor_id(); - spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); -- cgit v1.2.3 From 4c3f450ba4e4c00df91f98664b58f9a98dc049fd Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 12 Mar 2009 08:40:15 +0000 Subject: sh: Add OHCI USB support for SH7786 Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 83 ++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index 249b99e1adb..5a47e1cf442 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -18,6 +18,7 @@ #include #include #include +#include #include static struct plat_sci_port sci_platform_data[] = { @@ -68,12 +69,94 @@ static struct platform_device sci_device = { }, }; +static struct resource usb_ohci_resources[] = { + [0] = { + .start = 0xffe70400, + .end = 0xffe704ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 77, + .end = 77, + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 usb_ohci_dma_mask = DMA_BIT_MASK(32); +static struct platform_device usb_ohci_device = { + .name = "sh_ohci", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + static struct platform_device *sh7786_devices[] __initdata = { &sci_device, + &usb_ohci_device, }; + +/* + * Please call this function if your platform board + * use external clock for USB + * */ +#define USBCTL0 0xffe70858 +#define CLOCK_MODE_MASK 0xffffff7f +#define EXT_CLOCK_MODE 0x00000080 +void __init sh7786_usb_use_exclock(void) +{ + u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK; + __raw_writel(val | EXT_CLOCK_MODE, USBCTL0); +} + +#define USBINITREG1 0xffe70094 +#define USBINITREG2 0xffe7009c +#define USBINITVAL1 0x00ff0040 +#define USBINITVAL2 0x00000001 + +#define USBPCTL1 0xffe70804 +#define USBST 0xffe70808 +#define PHY_ENB 0x00000001 +#define PLL_ENB 0x00000002 +#define PHY_RST 0x00000004 +#define ACT_PLL_STATUS 0xc0000000 +static void __init sh7786_usb_setup(void) +{ + int i = 1000000; + + /* + * USB initial settings + * + * The following settings are necessary + * for using the USB modules. 
+ * + * see "USB Inital Settings" for detail + */ + __raw_writel(USBINITVAL1, USBINITREG1); + __raw_writel(USBINITVAL2, USBINITREG2); + + /* + * Set the PHY and PLL enable bit + */ + __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1); + while (i-- && + ((__raw_readl(USBST) & ACT_PLL_STATUS) != ACT_PLL_STATUS)) + cpu_relax(); + + if (i) { + /* Set the PHY RST bit */ + __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1); + printk(KERN_INFO "sh7786 usb setup done\n"); + } +} + static int __init sh7786_devices_setup(void) { + sh7786_usb_setup(); return platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices)); } -- cgit v1.2.3 From 039a718ebb37298de87801288673859ac40b6fc4 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 12 Mar 2009 06:34:39 +0000 Subject: sh: Revert CONFIG_NR_ONCHIP_DMA_CHANNELS to MAX_DMA_CHANNELS Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-sh.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index e873ecaa506..0c8f8e14622 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -11,6 +11,7 @@ #ifndef __DMA_SH_H #define __DMA_SH_H +#include #include /* DMAOR contorl: The DMAOR access size is different by CPU.*/ @@ -29,21 +30,21 @@ #endif static int dmte_irq_map[] __maybe_unused = { -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4) +#if (MAX_DMA_CHANNELS >= 4) DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6) +#if (MAX_DMA_CHANNELS >= 6) DMTE4_IRQ, DMTE4_IRQ + 1, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8) +#if (MAX_DMA_CHANNELS >= 8) DMTE6_IRQ, DMTE6_IRQ + 1, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12) +#if (MAX_DMA_CHANNELS >= 12) DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, @@ -85,21 +86,21 @@ static int dmte_irq_map[] __maybe_unused = { /* DMA base address */ static u32 dma_base_addr[] __maybe_unused = { -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4) +#if (MAX_DMA_CHANNELS >= 4) SH_DMAC_BASE0 + 0x00, /* channel 0 */ SH_DMAC_BASE0 + 0x10, SH_DMAC_BASE0 + 0x20, SH_DMAC_BASE0 + 0x30, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6) +#if (MAX_DMA_CHANNELS >= 6) SH_DMAC_BASE0 + 0x50, SH_DMAC_BASE0 + 0x60, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8) +#if (MAX_DMA_CHANNELS >= 8) SH_DMAC_BASE1 + 0x00, SH_DMAC_BASE1 + 0x10, #endif -#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12) +#if (MAX_DMA_CHANNELS >= 12) SH_DMAC_BASE1 + 0x20, SH_DMAC_BASE1 + 0x30, SH_DMAC_BASE1 + 0x50, -- cgit v1.2.3 From a83c0b739f3ad1887704cfa9f1ee5ee208cf1532 Mon Sep 17 00:00:00 2001 From: Francesco VIRLINZI Date: Wed, 11 Mar 2009 10:39:02 +0000 Subject: sh: PMB hibernation support This implements preliminary suspend/resume support for the PMB. 
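As an aside, a hedged sketch (not from the patch below) of the sysdev suspend/resume pattern that both this PMB patch and the clock framework patch earlier use to detect resume from hibernation: the previous pm_message_t is remembered, so hardware is re-programmed only on the FREEZE to ON transition. restore_hw_state() is a hypothetical placeholder for reloading PMB entries or clock parents and rates:

/* Sketch of the 2.6.x sysdev hooks used in the surrounding patches.
 * restore_hw_state() is hypothetical; the real callbacks walk the PMB
 * entry list or the clock list and re-apply the saved settings. */
static int example_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;

	/* PM_EVENT_ON right after PM_EVENT_FREEZE means we just came
	 * back from a hibernation image and lost the hardware state. */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE)
		restore_hw_state();

	prev_state = state;
	return 0;
}

static int example_sysdev_resume(struct sys_device *dev)
{
	return example_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver example_sysdev_driver = {
	.suspend = example_sysdev_suspend,
	.resume  = example_sysdev_resume,
};

The driver is then registered against an existing sysdev class from an initcall; the PMB patch below registers against cpu_sysdev_class.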
Signed-off-by: Francesco Virlinzi Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'arch') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 84241676265..b1a714a92b1 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -15,6 +15,8 @@ */ #include #include +#include +#include #include #include #include @@ -402,3 +404,39 @@ static int __init pmb_debugfs_init(void) return 0; } postcore_initcall(pmb_debugfs_init); + +#ifdef CONFIG_PM +static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) +{ + static pm_message_t prev_state; + + /* Restore the PMB after a resume from hibernation */ + if (state.event == PM_EVENT_ON && + prev_state.event == PM_EVENT_FREEZE) { + struct pmb_entry *pmbe; + spin_lock_irq(&pmb_list_lock); + for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) + set_pmb_entry(pmbe); + spin_unlock_irq(&pmb_list_lock); + } + prev_state = state; + return 0; +} + +static int pmb_sysdev_resume(struct sys_device *dev) +{ + return pmb_sysdev_suspend(dev, PMSG_ON); +} + +static struct sysdev_driver pmb_sysdev_driver = { + .suspend = pmb_sysdev_suspend, + .resume = pmb_sysdev_resume, +}; + +static int __init pmb_sysdev_init(void) +{ + return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); +} + +subsys_initcall(pmb_sysdev_init); +#endif -- cgit v1.2.3 From 02ebd32f52c10f90f810e85d0281e9e81dd6e741 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Fri, 13 Mar 2009 04:31:34 +0000 Subject: sh: Disable get_dma_error_irq for non-SH4 targets. dma-sh's get_dma_error_irq() is only used by SH4, as the SH3 doesn't have the DMA Error interrupt. Disable it out for non-SH4 builds. Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/drivers/dma/dma-sh.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index ab7b18dcbab..31c2930f2f5 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -280,6 +280,7 @@ static struct dma_info sh_dmac_info = { .flags = DMAC_CHANNELS_TEI_CAPABLE, }; +#ifdef CONFIG_CPU_SH4 static unsigned int get_dma_error_irq(int n) { #if defined(DMAC_IRQ_MULTI) @@ -293,6 +294,7 @@ static unsigned int get_dma_error_irq(int n) #endif #endif } +#endif static int __init sh_dmac_init(void) { -- cgit v1.2.3 From 7a516280b6a99634933b417834e178bde8659da1 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Fri, 13 Mar 2009 05:03:37 +0000 Subject: sh: Fix compile error by operands(mov.l) in sh3/entry.S -- log -- arch/sh/kernel/cpu/sh4/../sh3/entry.S:365: Error: invalid operands for opcode make[4]: *** [arch/sh/kernel/cpu/sh4/../sh3/entry.o] Error 1 make[3]: *** [arch/sh/kernel/cpu/sh4] Error 2 -- log -- Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index fba6ac20bb1..55da0ff9848 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -362,7 +362,7 @@ general_exception: nop ! Save registers / Switch to bank 0 - mov.l k4, k2 ! keep vector in k2 + mov k4, k2 ! keep vector in k2 mov.l 1f, k4 ! SR bits to clear in k4 bsr save_regs ! 
needs original pr value in k3 nop -- cgit v1.2.3 From 7759491274bc5ba7cd72b3b9cc5ec8247b937efb Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 13 Mar 2009 15:23:04 +0000 Subject: sh: SuperH Mobile suspend support This patch contains CONFIG_SUSPEND support to the SuperH architecture. If enabled, SuperH Mobile processors will register their suspend callbacks during boot. To suspend, use "echo mem > /sys/power/state". To allow wakeup, make sure "/sys/device/platform/../power/wakeup" contains "enabled". Additional per-device driver patches are most likely needed. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/suspend.h | 9 +++ arch/sh/kernel/cpu/sh4a/Makefile | 4 + arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c | 92 ++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S | 125 ++++++++++++++++++++++++++++++ 5 files changed, 231 insertions(+) create mode 100644 arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c create mode 100644 arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index adffbf4048b..a0c879d17fd 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -181,6 +181,7 @@ config CPU_SHX3 config ARCH_SHMOBILE bool + select ARCH_SUSPEND_POSSIBLE choice prompt "Processor sub-type selection" diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 5d6d8aba568..b1b995370e7 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -1,6 +1,7 @@ #ifndef _ASM_SH_SUSPEND_H #define _ASM_SH_SUSPEND_H +#ifndef __ASSEMBLY__ static inline int arch_prepare_suspend(void) { return 0; } #include @@ -9,5 +10,13 @@ struct swsusp_arch_regs { struct pt_regs user_regs; unsigned long bank1_regs[8]; }; +#endif + +/* flags passed to assembly suspend code */ +#define SUSP_SH_SLEEP (1 << 0) /* Regular sleep mode */ +#define SUSP_SH_STANDBY (1 << 1) /* SH-Mobile Software standby mode */ +#define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ +#define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ +#define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ #endif /* _ASM_SH_SUSPEND_H */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 1a92361feeb..09967dc0065 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -35,6 +35,10 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o +# Power Mangement & Sleep mode +pm-$(CONFIG_ARCH_SHMOBILE) := pm-sh_mobile.o sleep-sh_mobile.o + obj-y += $(clock-y) obj-$(CONFIG_SMP) += $(smp-y) obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_PM) += $(pm-y) diff --git a/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c b/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c new file mode 100644 index 00000000000..8c067adf683 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c @@ -0,0 +1,92 @@ +/* + * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c + * + * Power management support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include + +/* + * Sleep modes available on SuperH Mobile: + * + * Sleep mode is just plain "sleep" instruction + * Sleep Self-Refresh mode is above plus RAM put in Self-Refresh + * Standby Self-Refresh mode is above plus stopped clocks + */ +#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) +#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) +#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) + +/* + * The following modes are not there yet: + * + * R-standby mode is unsupported, but will be added in the future + * U-standby mode is low priority since it needs bootloader hacks + * + * All modes should be tied in with cpuidle. But before that can + * happen we need to keep track of enabled hardware blocks so we + * can avoid entering sleep modes that stop clocks to hardware + * blocks that are in use even though the cpu core is idle. + */ + +extern const unsigned char sh_mobile_standby[]; +extern const unsigned int sh_mobile_standby_size; + +static void sh_mobile_call_standby(unsigned long mode) +{ + extern void *vbr_base; + void *onchip_mem = (void *)0xe5200000; /* ILRAM */ + void (*standby_onchip_mem)(unsigned long) = onchip_mem; + + /* Note: Wake up from sleep may generate exceptions! + * Setup VBR to point to on-chip ram if self-refresh is + * going to be used. + */ + if (mode & SUSP_SH_SF) + asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); + + /* Copy the assembly snippet to the otherwise ununsed ILRAM */ + memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); + wmb(); + ctrl_barrier(); + + /* Let assembly snippet in on-chip memory handle the rest */ + standby_onchip_mem(mode); + + /* Put VBR back in System RAM again */ + if (mode & SUSP_SH_SF) + asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); +} + +static int sh_pm_enter(suspend_state_t state) +{ + local_irq_disable(); + set_bl_bit(); + sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); + local_irq_disable(); + clear_bl_bit(); + return 0; +} + +static struct platform_suspend_ops sh_pm_ops = { + .enter = sh_pm_enter, + .valid = suspend_valid_only_mem, +}; + +static int __init sh_pm_init(void) +{ + suspend_set_ops(&sh_pm_ops); + return 0; +} + +late_initcall(sh_pm_init); diff --git a/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S b/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S new file mode 100644 index 00000000000..5d888ef53d8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S @@ -0,0 +1,125 @@ +/* + * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S + * + * Sleep mode and Standby modes support for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include + +/* manage self-refresh and enter standby mode. + * this code will be copied to on-chip memory and executed from there. 
+ */ + + .balign 4096,0,4096 +ENTRY(sh_mobile_standby) + mov r4, r0 + + tst #SUSP_SH_SF, r0 + bt skip_set_sf + + /* SDRAM: disable power down and put in self-refresh mode */ + mov.l 1f, r4 + mov.l 2f, r1 + mov.l @r4, r2 + or r1, r2 + mov.l 3f, r3 + and r3, r2 + mov.l r2, @r4 + +skip_set_sf: + tst #SUSP_SH_SLEEP, r0 + bt test_standby + + /* set mode to "sleep mode" */ + bra do_sleep + mov #0x00, r1 + +test_standby: + tst #SUSP_SH_STANDBY, r0 + bt test_rstandby + + /* set mode to "software standby mode" */ + bra do_sleep + mov #0x80, r1 + +test_rstandby: + tst #SUSP_SH_RSTANDBY, r0 + bt test_ustandby + + /* set mode to "r-standby mode" */ + bra do_sleep + mov #0x20, r1 + +test_ustandby: + tst #SUSP_SH_USTANDBY, r0 + bt done_sleep + + /* set mode to "u-standby mode" */ + mov #0x10, r1 + + /* fall-through */ + +do_sleep: + /* setup and enter selected standby mode */ + mov.l 5f, r4 + mov.l r1, @r4 + sleep + +done_sleep: + /* reset standby mode to sleep mode */ + mov.l 5f, r4 + mov #0x00, r1 + mov.l r1, @r4 + + tst #SUSP_SH_SF, r0 + bt skip_restore_sf + + /* SDRAM: set auto-refresh mode */ + mov.l 1f, r4 + mov.l @r4, r2 + mov.l 4f, r3 + and r3, r2 + mov.l r2, @r4 + mov.l 6f, r4 + mov.l 7f, r1 + mov.l 8f, r2 + mov.l @r4, r3 + mov #-1, r4 + add r4, r3 + or r2, r3 + mov.l r3, @r1 +skip_restore_sf: + rts + nop + + .balign 4 +1: .long 0xfe400008 /* SDCR0 */ +2: .long 0x00000400 +3: .long 0xffff7fff +4: .long 0xfffffbff +5: .long 0xa4150020 /* STBCR */ +6: .long 0xfe40001c /* RTCOR */ +7: .long 0xfe400018 /* RTCNT */ +8: .long 0xa55a0000 + +/* interrupt vector @ 0x600 */ + .balign 0x400,0,0x400 + .long 0xdeadbeef + .balign 0x200,0,0x200 + /* sh7722 will end up here in sleep mode */ + rte + nop +sh_mobile_standby_end: + +ENTRY(sh_mobile_standby_size) + .long sh_mobile_standby_end - sh_mobile_standby -- cgit v1.2.3 From 93356d07474b1f16f25e79e81597c2a6b8c2a783 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 13 Mar 2009 15:27:14 +0000 Subject: sh: add ap325 lcd power off support Improve the ap325 board code to allow the lcd panel and backlight to be powered off. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/board-ap325rxa.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 15b6d450fbf..a64e38841c4 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -166,6 +166,16 @@ static void ap320_wvga_power_on(void *board_data) ctrl_outw(0x100, FPGA_BKLREG); } +static void ap320_wvga_power_off(void *board_data) +{ + /* backlight */ + ctrl_outw(0, FPGA_BKLREG); + gpio_set_value(GPIO_PTS3, 1); + + /* ASD AP-320/325 LCD OFF */ + ctrl_outw(0, FPGA_LCDREG); +} + static struct sh_mobile_lcdc_info lcdc_info = { .clock_source = LCDC_CLK_EXTERNAL, .ch[0] = { @@ -191,6 +201,7 @@ static struct sh_mobile_lcdc_info lcdc_info = { }, .board_cfg = { .display_on = ap320_wvga_power_on, + .display_off = ap320_wvga_power_off, }, } }; -- cgit v1.2.3 From e9edb3fec2260b5a64e9ca9e09160b74f1b106e3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 16 Mar 2009 20:00:17 +0900 Subject: sh: Consolidate SH-Mobile CPU code in arch/sh/kernel/cpu/shmobile/. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/Makefile | 1 + arch/sh/kernel/cpu/sh4a/Makefile | 4 - arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c | 92 ---------------------- arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S | 125 ------------------------------ arch/sh/kernel/cpu/shmobile/Makefile | 6 ++ arch/sh/kernel/cpu/shmobile/pm.c | 92 ++++++++++++++++++++++ arch/sh/kernel/cpu/shmobile/sleep.S | 125 ++++++++++++++++++++++++++++++ 7 files changed, 224 insertions(+), 221 deletions(-) delete mode 100644 arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c delete mode 100644 arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S create mode 100644 arch/sh/kernel/cpu/shmobile/Makefile create mode 100644 arch/sh/kernel/cpu/shmobile/pm.c create mode 100644 arch/sh/kernel/cpu/shmobile/sleep.S (limited to 'arch') diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index f471d242774..2600641a483 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_CPU_SH5) = sh5/ # Special cases for family ancestry. obj-$(CONFIG_CPU_SH4A) += sh4a/ +obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/ # Common interfaces. diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 09967dc0065..1a92361feeb 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -35,10 +35,6 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o -# Power Mangement & Sleep mode -pm-$(CONFIG_ARCH_SHMOBILE) := pm-sh_mobile.o sleep-sh_mobile.o - obj-y += $(clock-y) obj-$(CONFIG_SMP) += $(smp-y) obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) -obj-$(CONFIG_PM) += $(pm-y) diff --git a/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c b/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c deleted file mode 100644 index 8c067adf683..00000000000 --- a/arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c - * - * Power management support code for SuperH Mobile - * - * Copyright (C) 2009 Magnus Damm - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include -#include - -/* - * Sleep modes available on SuperH Mobile: - * - * Sleep mode is just plain "sleep" instruction - * Sleep Self-Refresh mode is above plus RAM put in Self-Refresh - * Standby Self-Refresh mode is above plus stopped clocks - */ -#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) -#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) -#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) - -/* - * The following modes are not there yet: - * - * R-standby mode is unsupported, but will be added in the future - * U-standby mode is low priority since it needs bootloader hacks - * - * All modes should be tied in with cpuidle. But before that can - * happen we need to keep track of enabled hardware blocks so we - * can avoid entering sleep modes that stop clocks to hardware - * blocks that are in use even though the cpu core is idle. - */ - -extern const unsigned char sh_mobile_standby[]; -extern const unsigned int sh_mobile_standby_size; - -static void sh_mobile_call_standby(unsigned long mode) -{ - extern void *vbr_base; - void *onchip_mem = (void *)0xe5200000; /* ILRAM */ - void (*standby_onchip_mem)(unsigned long) = onchip_mem; - - /* Note: Wake up from sleep may generate exceptions! 
- * Setup VBR to point to on-chip ram if self-refresh is - * going to be used. - */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); - - /* Copy the assembly snippet to the otherwise ununsed ILRAM */ - memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); - wmb(); - ctrl_barrier(); - - /* Let assembly snippet in on-chip memory handle the rest */ - standby_onchip_mem(mode); - - /* Put VBR back in System RAM again */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); -} - -static int sh_pm_enter(suspend_state_t state) -{ - local_irq_disable(); - set_bl_bit(); - sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); - local_irq_disable(); - clear_bl_bit(); - return 0; -} - -static struct platform_suspend_ops sh_pm_ops = { - .enter = sh_pm_enter, - .valid = suspend_valid_only_mem, -}; - -static int __init sh_pm_init(void) -{ - suspend_set_ops(&sh_pm_ops); - return 0; -} - -late_initcall(sh_pm_init); diff --git a/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S b/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S deleted file mode 100644 index 5d888ef53d8..00000000000 --- a/arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S +++ /dev/null @@ -1,125 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S - * - * Sleep mode and Standby modes support for SuperH Mobile - * - * Copyright (C) 2009 Magnus Damm - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include -#include -#include - -/* manage self-refresh and enter standby mode. - * this code will be copied to on-chip memory and executed from there. - */ - - .balign 4096,0,4096 -ENTRY(sh_mobile_standby) - mov r4, r0 - - tst #SUSP_SH_SF, r0 - bt skip_set_sf - - /* SDRAM: disable power down and put in self-refresh mode */ - mov.l 1f, r4 - mov.l 2f, r1 - mov.l @r4, r2 - or r1, r2 - mov.l 3f, r3 - and r3, r2 - mov.l r2, @r4 - -skip_set_sf: - tst #SUSP_SH_SLEEP, r0 - bt test_standby - - /* set mode to "sleep mode" */ - bra do_sleep - mov #0x00, r1 - -test_standby: - tst #SUSP_SH_STANDBY, r0 - bt test_rstandby - - /* set mode to "software standby mode" */ - bra do_sleep - mov #0x80, r1 - -test_rstandby: - tst #SUSP_SH_RSTANDBY, r0 - bt test_ustandby - - /* set mode to "r-standby mode" */ - bra do_sleep - mov #0x20, r1 - -test_ustandby: - tst #SUSP_SH_USTANDBY, r0 - bt done_sleep - - /* set mode to "u-standby mode" */ - mov #0x10, r1 - - /* fall-through */ - -do_sleep: - /* setup and enter selected standby mode */ - mov.l 5f, r4 - mov.l r1, @r4 - sleep - -done_sleep: - /* reset standby mode to sleep mode */ - mov.l 5f, r4 - mov #0x00, r1 - mov.l r1, @r4 - - tst #SUSP_SH_SF, r0 - bt skip_restore_sf - - /* SDRAM: set auto-refresh mode */ - mov.l 1f, r4 - mov.l @r4, r2 - mov.l 4f, r3 - and r3, r2 - mov.l r2, @r4 - mov.l 6f, r4 - mov.l 7f, r1 - mov.l 8f, r2 - mov.l @r4, r3 - mov #-1, r4 - add r4, r3 - or r2, r3 - mov.l r3, @r1 -skip_restore_sf: - rts - nop - - .balign 4 -1: .long 0xfe400008 /* SDCR0 */ -2: .long 0x00000400 -3: .long 0xffff7fff -4: .long 0xfffffbff -5: .long 0xa4150020 /* STBCR */ -6: .long 0xfe40001c /* RTCOR */ -7: .long 0xfe400018 /* RTCNT */ -8: .long 0xa55a0000 - -/* interrupt vector @ 0x600 */ - .balign 0x400,0,0x400 - .long 0xdeadbeef - .balign 0x200,0,0x200 - /* sh7722 will end up here in sleep mode */ - rte - nop -sh_mobile_standby_end: - -ENTRY(sh_mobile_standby_size) - .long sh_mobile_standby_end - sh_mobile_standby diff 
--git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile new file mode 100644 index 00000000000..08bfa7c7db2 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/SuperH SH-Mobile backends. +# + +# Power Management & Sleep mode +obj-$(CONFIG_PM) += pm.o sleep.o diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c new file mode 100644 index 00000000000..8c067adf683 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -0,0 +1,92 @@ +/* + * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c + * + * Power management support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include + +/* + * Sleep modes available on SuperH Mobile: + * + * Sleep mode is just plain "sleep" instruction + * Sleep Self-Refresh mode is above plus RAM put in Self-Refresh + * Standby Self-Refresh mode is above plus stopped clocks + */ +#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) +#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) +#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) + +/* + * The following modes are not there yet: + * + * R-standby mode is unsupported, but will be added in the future + * U-standby mode is low priority since it needs bootloader hacks + * + * All modes should be tied in with cpuidle. But before that can + * happen we need to keep track of enabled hardware blocks so we + * can avoid entering sleep modes that stop clocks to hardware + * blocks that are in use even though the cpu core is idle. + */ + +extern const unsigned char sh_mobile_standby[]; +extern const unsigned int sh_mobile_standby_size; + +static void sh_mobile_call_standby(unsigned long mode) +{ + extern void *vbr_base; + void *onchip_mem = (void *)0xe5200000; /* ILRAM */ + void (*standby_onchip_mem)(unsigned long) = onchip_mem; + + /* Note: Wake up from sleep may generate exceptions! + * Setup VBR to point to on-chip ram if self-refresh is + * going to be used. 
+ */ + if (mode & SUSP_SH_SF) + asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); + + /* Copy the assembly snippet to the otherwise ununsed ILRAM */ + memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); + wmb(); + ctrl_barrier(); + + /* Let assembly snippet in on-chip memory handle the rest */ + standby_onchip_mem(mode); + + /* Put VBR back in System RAM again */ + if (mode & SUSP_SH_SF) + asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); +} + +static int sh_pm_enter(suspend_state_t state) +{ + local_irq_disable(); + set_bl_bit(); + sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); + local_irq_disable(); + clear_bl_bit(); + return 0; +} + +static struct platform_suspend_ops sh_pm_ops = { + .enter = sh_pm_enter, + .valid = suspend_valid_only_mem, +}; + +static int __init sh_pm_init(void) +{ + suspend_set_ops(&sh_pm_ops); + return 0; +} + +late_initcall(sh_pm_init); diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S new file mode 100644 index 00000000000..5d888ef53d8 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/sleep.S @@ -0,0 +1,125 @@ +/* + * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S + * + * Sleep mode and Standby modes support for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include + +/* manage self-refresh and enter standby mode. + * this code will be copied to on-chip memory and executed from there. + */ + + .balign 4096,0,4096 +ENTRY(sh_mobile_standby) + mov r4, r0 + + tst #SUSP_SH_SF, r0 + bt skip_set_sf + + /* SDRAM: disable power down and put in self-refresh mode */ + mov.l 1f, r4 + mov.l 2f, r1 + mov.l @r4, r2 + or r1, r2 + mov.l 3f, r3 + and r3, r2 + mov.l r2, @r4 + +skip_set_sf: + tst #SUSP_SH_SLEEP, r0 + bt test_standby + + /* set mode to "sleep mode" */ + bra do_sleep + mov #0x00, r1 + +test_standby: + tst #SUSP_SH_STANDBY, r0 + bt test_rstandby + + /* set mode to "software standby mode" */ + bra do_sleep + mov #0x80, r1 + +test_rstandby: + tst #SUSP_SH_RSTANDBY, r0 + bt test_ustandby + + /* set mode to "r-standby mode" */ + bra do_sleep + mov #0x20, r1 + +test_ustandby: + tst #SUSP_SH_USTANDBY, r0 + bt done_sleep + + /* set mode to "u-standby mode" */ + mov #0x10, r1 + + /* fall-through */ + +do_sleep: + /* setup and enter selected standby mode */ + mov.l 5f, r4 + mov.l r1, @r4 + sleep + +done_sleep: + /* reset standby mode to sleep mode */ + mov.l 5f, r4 + mov #0x00, r1 + mov.l r1, @r4 + + tst #SUSP_SH_SF, r0 + bt skip_restore_sf + + /* SDRAM: set auto-refresh mode */ + mov.l 1f, r4 + mov.l @r4, r2 + mov.l 4f, r3 + and r3, r2 + mov.l r2, @r4 + mov.l 6f, r4 + mov.l 7f, r1 + mov.l 8f, r2 + mov.l @r4, r3 + mov #-1, r4 + add r4, r3 + or r2, r3 + mov.l r3, @r1 +skip_restore_sf: + rts + nop + + .balign 4 +1: .long 0xfe400008 /* SDCR0 */ +2: .long 0x00000400 +3: .long 0xffff7fff +4: .long 0xfffffbff +5: .long 0xa4150020 /* STBCR */ +6: .long 0xfe40001c /* RTCOR */ +7: .long 0xfe400018 /* RTCNT */ +8: .long 0xa55a0000 + +/* interrupt vector @ 0x600 */ + .balign 0x400,0,0x400 + .long 0xdeadbeef + .balign 0x200,0,0x200 + /* sh7722 will end up here in sleep mode */ + rte + nop +sh_mobile_standby_end: + +ENTRY(sh_mobile_standby_size) + .long sh_mobile_standby_end - sh_mobile_standby -- cgit v1.2.3 From 50cca715a64b66ccf173767d94d4020ea0a6129c Mon Sep 17 00:00:00 2001 From: Francesco 
VIRLINZI Date: Fri, 13 Mar 2009 08:08:01 +0000 Subject: sh: clkfwk: Safer resume from hibernation. This patch fixes a possible problem in the resume from hibernation. It temporarily saves clk->rate on the stack to avoid any possible change during the clk->set_parent(..) call. Signed-off-by: Francesco Virlinzi Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/clock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index 3209a8740fa..1dc896483b5 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -372,12 +372,14 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) if (prev_state.event == PM_EVENT_FREEZE) { list_for_each_entry(clkp, &clock_list, node) if (likely(clkp->ops)) { + unsigned long rate = clkp->rate; + if (likely(clkp->ops->set_parent)) clkp->ops->set_parent(clkp, clkp->parent); if (likely(clkp->ops->set_rate)) clkp->ops->set_rate(clkp, - clkp->rate, NO_CHANGE); + rate, NO_CHANGE); else if (likely(clkp->ops->recalc)) clkp->ops->recalc(clkp); } -- cgit v1.2.3 From 988f831df398ff36f67b095245060c24c354e9e9 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Mon, 16 Mar 2009 03:22:07 +0000 Subject: sh: Move IRQ multi definition of DMAC to defconfig When a SuperH CPU has multiple DMAC IRQs, SH_DMA_IRQ_MULTI is enabled. The following CPUs currently have multiple DMAC IRQs: - SH775X and SH7091 - SH776X - SH7780 - SH7785 When SH_DMA_IRQ_MULTI is enabled, the dma-sh API driver is optimized for multiple IRQs. Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/drivers/dma/Kconfig | 8 ++++++++ arch/sh/drivers/dma/dma-sh.c | 17 +++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index 57d95fce046..32bb8fa605c 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -9,6 +9,14 @@ config SH_DMA select SH_DMA_API default n +config SH_DMA_IRQ_MULTI + bool + depends on SH_DMA + default y if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || \ + CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R || \ + CPU_SUBTYPE_SH7091 || CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7764 || \ + CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 + config NR_ONCHIP_DMA_CHANNELS int depends on SH_DMA diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 31c2930f2f5..37fb5b8bbc3 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -19,13 +19,6 @@ #include #include -#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) -#define DMAC_IRQ_MULTI 1 -#endif - #if defined(DMAE1_IRQ) #define NR_DMAE 2 #else @@ -42,7 +35,7 @@ static inline unsigned int get_dmte_irq(unsigned int chan) if (chan < ARRAY_SIZE(dmte_irq_map)) irq = dmte_irq_map[chan]; -#if defined(DMAC_IRQ_MULTI) +#if defined(CONFIG_SH_DMA_IRQ_MULTI) if (irq > DMTE6_IRQ) return DMTE6_IRQ; return DMTE0_IRQ; @@ -96,7 +89,7 @@ static int sh_dmac_request_dma(struct dma_channel *chan) return 0; return request_irq(get_dmte_irq(chan->chan), dma_tei, -#if defined(DMAC_IRQ_MULTI) +#if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else IRQF_DISABLED, @@ -235,7 +228,7 @@ static inline int dmaor_reset(int no) #if defined(CONFIG_CPU_SH4) static irqreturn_t dma_err(int irq, void *dummy) { -#if defined(DMAC_IRQ_MULTI) +#if 
defined(CONFIG_SH_DMA_IRQ_MULTI) int cnt = 0; switch (irq) { #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) @@ -283,7 +276,7 @@ static struct dma_info sh_dmac_info = { #ifdef CONFIG_CPU_SH4 static unsigned int get_dma_error_irq(int n) { -#if defined(DMAC_IRQ_MULTI) +#if defined(CONFIG_SH_DMA_IRQ_MULTI) return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6); #else return (n == 0) ? DMAE0_IRQ : @@ -306,7 +299,7 @@ static int __init sh_dmac_init(void) for (n = 0; n < NR_DMAE; n++) { i = request_irq(get_dma_error_irq(n), dma_err, -#if defined(DMAC_IRQ_MULTI) +#if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else IRQF_DISABLED, -- cgit v1.2.3 From c5f5d3e6ed4405999d5becaffe18415182fa8eb5 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 22:03:25 +1000 Subject: m68k: use MMU version of setup.h for both MMU and non-MMU The MMU version of setup.h can be used for all m68k platforms. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of setup.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/setup.h | 377 ++++++++++++++++++++++++++++++++++++++- arch/m68k/include/asm/setup_mm.h | 376 -------------------------------------- arch/m68k/include/asm/setup_no.h | 10 -- 3 files changed, 374 insertions(+), 389 deletions(-) delete mode 100644 arch/m68k/include/asm/setup_mm.h delete mode 100644 arch/m68k/include/asm/setup_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 842f86f75cc..4dfb3952b37 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h @@ -1,5 +1,376 @@ -#ifdef __uClinux__ -#include "setup_no.h" +/* +** asm/setup.h -- Definition of the Linux/m68k setup information +** +** Copyright 1992 by Greg Harp +** +** This file is subject to the terms and conditions of the GNU General Public +** License. See the file COPYING in the main directory of this archive +** for more details. +** +** Created 09/29/92 by Greg Harp +** +** 5/2/94 Roman Hodek: +** Added bi_atari part of the machine dependent union bi_un; for now it +** contains just a model field to distinguish between TT and Falcon. +** 26/7/96 Roman Zippel: +** Renamed to setup.h; added some useful macros to allow gcc some +** optimizations if possible. 
+** 5/10/96 Geert Uytterhoeven: +** Redesign of the boot information structure; moved boot information +** structure to bootinfo.h +*/ + +#ifndef _M68K_SETUP_H +#define _M68K_SETUP_H + + + + /* + * Linux/m68k Architectures + */ + +#define MACH_AMIGA 1 +#define MACH_ATARI 2 +#define MACH_MAC 3 +#define MACH_APOLLO 4 +#define MACH_SUN3 5 +#define MACH_MVME147 6 +#define MACH_MVME16x 7 +#define MACH_BVME6000 8 +#define MACH_HP300 9 +#define MACH_Q40 10 +#define MACH_SUN3X 11 + +#define COMMAND_LINE_SIZE 256 + +#ifdef __KERNEL__ + +#define CL_SIZE COMMAND_LINE_SIZE + +#ifndef __ASSEMBLY__ +extern unsigned long m68k_machtype; +#endif /* !__ASSEMBLY__ */ + +#if !defined(CONFIG_AMIGA) +# define MACH_IS_AMIGA (0) +#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ + || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_AMIGA (m68k_machtype == MACH_AMIGA) #else -#include "setup_mm.h" +# define MACH_AMIGA_ONLY +# define MACH_IS_AMIGA (1) +# define MACH_TYPE (MACH_AMIGA) #endif + +#if !defined(CONFIG_ATARI) +# define MACH_IS_ATARI (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ + || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_ATARI (m68k_machtype == MACH_ATARI) +#else +# define MACH_ATARI_ONLY +# define MACH_IS_ATARI (1) +# define MACH_TYPE (MACH_ATARI) +#endif + +#if !defined(CONFIG_MAC) +# define MACH_IS_MAC (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_ATARI) || defined(CONFIG_APOLLO) \ + || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_MAC (m68k_machtype == MACH_MAC) +#else +# define MACH_MAC_ONLY +# define MACH_IS_MAC (1) +# define MACH_TYPE (MACH_MAC) +#endif + +#if defined(CONFIG_SUN3) +#define MACH_IS_SUN3 (1) +#define MACH_SUN3_ONLY (1) +#define MACH_TYPE (MACH_SUN3) +#else +#define MACH_IS_SUN3 (0) +#endif + +#if !defined (CONFIG_APOLLO) +# define MACH_IS_APOLLO (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_APOLLO (m68k_machtype == MACH_APOLLO) +#else +# define MACH_APOLLO_ONLY +# define MACH_IS_APOLLO (1) +# define MACH_TYPE (MACH_APOLLO) +#endif + +#if !defined (CONFIG_MVME147) +# define MACH_IS_MVME147 (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME16x) +# define MACH_IS_MVME147 (m68k_machtype == MACH_MVME147) +#else +# define MACH_MVME147_ONLY +# define MACH_IS_MVME147 (1) +# define MACH_TYPE (MACH_MVME147) +#endif + +#if !defined (CONFIG_MVME16x) +# define MACH_IS_MVME16x (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_MVME16x (m68k_machtype == MACH_MVME16x) +#else +# define MACH_MVME16x_ONLY +# define 
MACH_IS_MVME16x (1) +# define MACH_TYPE (MACH_MVME16x) +#endif + +#if !defined (CONFIG_BVME6000) +# define MACH_IS_BVME6000 (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ + || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_BVME6000 (m68k_machtype == MACH_BVME6000) +#else +# define MACH_BVME6000_ONLY +# define MACH_IS_BVME6000 (1) +# define MACH_TYPE (MACH_BVME6000) +#endif + +#if !defined (CONFIG_HP300) +# define MACH_IS_HP300 (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ + || defined(CONFIG_BVME6000) || defined(CONFIG_Q40) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_HP300 (m68k_machtype == MACH_HP300) +#else +# define MACH_HP300_ONLY +# define MACH_IS_HP300 (1) +# define MACH_TYPE (MACH_HP300) +#endif + +#if !defined (CONFIG_Q40) +# define MACH_IS_Q40 (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ + || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ + || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) +# define MACH_IS_Q40 (m68k_machtype == MACH_Q40) +#else +# define MACH_Q40_ONLY +# define MACH_IS_Q40 (1) +# define MACH_TYPE (MACH_Q40) +#endif + +#if !defined (CONFIG_SUN3X) +# define MACH_IS_SUN3X (0) +#elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ + || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ + || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ + || defined(CONFIG_Q40) || defined(CONFIG_MVME147) +# define MACH_IS_SUN3X (m68k_machtype == MACH_SUN3X) +#else +# define CONFIG_SUN3X_ONLY +# define MACH_IS_SUN3X (1) +# define MACH_TYPE (MACH_SUN3X) +#endif + +#ifndef MACH_TYPE +# define MACH_TYPE (m68k_machtype) +#endif + +#endif /* __KERNEL__ */ + + + /* + * CPU, FPU and MMU types + * + * Note: we may rely on the following equalities: + * + * CPU_68020 == MMU_68851 + * CPU_68030 == MMU_68030 + * CPU_68040 == FPU_68040 == MMU_68040 + * CPU_68060 == FPU_68060 == MMU_68060 + */ + +#define CPUB_68020 0 +#define CPUB_68030 1 +#define CPUB_68040 2 +#define CPUB_68060 3 + +#define CPU_68020 (1< - -/* We have a bigger command line buffer. */ -#undef COMMAND_LINE_SIZE - -#endif /* __KERNEL__ */ - -#define COMMAND_LINE_SIZE 512 -- cgit v1.2.3 From 230d1866a9f9b08c2ab4cb1b60f33fe230ee0a87 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 22:07:39 +1000 Subject: m68k: merge the non-MMU and MMU versions of ptrace.h It is trivial to merge the non-MMU and MMU versions of ptrace.h. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of ptrace.h are not installed). 
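As with setup.h above, the ptrace.h being replaced is only a two-way include stub, visible as the removed lines at the top of the diff below; since ptrace_no.h and ptrace_mm.h are themselves not exported, the installed ptrace.h could not be used from userspace. For reference, the removed stub has this shape:

/* old arch/m68k/include/asm/ptrace.h dispatch stub, removed by the diff below */
#ifdef __uClinux__
#include "ptrace_no.h"
#else
#include "ptrace_mm.h"
#endif
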
Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/ptrace.h | 88 +++++++++++++++++++++++++++++++++++++-- arch/m68k/include/asm/ptrace_mm.h | 80 ----------------------------------- arch/m68k/include/asm/ptrace_no.h | 87 -------------------------------------- 3 files changed, 85 insertions(+), 170 deletions(-) delete mode 100644 arch/m68k/include/asm/ptrace_mm.h delete mode 100644 arch/m68k/include/asm/ptrace_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h index e83cd2f6610..8c9194b9854 100644 --- a/arch/m68k/include/asm/ptrace.h +++ b/arch/m68k/include/asm/ptrace.h @@ -1,5 +1,87 @@ -#ifdef __uClinux__ -#include "ptrace_no.h" +#ifndef _M68K_PTRACE_H +#define _M68K_PTRACE_H + +#define PT_D1 0 +#define PT_D2 1 +#define PT_D3 2 +#define PT_D4 3 +#define PT_D5 4 +#define PT_D6 5 +#define PT_D7 6 +#define PT_A0 7 +#define PT_A1 8 +#define PT_A2 9 +#define PT_A3 10 +#define PT_A4 11 +#define PT_A5 12 +#define PT_A6 13 +#define PT_D0 14 +#define PT_USP 15 +#define PT_ORIG_D0 16 +#define PT_SR 17 +#define PT_PC 18 + +#ifndef __ASSEMBLY__ + +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +struct pt_regs { + long d1; + long d2; + long d3; + long d4; + long d5; + long a0; + long a1; + long a2; + long d0; + long orig_d0; + long stkadj; +#ifdef CONFIG_COLDFIRE + unsigned format : 4; /* frame format specifier */ + unsigned vector : 12; /* vector offset */ + unsigned short sr; + unsigned long pc; #else -#include "ptrace_mm.h" + unsigned short sr; + unsigned long pc; + unsigned format : 4; /* frame format specifier */ + unsigned vector : 12; /* vector offset */ #endif +}; + +/* + * This is the extended stack used by signal handlers and the context + * switcher: it's pushed after the normal "struct pt_regs". + */ +struct switch_stack { + unsigned long d6; + unsigned long d7; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long retpc; +}; + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 + +#ifdef __KERNEL__ + +#ifndef PS_S +#define PS_S (0x2000) +#define PS_M (0x1000) +#endif + +#define user_mode(regs) (!((regs)->sr & PS_S)) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +extern void show_regs(struct pt_regs *); +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ +#endif /* _M68K_PTRACE_H */ diff --git a/arch/m68k/include/asm/ptrace_mm.h b/arch/m68k/include/asm/ptrace_mm.h deleted file mode 100644 index 57e763d79bf..00000000000 --- a/arch/m68k/include/asm/ptrace_mm.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef _M68K_PTRACE_H -#define _M68K_PTRACE_H - -#define PT_D1 0 -#define PT_D2 1 -#define PT_D3 2 -#define PT_D4 3 -#define PT_D5 4 -#define PT_D6 5 -#define PT_D7 6 -#define PT_A0 7 -#define PT_A1 8 -#define PT_A2 9 -#define PT_A3 10 -#define PT_A4 11 -#define PT_A5 12 -#define PT_A6 13 -#define PT_D0 14 -#define PT_USP 15 -#define PT_ORIG_D0 16 -#define PT_SR 17 -#define PT_PC 18 - -#ifndef __ASSEMBLY__ - -/* this struct defines the way the registers are stored on the - stack during a system call. 
*/ - -struct pt_regs { - long d1; - long d2; - long d3; - long d4; - long d5; - long a0; - long a1; - long a2; - long d0; - long orig_d0; - long stkadj; - unsigned short sr; - unsigned long pc; - unsigned format : 4; /* frame format specifier */ - unsigned vector : 12; /* vector offset */ -}; - -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". - */ -struct switch_stack { - unsigned long d6; - unsigned long d7; - unsigned long a3; - unsigned long a4; - unsigned long a5; - unsigned long a6; - unsigned long retpc; -}; - -/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 -#define PTRACE_GETFPREGS 14 -#define PTRACE_SETFPREGS 15 - -#ifdef __KERNEL__ - -#ifndef PS_S -#define PS_S (0x2000) -#define PS_M (0x1000) -#endif - -#define user_mode(regs) (!((regs)->sr & PS_S)) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) instruction_pointer(regs) -extern void show_regs(struct pt_regs *); -#endif /* __KERNEL__ */ -#endif /* __ASSEMBLY__ */ -#endif /* _M68K_PTRACE_H */ diff --git a/arch/m68k/include/asm/ptrace_no.h b/arch/m68k/include/asm/ptrace_no.h deleted file mode 100644 index 8c9194b9854..00000000000 --- a/arch/m68k/include/asm/ptrace_no.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef _M68K_PTRACE_H -#define _M68K_PTRACE_H - -#define PT_D1 0 -#define PT_D2 1 -#define PT_D3 2 -#define PT_D4 3 -#define PT_D5 4 -#define PT_D6 5 -#define PT_D7 6 -#define PT_A0 7 -#define PT_A1 8 -#define PT_A2 9 -#define PT_A3 10 -#define PT_A4 11 -#define PT_A5 12 -#define PT_A6 13 -#define PT_D0 14 -#define PT_USP 15 -#define PT_ORIG_D0 16 -#define PT_SR 17 -#define PT_PC 18 - -#ifndef __ASSEMBLY__ - -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct pt_regs { - long d1; - long d2; - long d3; - long d4; - long d5; - long a0; - long a1; - long a2; - long d0; - long orig_d0; - long stkadj; -#ifdef CONFIG_COLDFIRE - unsigned format : 4; /* frame format specifier */ - unsigned vector : 12; /* vector offset */ - unsigned short sr; - unsigned long pc; -#else - unsigned short sr; - unsigned long pc; - unsigned format : 4; /* frame format specifier */ - unsigned vector : 12; /* vector offset */ -#endif -}; - -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". - */ -struct switch_stack { - unsigned long d6; - unsigned long d7; - unsigned long a3; - unsigned long a4; - unsigned long a5; - unsigned long a6; - unsigned long retpc; -}; - -/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 -#define PTRACE_GETFPREGS 14 -#define PTRACE_SETFPREGS 15 - -#ifdef __KERNEL__ - -#ifndef PS_S -#define PS_S (0x2000) -#define PS_M (0x1000) -#endif - -#define user_mode(regs) (!((regs)->sr & PS_S)) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) instruction_pointer(regs) -extern void show_regs(struct pt_regs *); -#endif /* __KERNEL__ */ -#endif /* __ASSEMBLY__ */ -#endif /* _M68K_PTRACE_H */ -- cgit v1.2.3 From 8dba99e077ae999c85b41c4567c852313528c7a7 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 22:10:08 +1000 Subject: m68k: merge the non-MMU and MMU versions of signal.h It is trivial to merge the non-MMU and MMU versions of signal.h. 
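(Before the full diff further below: the merge is not purely mechanical here, because only the MMU build provides the inline-asm sigset helpers. The unified signal.h keeps that difference behind a __uClinux__ guard, roughly as in this condensed sketch; helper bodies are elided, see the hunk below for the complete versions.)

#ifndef __uClinux__
#define __HAVE_ARCH_SIG_BITOPS

static inline void sigaddset(sigset_t *set, int _sig)
{
	asm ("bfset %0{%1,#1}"
	     : "+od" (*set)
	     : "id" ((_sig - 1) ^ 31)
	     : "cc");
}
/* ... sigdelset(), sigismember(), sigfindinword() as in the diff below ... */

struct pt_regs;
extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);

#else

#undef __HAVE_ARCH_SIG_BITOPS
#define ptrace_signal_deliver(regs, cookie) do { } while (0)

#endif /* __uClinux__ */
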
Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of signal.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/signal.h | 216 +++++++++++++++++++++++++++++++++++++- arch/m68k/include/asm/signal_mm.h | 206 ------------------------------------ arch/m68k/include/asm/signal_no.h | 159 ---------------------------- 3 files changed, 212 insertions(+), 369 deletions(-) delete mode 100644 arch/m68k/include/asm/signal_mm.h delete mode 100644 arch/m68k/include/asm/signal_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 3c19988bd93..08788fdefde 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -1,5 +1,213 @@ -#ifdef __uClinux__ -#include "signal_no.h" +#ifndef _M68K_SIGNAL_H +#define _M68K_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + #else -#include "signal_mm.h" -#endif +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. 
+ */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#include + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ +#include + +#ifndef __uClinux__ +#define __HAVE_ARCH_SIG_BITOPS + +static inline void sigaddset(sigset_t *set, int _sig) +{ + asm ("bfset %0{%1,#1}" + : "+od" (*set) + : "id" ((_sig - 1) ^ 31) + : "cc"); +} + +static inline void sigdelset(sigset_t *set, int _sig) +{ + asm ("bfclr %0{%1,#1}" + : "+od" (*set) + : "id" ((_sig - 1) ^ 31) + : "cc"); +} + +static inline int __const_sigismember(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); +} + +static inline int __gen_sigismember(sigset_t *set, int _sig) +{ + int ret; + asm ("bfextu %1{%2,#1},%0" + : "=d" (ret) + : "od" (*set), "id" ((_sig-1) ^ 31) + : "cc"); + return ret; +} + +#define sigismember(set,sig) \ + (__builtin_constant_p(sig) ? \ + __const_sigismember(set,sig) : \ + __gen_sigismember(set,sig)) + +static inline int sigfindinword(unsigned long word) +{ + asm ("bfffo %1{#0,#0},%0" + : "=d" (word) + : "d" (word & -word) + : "cc"); + return word ^ 31; +} + +struct pt_regs; +extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie); + +#else + +#undef __HAVE_ARCH_SIG_BITOPS +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif /* __uClinux__ */ +#endif /* __KERNEL__ */ + +#endif /* _M68K_SIGNAL_H */ diff --git a/arch/m68k/include/asm/signal_mm.h b/arch/m68k/include/asm/signal_mm.h deleted file mode 100644 index 3db8a81942f..00000000000 --- a/arch/m68k/include/asm/signal_mm.h +++ /dev/null @@ -1,206 +0,0 @@ -#ifndef _M68K_SIGNAL_H -#define _M68K_SIGNAL_H - -#include - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ -/* Most things should be clean enough to redefine this at will, if care - is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#else -/* Here we must cater to libcs that poke about in kernel headers. 
*/ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include - -#ifdef __KERNEL__ -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - __sigrestore_t sa_restorer; - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void __user *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ -#include - -#define __HAVE_ARCH_SIG_BITOPS - -static inline void sigaddset(sigset_t *set, int _sig) -{ - asm ("bfset %0{%1,#1}" - : "+od" (*set) - : "id" ((_sig - 1) ^ 31) - : "cc"); -} - -static inline void sigdelset(sigset_t *set, int _sig) -{ - asm ("bfclr %0{%1,#1}" - : "+od" (*set) - : "id" ((_sig - 1) ^ 31) - : "cc"); -} - -static inline int __const_sigismember(sigset_t *set, int _sig) -{ - unsigned long sig = _sig - 1; - return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); -} - -static inline int __gen_sigismember(sigset_t *set, int _sig) -{ - int ret; - asm ("bfextu %1{%2,#1},%0" - : "=d" (ret) - : "od" (*set), "id" ((_sig-1) ^ 31) - : "cc"); - return ret; -} - -#define sigismember(set,sig) \ - (__builtin_constant_p(sig) ? 
\ - __const_sigismember(set,sig) : \ - __gen_sigismember(set,sig)) - -static inline int sigfindinword(unsigned long word) -{ - asm ("bfffo %1{#0,#0},%0" - : "=d" (word) - : "d" (word & -word) - : "cc"); - return word ^ 31; -} - -struct pt_regs; -extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie); - -#endif /* __KERNEL__ */ - -#endif /* _M68K_SIGNAL_H */ diff --git a/arch/m68k/include/asm/signal_no.h b/arch/m68k/include/asm/signal_no.h deleted file mode 100644 index 216c08be54a..00000000000 --- a/arch/m68k/include/asm/signal_no.h +++ /dev/null @@ -1,159 +0,0 @@ -#ifndef _M68KNOMMU_SIGNAL_H -#define _M68KNOMMU_SIGNAL_H - -#include - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ -/* Most things should be clean enough to redefine this at will, if care - is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include - -#ifdef __KERNEL__ -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - void (*sa_restorer)(void); - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. 
*/ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ - -#include -#undef __HAVE_ARCH_SIG_BITOPS - -#define ptrace_signal_deliver(regs, cookie) do { } while (0) - -#endif /* __KERNEL__ */ - -#endif /* _M68KNOMMU_SIGNAL_H */ -- cgit v1.2.3 From 9df3d51b59bd6aa48aee76e0cde4b89d854f3694 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 16 Mar 2009 22:12:28 +1000 Subject: m68k: use the MMU version of unistd.h for all m68k platforms The MMU version of unistd.h can be use on non-MMU platrorms as well. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of unistd.h are not installed). Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/unistd.h | 377 +++++++++++++++++++++++++++++++++++++- arch/m68k/include/asm/unistd_mm.h | 372 ------------------------------------- arch/m68k/include/asm/unistd_no.h | 372 ------------------------------------- 3 files changed, 372 insertions(+), 749 deletions(-) delete mode 100644 arch/m68k/include/asm/unistd_mm.h delete mode 100644 arch/m68k/include/asm/unistd_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index df1d9d4cb1f..3c19027331f 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -1,5 +1,372 @@ -#ifdef __uClinux__ -#include "unistd_no.h" -#else -#include "unistd_mm.h" -#endif +#ifndef _ASM_M68K_UNISTD_H_ +#define _ASM_M68K_UNISTD_H_ + +/* + * This file contains the system call numbers. 
+ */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_chown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl /* 110 */ not supported +#define __NR_vhangup 111 +#define __NR_idle /* 112 */ Obsolete +#define __NR_vm86 /* 113 */ not supported +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_cacheflush 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define 
__NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_getpagesize 166 +#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread64 180 +#define __NR_pwrite64 181 +#define __NR_lchown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +#define __NR_getpmsg 188 /* some people actually want streams */ +#define __NR_putpmsg 189 /* some people actually want streams */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_chown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_lchown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_pivot_root 217 +#define __NR_getdents64 220 +#define __NR_gettid 221 +#define __NR_tkill 222 +#define __NR_setxattr 223 +#define __NR_lsetxattr 224 +#define __NR_fsetxattr 225 +#define __NR_getxattr 226 +#define __NR_lgetxattr 227 +#define __NR_fgetxattr 228 +#define __NR_listxattr 229 +#define __NR_llistxattr 230 +#define __NR_flistxattr 231 +#define __NR_removexattr 232 +#define __NR_lremovexattr 233 +#define __NR_fremovexattr 234 +#define __NR_futex 235 +#define __NR_sendfile64 236 +#define __NR_mincore 237 +#define __NR_madvise 238 +#define __NR_fcntl64 239 +#define __NR_readahead 240 +#define __NR_io_setup 241 +#define __NR_io_destroy 242 +#define __NR_io_getevents 243 +#define __NR_io_submit 244 +#define __NR_io_cancel 245 +#define __NR_fadvise64 246 +#define __NR_exit_group 247 +#define __NR_lookup_dcookie 248 +#define __NR_epoll_create 249 +#define __NR_epoll_ctl 250 +#define __NR_epoll_wait 251 +#define __NR_remap_file_pages 252 +#define __NR_set_tid_address 253 +#define __NR_timer_create 254 +#define __NR_timer_settime 255 +#define __NR_timer_gettime 256 +#define __NR_timer_getoverrun 257 +#define __NR_timer_delete 258 +#define __NR_clock_settime 259 +#define __NR_clock_gettime 260 
+#define __NR_clock_getres 261 +#define __NR_clock_nanosleep 262 +#define __NR_statfs64 263 +#define __NR_fstatfs64 264 +#define __NR_tgkill 265 +#define __NR_utimes 266 +#define __NR_fadvise64_64 267 +#define __NR_mbind 268 +#define __NR_get_mempolicy 269 +#define __NR_set_mempolicy 270 +#define __NR_mq_open 271 +#define __NR_mq_unlink 272 +#define __NR_mq_timedsend 273 +#define __NR_mq_timedreceive 274 +#define __NR_mq_notify 275 +#define __NR_mq_getsetattr 276 +#define __NR_waitid 277 +#define __NR_vserver 278 +#define __NR_add_key 279 +#define __NR_request_key 280 +#define __NR_keyctl 281 +#define __NR_ioprio_set 282 +#define __NR_ioprio_get 283 +#define __NR_inotify_init 284 +#define __NR_inotify_add_watch 285 +#define __NR_inotify_rm_watch 286 +#define __NR_migrate_pages 287 +#define __NR_openat 288 +#define __NR_mkdirat 289 +#define __NR_mknodat 290 +#define __NR_fchownat 291 +#define __NR_futimesat 292 +#define __NR_fstatat64 293 +#define __NR_unlinkat 294 +#define __NR_renameat 295 +#define __NR_linkat 296 +#define __NR_symlinkat 297 +#define __NR_readlinkat 298 +#define __NR_fchmodat 299 +#define __NR_faccessat 300 +#define __NR_pselect6 301 +#define __NR_ppoll 302 +#define __NR_unshare 303 +#define __NR_set_robust_list 304 +#define __NR_get_robust_list 305 +#define __NR_splice 306 +#define __NR_sync_file_range 307 +#define __NR_tee 308 +#define __NR_vmsplice 309 +#define __NR_move_pages 310 +#define __NR_sched_setaffinity 311 +#define __NR_sched_getaffinity 312 +#define __NR_kexec_load 313 +#define __NR_getcpu 314 +#define __NR_epoll_pwait 315 +#define __NR_utimensat 316 +#define __NR_signalfd 317 +#define __NR_timerfd_create 318 +#define __NR_eventfd 319 +#define __NR_fallocate 320 +#define __NR_timerfd_settime 321 +#define __NR_timerfd_gettime 322 +#define __NR_signalfd4 323 +#define __NR_eventfd2 324 +#define __NR_epoll_create1 325 +#define __NR_dup3 326 +#define __NR_pipe2 327 +#define __NR_inotify_init1 328 + +#ifdef __KERNEL__ + +#define NR_syscalls 329 + +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") + +#endif /* __KERNEL__ */ +#endif /* _ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/include/asm/unistd_mm.h b/arch/m68k/include/asm/unistd_mm.h deleted file mode 100644 index 3c19027331f..00000000000 --- a/arch/m68k/include/asm/unistd_mm.h +++ /dev/null @@ -1,372 +0,0 @@ -#ifndef _ASM_M68K_UNISTD_H_ -#define _ASM_M68K_UNISTD_H_ - -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_chown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl /* 110 */ not supported -#define __NR_vhangup 111 -#define __NR_idle /* 112 */ Obsolete -#define __NR_vm86 /* 113 */ not supported -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_cacheflush 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define 
__NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_getpagesize 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_lchown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_chown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_lchown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_getdents64 220 -#define __NR_gettid 221 -#define __NR_tkill 222 -#define __NR_setxattr 223 -#define __NR_lsetxattr 224 -#define __NR_fsetxattr 225 -#define __NR_getxattr 226 -#define __NR_lgetxattr 227 -#define __NR_fgetxattr 228 -#define __NR_listxattr 229 -#define __NR_llistxattr 230 -#define __NR_flistxattr 231 -#define __NR_removexattr 232 -#define __NR_lremovexattr 233 -#define __NR_fremovexattr 234 -#define __NR_futex 235 -#define __NR_sendfile64 236 -#define __NR_mincore 237 -#define __NR_madvise 238 -#define __NR_fcntl64 239 -#define __NR_readahead 240 -#define __NR_io_setup 241 -#define __NR_io_destroy 242 -#define __NR_io_getevents 243 -#define __NR_io_submit 244 -#define __NR_io_cancel 245 -#define __NR_fadvise64 246 -#define __NR_exit_group 247 -#define __NR_lookup_dcookie 248 -#define __NR_epoll_create 249 -#define __NR_epoll_ctl 250 -#define __NR_epoll_wait 251 -#define __NR_remap_file_pages 252 -#define __NR_set_tid_address 253 -#define __NR_timer_create 254 -#define __NR_timer_settime 255 -#define __NR_timer_gettime 256 -#define __NR_timer_getoverrun 257 -#define __NR_timer_delete 258 -#define __NR_clock_settime 259 -#define __NR_clock_gettime 260 
-#define __NR_clock_getres 261 -#define __NR_clock_nanosleep 262 -#define __NR_statfs64 263 -#define __NR_fstatfs64 264 -#define __NR_tgkill 265 -#define __NR_utimes 266 -#define __NR_fadvise64_64 267 -#define __NR_mbind 268 -#define __NR_get_mempolicy 269 -#define __NR_set_mempolicy 270 -#define __NR_mq_open 271 -#define __NR_mq_unlink 272 -#define __NR_mq_timedsend 273 -#define __NR_mq_timedreceive 274 -#define __NR_mq_notify 275 -#define __NR_mq_getsetattr 276 -#define __NR_waitid 277 -#define __NR_vserver 278 -#define __NR_add_key 279 -#define __NR_request_key 280 -#define __NR_keyctl 281 -#define __NR_ioprio_set 282 -#define __NR_ioprio_get 283 -#define __NR_inotify_init 284 -#define __NR_inotify_add_watch 285 -#define __NR_inotify_rm_watch 286 -#define __NR_migrate_pages 287 -#define __NR_openat 288 -#define __NR_mkdirat 289 -#define __NR_mknodat 290 -#define __NR_fchownat 291 -#define __NR_futimesat 292 -#define __NR_fstatat64 293 -#define __NR_unlinkat 294 -#define __NR_renameat 295 -#define __NR_linkat 296 -#define __NR_symlinkat 297 -#define __NR_readlinkat 298 -#define __NR_fchmodat 299 -#define __NR_faccessat 300 -#define __NR_pselect6 301 -#define __NR_ppoll 302 -#define __NR_unshare 303 -#define __NR_set_robust_list 304 -#define __NR_get_robust_list 305 -#define __NR_splice 306 -#define __NR_sync_file_range 307 -#define __NR_tee 308 -#define __NR_vmsplice 309 -#define __NR_move_pages 310 -#define __NR_sched_setaffinity 311 -#define __NR_sched_getaffinity 312 -#define __NR_kexec_load 313 -#define __NR_getcpu 314 -#define __NR_epoll_pwait 315 -#define __NR_utimensat 316 -#define __NR_signalfd 317 -#define __NR_timerfd_create 318 -#define __NR_eventfd 319 -#define __NR_fallocate 320 -#define __NR_timerfd_settime 321 -#define __NR_timerfd_gettime 322 -#define __NR_signalfd4 323 -#define __NR_eventfd2 324 -#define __NR_epoll_create1 325 -#define __NR_dup3 326 -#define __NR_pipe2 327 -#define __NR_inotify_init1 328 - -#ifdef __KERNEL__ - -#define NR_syscalls 329 - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") - -#endif /* __KERNEL__ */ -#endif /* _ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/include/asm/unistd_no.h b/arch/m68k/include/asm/unistd_no.h deleted file mode 100644 index b034a2f7b44..00000000000 --- a/arch/m68k/include/asm/unistd_no.h +++ /dev/null @@ -1,372 +0,0 @@ -#ifndef _ASM_M68K_UNISTD_H_ -#define _ASM_M68K_UNISTD_H_ - -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_chown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl /* 110 */ not supported -#define __NR_vhangup 111 -#define __NR_idle /* 112 */ Obsolete -#define __NR_vm86 /* 113 */ not supported -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_cacheflush 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define 
__NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_getpagesize 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_lchown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_chown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_lchown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_getdents64 220 -#define __NR_gettid 221 -#define __NR_tkill 222 -#define __NR_setxattr 223 -#define __NR_lsetxattr 224 -#define __NR_fsetxattr 225 -#define __NR_getxattr 226 -#define __NR_lgetxattr 227 -#define __NR_fgetxattr 228 -#define __NR_listxattr 229 -#define __NR_llistxattr 230 -#define __NR_flistxattr 231 -#define __NR_removexattr 232 -#define __NR_lremovexattr 233 -#define __NR_fremovexattr 234 -#define __NR_futex 235 -#define __NR_sendfile64 236 -#define __NR_mincore 237 -#define __NR_madvise 238 -#define __NR_fcntl64 239 -#define __NR_readahead 240 -#define __NR_io_setup 241 -#define __NR_io_destroy 242 -#define __NR_io_getevents 243 -#define __NR_io_submit 244 -#define __NR_io_cancel 245 -#define __NR_fadvise64 246 -#define __NR_exit_group 247 -#define __NR_lookup_dcookie 248 -#define __NR_epoll_create 249 -#define __NR_epoll_ctl 250 -#define __NR_epoll_wait 251 -#define __NR_remap_file_pages 252 -#define __NR_set_tid_address 253 -#define __NR_timer_create 254 -#define __NR_timer_settime 255 -#define __NR_timer_gettime 256 -#define __NR_timer_getoverrun 257 -#define __NR_timer_delete 258 -#define __NR_clock_settime 259 -#define __NR_clock_gettime 260 
-#define __NR_clock_getres 261 -#define __NR_clock_nanosleep 262 -#define __NR_statfs64 263 -#define __NR_fstatfs64 264 -#define __NR_tgkill 265 -#define __NR_utimes 266 -#define __NR_fadvise64_64 267 -#define __NR_mbind 268 -#define __NR_get_mempolicy 269 -#define __NR_set_mempolicy 270 -#define __NR_mq_open 271 -#define __NR_mq_unlink 272 -#define __NR_mq_timedsend 273 -#define __NR_mq_timedreceive 274 -#define __NR_mq_notify 275 -#define __NR_mq_getsetattr 276 -#define __NR_waitid 277 -#define __NR_vserver 278 -#define __NR_add_key 279 -#define __NR_request_key 280 -#define __NR_keyctl 281 -#define __NR_ioprio_set 282 -#define __NR_ioprio_get 283 -#define __NR_inotify_init 284 -#define __NR_inotify_add_watch 285 -#define __NR_inotify_rm_watch 286 -#define __NR_migrate_pages 287 -#define __NR_openat 288 -#define __NR_mkdirat 289 -#define __NR_mknodat 290 -#define __NR_fchownat 291 -#define __NR_futimesat 292 -#define __NR_fstatat64 293 -#define __NR_unlinkat 294 -#define __NR_renameat 295 -#define __NR_linkat 296 -#define __NR_symlinkat 297 -#define __NR_readlinkat 298 -#define __NR_fchmodat 299 -#define __NR_faccessat 300 -#define __NR_pselect6 301 -#define __NR_ppoll 302 -#define __NR_unshare 303 -#define __NR_set_robust_list 304 -#define __NR_get_robust_list 305 -#define __NR_splice 306 -#define __NR_sync_file_range 307 -#define __NR_tee 308 -#define __NR_vmsplice 309 -#define __NR_move_pages 310 -#define __NR_sched_setaffinity 311 -#define __NR_sched_getaffinity 312 -#define __NR_kexec_load 313 -#define __NR_getcpu 314 -#define __NR_epoll_pwait 315 -#define __NR_utimensat 316 -#define __NR_signalfd 317 -#define __NR_timerfd_create 318 -#define __NR_eventfd 319 -#define __NR_fallocate 320 -#define __NR_timerfd_settime 321 -#define __NR_timerfd_gettime 322 -#define __NR_signalfd4 323 -#define __NR_eventfd2 324 -#define __NR_epoll_create1 325 -#define __NR_dup3 326 -#define __NR_pipe2 327 -#define __NR_inotify_init1 328 - -#ifdef __KERNEL__ - -#define NR_syscalls 329 - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") - -#endif /* __KERNEL__ */ -#endif /* _ASM_M68K_UNISTD_H_ */ -- cgit v1.2.3 From 7a2cf4af1554d891b440cc3a649d01ed222206c3 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Tue, 17 Mar 2009 08:47:11 +1000 Subject: m68k: merge the non-MMU and MMU versions of siginfo.h It is trivial to merge the non-MMU and MMU versions of siginfo.h. Without a single file "make headers_install" is broken for m68k (since each of the sub-varients of siginfo.h are not installed). 
Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/siginfo.h | 98 ++++++++++++++++++++++++++++++++++++-- arch/m68k/include/asm/siginfo_mm.h | 92 ----------------------------------- arch/m68k/include/asm/siginfo_no.h | 6 --- 3 files changed, 95 insertions(+), 101 deletions(-) delete mode 100644 arch/m68k/include/asm/siginfo_mm.h delete mode 100644 arch/m68k/include/asm/siginfo_no.h (limited to 'arch') diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h index 61219d7affc..ca7dde8fd22 100644 --- a/arch/m68k/include/asm/siginfo.h +++ b/arch/m68k/include/asm/siginfo.h @@ -1,5 +1,97 @@ -#ifdef __uClinux__ -#include "siginfo_no.h" +#ifndef _M68K_SIGINFO_H +#define _M68K_SIGINFO_H + +#ifndef __uClinux__ +#define HAVE_ARCH_SIGINFO_T +#define HAVE_ARCH_COPY_SIGINFO +#endif + +#include + +#ifndef __uClinux__ + +typedef struct siginfo { + int si_signo; + int si_errno; + int si_code; + + union { + int _pad[SI_PAD_SIZE]; + + /* kill() */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid_t _uid; /* backwards compatibility */ + __kernel_uid32_t _uid32; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; + sigval_t _sigval; /* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid_t _uid; /* backwards compatibility */ + sigval_t _sigval; + __kernel_uid32_t _uid32; /* sender's uid */ + } _rt; + + /* SIGCHLD */ + struct { + __kernel_pid_t _pid; /* which child */ + __kernel_uid_t _uid; /* backwards compatibility */ + int _status; /* exit code */ + clock_t _utime; + clock_t _stime; + __kernel_uid32_t _uid32; /* sender's uid */ + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + void *_addr; /* faulting insn/memory ref. */ + } _sigfault; + + /* SIGPOLL */ + struct { + int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + } _sifields; +} siginfo_t; + +#define UID16_SIGINFO_COMPAT_NEEDED + +/* + * How these fields are to be accessed. 
+ */ +#undef si_uid +#ifdef __KERNEL__ +#define si_uid _sifields._kill._uid32 +#define si_uid16 _sifields._kill._uid #else -#include "siginfo_mm.h" +#define si_uid _sifields._kill._uid +#endif + +#ifdef __KERNEL__ + +#include + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); +} + +#endif /* __KERNEL__ */ +#endif /* !__uClinux__ */ + #endif diff --git a/arch/m68k/include/asm/siginfo_mm.h b/arch/m68k/include/asm/siginfo_mm.h deleted file mode 100644 index 05a8d6d90b5..00000000000 --- a/arch/m68k/include/asm/siginfo_mm.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef _M68K_SIGINFO_H -#define _M68K_SIGINFO_H - -#define HAVE_ARCH_SIGINFO_T -#define HAVE_ARCH_COPY_SIGINFO - -#include - -typedef struct siginfo { - int si_signo; - int si_errno; - int si_code; - - union { - int _pad[SI_PAD_SIZE]; - - /* kill() */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid_t _uid; /* backwards compatibility */ - __kernel_uid32_t _uid32; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; - sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid_t _uid; /* backwards compatibility */ - sigval_t _sigval; - __kernel_uid32_t _uid32; /* sender's uid */ - } _rt; - - /* SIGCHLD */ - struct { - __kernel_pid_t _pid; /* which child */ - __kernel_uid_t _uid; /* backwards compatibility */ - int _status; /* exit code */ - clock_t _utime; - clock_t _stime; - __kernel_uid32_t _uid32; /* sender's uid */ - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ - struct { - void *_addr; /* faulting insn/memory ref. */ - } _sigfault; - - /* SIGPOLL */ - struct { - int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; - } _sifields; -} siginfo_t; - -#define UID16_SIGINFO_COMPAT_NEEDED - -/* - * How these fields are to be accessed. - */ -#undef si_uid -#ifdef __KERNEL__ -#define si_uid _sifields._kill._uid32 -#define si_uid16 _sifields._kill._uid -#else -#define si_uid _sifields._kill._uid -#endif - -#ifdef __KERNEL__ - -#include - -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) -{ - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); -} - -#endif /* __KERNEL__ */ - -#endif diff --git a/arch/m68k/include/asm/siginfo_no.h b/arch/m68k/include/asm/siginfo_no.h deleted file mode 100644 index b18e5f4064a..00000000000 --- a/arch/m68k/include/asm/siginfo_no.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68KNOMMU_SIGINFO_H -#define _M68KNOMMU_SIGINFO_H - -#include - -#endif -- cgit v1.2.3 From da62e71d13ad0b76011491e36cb58999c464516a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 09:30:36 +0900 Subject: sh: dma: Make PVR2 DMA configurable. With arch/sh/drivers/dma/ always being built, the Dreamcast DMA engines are being unconditionally built in, regardless of whether the DMA API is enabled or not. This is a regression from previous behaviour, but there is not much advantage in building them all in unconditionally regardless. 
Add a new config option to make it optional, and update the only user of it to reflect that. Signed-off-by: Paul Mundt --- arch/sh/drivers/dma/Kconfig | 13 +++++++++++++ arch/sh/drivers/dma/Makefile | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index 32bb8fa605c..ae26610837b 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -54,4 +54,17 @@ config SH_DMABRG of the SH7760. Say Y if you want to use Audio/USB DMA on your SH7760 board. +config PVR2_DMA + tristate "PowerVR 2 DMAC support" + depends on SH_DREAMCAST && SH_DMA + help + Selecting this will enable support for the PVR2 DMA controller. + As this chains off of the on-chip DMAC, that must also be + enabled by default. + + This is primarily used by the pvr2fb framebuffer driver for + certain optimizations, but is not necessary for functionality. + + If in doubt, say N. + endmenu diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index ab956adacb4..cff52cb6ac7 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile @@ -4,5 +4,6 @@ obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o obj-$(CONFIG_SH_DMA) += dma-sh.o -obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o +obj-$(CONFIG_SH_DREAMCAST) += dma-g2.o +obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o obj-$(CONFIG_SH_DMABRG) += dmabrg.o -- cgit v1.2.3 From 40f49e7ed77f1b753a7243c0137e4767a50ea8bd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 12:47:56 +0900 Subject: sh: dma: Make G2 DMA configurable. Follow the PVR2 DMAC change for G2 DMA. Signed-off-by: Paul Mundt --- arch/sh/drivers/dma/Kconfig | 11 +++++++++++ arch/sh/drivers/dma/Makefile | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index ae26610837b..f13a05285a9 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -67,4 +67,15 @@ config PVR2_DMA If in doubt, say N. +config G2_DMA + tristate "G2 Bus DMA support" + depends on SH_DREAMCAST + select SH_DMA_API + help + This enables support for the DMA controller for the Dreamcast's + G2 bus. Drivers that want this will generally enable this on + their own. + + If in doubt, say N. + endmenu diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index cff52cb6ac7..c6068137b46 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o obj-$(CONFIG_SH_DMA) += dma-sh.o -obj-$(CONFIG_SH_DREAMCAST) += dma-g2.o obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o +obj-$(CONFIG_G2_DMA) += dma-g2.o obj-$(CONFIG_SH_DMABRG) += dmabrg.o -- cgit v1.2.3 From 32910e2c52cae552f2651c5360bae8033adb8aac Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Tue, 17 Mar 2009 05:54:37 +0000 Subject: sh: espt-giga board support This adds support for the ESPT-Giga (Ethernet Serial Parallel Translator) SH7763-based reference board. Board support is relatively sparse, presently supporting serial, gigabit ethernet, USB host, and MTD. 
More information (in Japanese) available at: http://www.cente.jp/product/cente_hard/ESPT-Giga.html Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/boards/Kconfig | 7 + arch/sh/boards/Makefile | 1 + arch/sh/boards/board-espt.c | 102 ++++ arch/sh/configs/espt_defconfig | 1190 ++++++++++++++++++++++++++++++++++++++++ arch/sh/tools/mach-types | 1 + 5 files changed, 1301 insertions(+) create mode 100644 arch/sh/boards/board-espt.c create mode 100644 arch/sh/configs/espt_defconfig (limited to 'arch') diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index bd3569a9934..48c043bf45c 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -190,6 +190,13 @@ config SH_SH7763RDP Select SH7763RDP if configuring for a Renesas SH7763 evaluation board. +config SH_ESPT + bool "ESPT" + depends on CPU_SUBTYPE_SH7763 + help + Select ESPT if configuring for a Renesas SH7763 + with gigabit ether evaluation board. + config SH_EDOSK7705 bool "EDOSK7705" depends on CPU_SUBTYPE_SH7705 diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index 6f101a8161f..d0daebce8b3 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o obj-$(CONFIG_SH_URQUELL) += board-urquell.o obj-$(CONFIG_SH_SHMIN) += board-shmin.o obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o +obj-$(CONFIG_SH_ESPT) += board-espt.o diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c new file mode 100644 index 00000000000..d5ce5e18eb3 --- /dev/null +++ b/arch/sh/boards/board-espt.c @@ -0,0 +1,102 @@ +/* + * Data Technology Inc. ESPT-GIGA board suport + * + * Copyright (C) 2008, 2009 Renesas Solutions Corp. + * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* NOR Flash */ +static struct mtd_partition espt_nor_flash_partitions[] = { + { + .name = "U-Boot", + .offset = 0, + .size = (2 * SZ_128K), + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, { + .name = "Linux-Kernel", + .offset = MTDPART_OFS_APPEND, + .size = (20 * SZ_128K), + }, { + .name = "Root Filesystem", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct physmap_flash_data espt_nor_flash_data = { + .width = 2, + .parts = espt_nor_flash_partitions, + .nr_parts = ARRAY_SIZE(espt_nor_flash_partitions), +}; + +static struct resource espt_nor_flash_resources[] = { + [0] = { + .name = "NOR Flash", + .start = 0, + .end = SZ_8M - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device espt_nor_flash_device = { + .name = "physmap-flash", + .resource = espt_nor_flash_resources, + .num_resources = ARRAY_SIZE(espt_nor_flash_resources), + .dev = { + .platform_data = &espt_nor_flash_data, + }, +}; + +/* SH-Ether */ +static struct resource sh_eth_resources[] = { + { + .start = 0xFEE00800, /* use eth1 */ + .end = 0xFEE00F7C - 1, + .flags = IORESOURCE_MEM, + }, { + .start = 57, /* irq number */ + .flags = IORESOURCE_IRQ, + }, +}; + +static struct sh_eth_plat_data sh7763_eth_pdata = { + .phy = 0, + .edmac_endian = EDMAC_LITTLE_ENDIAN, +}; + +static struct platform_device espt_eth_device = { + .name = "sh-eth", + .resource = sh_eth_resources, + .num_resources = ARRAY_SIZE(sh_eth_resources), + .dev = { + .platform_data = &sh7763_eth_pdata, + }, +}; + +static struct platform_device *espt_devices[] __initdata = { + &espt_nor_flash_device, + &espt_eth_device, +}; + +static int __init espt_devices_setup(void) +{ + return platform_add_devices(espt_devices, + ARRAY_SIZE(espt_devices)); +} +device_initcall(espt_devices_setup); + +static struct sh_machine_vector mv_espt __initmv = { + .mv_name = "ESPT-GIGA", +}; diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig new file mode 100644 index 00000000000..873ec42c6e6 --- /dev/null +++ b/arch/sh/configs/espt_defconfig @@ -0,0 +1,1190 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.29-rc7 +# Tue Mar 17 13:25:58 2009 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +# CONFIG_GENERIC_GPIO is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set 
+CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set +# CONFIG_BLK_DEV_INITRD is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# CONFIG_MARKERS is not set +CONFIG_OPROFILE=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +# CONFIG_MODULE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="anticipatory" +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH4=y +CONFIG_CPU_SH4A=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +CONFIG_CPU_SUBTYPE_SH7763=y +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SH7786 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y 
+CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +# CONFIG_DISCONTIGMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_STATIC=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_UNEVICTABLE_LRU=y + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +# CONFIG_SH_STORE_QUEUES is not set +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +# CONFIG_SH_SH7763RDP is not set +CONFIG_SH_ESPT=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=28 +CONFIG_SH_PCLK_FREQ=66666666 +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +# CONFIG_HEARTBEAT is not set +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_SECCOMP=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/nfs ip=bootp" + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +# CONFIG_PM is not set +# CONFIG_CPU_IDLE is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_COMPAT_NET_DEV_OPS=y +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP 
is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set +CONFIG_MTD_MAP_BANK_WIDTH_2=y +# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR 
is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set +# CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_LIBFC is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_DH is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +CONFIG_MDIO_BITBANG=y +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_AX88796 is not set +# CONFIG_STNIC is not set +CONFIG_SH_ETH=y +# CONFIG_SMC91X is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set +# CONFIG_IWLWIFI_LEDS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET is not set +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# 
CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=3 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +CONFIG_FB_FOREIGN_ENDIAN=y +CONFIG_FB_BOTH_ENDIAN=y +# CONFIG_FB_BIG_ENDIAN is not set +# CONFIG_FB_LITTLE_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_SH_MOBILE_LCDC is not set +CONFIG_FB_SH7760=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_LOGO_SUPERH_MONO=y +CONFIG_LOGO_SUPERH_VGA16=y +CONFIG_LOGO_SUPERH_CLUT224=y +# 
CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y +# CONFIG_USB_ARCH_HAS_EHCI is not set +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +# CONFIG_USB_DEVICEFS is not set +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_OHCI_HCD=y +# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set +# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# + +# +# see USB_STORAGE Help for more information +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_BERRY_CHARGE is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set +# CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_DNOTIFY=y 
+CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +CONFIG_AUTOFS_FS=y +CONFIG_AUTOFS4_FS=y +# CONFIG_FUSE_FS is not set +CONFIG_GENERIC_ACL=y + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +CONFIG_CRAMFS=y +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +CONFIG_ROMFS_FS=y +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_SUNRPC_REGISTER_V4 is not set +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y +CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_RING_BUFFER=y +CONFIG_TRACING=y + +# +# Tracers +# +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_SAMPLES is not set 
+CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set +# CONFIG_MORE_COMPILE_OPTIONS is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_MANAGER2 is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_HW=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +CONFIG_CRC_T10DIF=y +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 8f9e1662fa9..4932705603b 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -53,3 +53,4 @@ AP325RXA SH_AP325RXA SH7763RDP SH_SH7763RDP SH7785LCR SH_SH7785LCR URQUELL SH_URQUELL +ESPT SH_ESPT -- cgit v1.2.3 From da78800632197ac12adcdefbf09991d82adb8201 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Tue, 17 Mar 2009 05:53:26 +0000 Subject: sh: sh7763rdp: Change IRQ number for sh_eth of sh7763rdp IRQ for sh_eth of sh7763rdp became multi handling. Therefore, the IRQ number of sh_eth is changed, too. 
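(Editorial illustration, not part of the patch: "multi IRQ handling" here means the three Ether interrupt sources previously wired to IRQs 57, 58 and 59 are now serviced through the single IRQ 57, so one handler typically demultiplexes the pending events from a status word rather than each source having its own vector. The self-contained sketch below only shows that general pattern; the event names and layout are hypothetical and are not the real sh_eth register interface.)

/* Hypothetical, standalone demo of demultiplexing several events behind one IRQ line. */
#include <stdio.h>

#define EVT_TX  (1u << 0)	/* transmit complete (hypothetical bit) */
#define EVT_RX  (1u << 1)	/* receive complete (hypothetical bit) */
#define EVT_ERR (1u << 2)	/* error / link change (hypothetical bit) */

/* One handler services every pending source reported by the status word. */
static void shared_irq_handler(unsigned int status)
{
	if (status & EVT_TX)
		printf("handle tx\n");
	if (status & EVT_RX)
		printf("handle rx\n");
	if (status & EVT_ERR)
		printf("handle error\n");
}

int main(void)
{
	/* Pretend two of the muxed sources raised the line at the same time. */
	shared_irq_handler(EVT_RX | EVT_ERR);
	return 0;
}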
Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/boards/mach-sh7763rdp/setup.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index 6f926fd2162..390534a0b35 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -63,15 +63,19 @@ static struct platform_device sh7763rdp_nor_flash_device = { }, }; -/* SH-Ether */ +/* + * SH-Ether + * + * SH Ether of SH7763 has multi IRQ handling. + * (57,58,59 -> 57) + */ static struct resource sh_eth_resources[] = { { .start = 0xFEE00800, /* use eth1 */ .end = 0xFEE00F7C - 1, .flags = IORESOURCE_MEM, }, { - .start = 58, /* irq number */ - .end = 58, + .start = 57, /* irq number */ .flags = IORESOURCE_IRQ, }, }; -- cgit v1.2.3 From 8263a67e169fdf0d06d172acbf6c03ae172a69d4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 17:49:49 +0900 Subject: sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores. This adds support for extended ASIDs (up to 16-bits) on newer SH-X3 cores that implement the PTEAEX register and respective functionality. Presently only the 65nm SH7786 implements this (the 90nm parts only support legacy 8-bit ASIDs). The main change is in how the PTE is written out when loading the entry into the TLB, as well as in how the TLB entry is selectively flushed. While SH-X2 extended mode splits out the memory-mapped U and I-TLB data arrays for extra bits, extended ASID mode splits out the address arrays. While we don't use the memory-mapped data array access, the address array accesses are necessary for selective TLB flushes, so these are newly implemented and replace the generic SH-4 implementation. With this, TLB flushes in switch_mm() are almost non-existent on newer parts.
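(Editorial illustration, not part of the patch: the practical effect of widening the ASID field is that the per-CPU context value can hand out 65536 address-space IDs before its "version" rolls over and forces a full TLB flush, instead of 256, which is why switch_mm() flushes become so rare. The minimal sketch below just prints that arithmetic using the same mask values the diff introduces; it is a hypothetical userland program, not kernel code.)

/* Hypothetical demo: version/ASID split for the legacy and PTEAEX mask values. */
#include <stdio.h>

/* Low bits hold the ASID, the remaining bits hold the rollover "version". */
static void show(const char *name, unsigned long asid_mask)
{
	unsigned long version_mask  = ~0UL & ~asid_mask;
	unsigned long first_version = asid_mask + 1;

	printf("%s: %lu ASIDs per version (version mask %#lx, first version %#lx)\n",
	       name, asid_mask + 1, version_mask, first_version);
}

int main(void)
{
	show("legacy 8-bit ASIDs ", 0x000000ffUL);	/* pre-PTEAEX parts */
	show("PTEAEX 16-bit ASIDs", 0x0000ffffUL);	/* SH-X3 with PTE ASID extension */
	return 0;
}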
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/Kconfig.cpu | 3 + arch/sh/include/asm/cpu-features.h | 1 + arch/sh/include/asm/mmu_context.h | 15 +++-- arch/sh/include/asm/mmu_context_32.h | 12 ++++ arch/sh/include/cpu-sh4/cpu/mmu_context.h | 35 +++++------ arch/sh/kernel/cpu/sh4/probe.c | 2 +- arch/sh/kernel/setup.c | 2 +- arch/sh/mm/Makefile_32 | 6 +- arch/sh/mm/tlb-pteaex.c | 99 +++++++++++++++++++++++++++++++ 10 files changed, 148 insertions(+), 28 deletions(-) create mode 100644 arch/sh/mm/tlb-pteaex.c (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index a0c879d17fd..6c56495fd15 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -365,6 +365,7 @@ config CPU_SUBTYPE_SH7786 bool "Support SH7786 processor" select CPU_SH4A select CPU_SHX3 + select CPU_HAS_PTEAEX select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu index 0e27fe3b182..c7d704381a6 100644 --- a/arch/sh/Kconfig.cpu +++ b/arch/sh/Kconfig.cpu @@ -104,6 +104,9 @@ config CPU_HAS_SR_RB config CPU_HAS_PTEA bool +config CPU_HAS_PTEAEX + bool + config CPU_HAS_DSP bool diff --git a/arch/sh/include/asm/cpu-features.h b/arch/sh/include/asm/cpu-features.h index 86308aa3973..694abe490ed 100644 --- a/arch/sh/include/asm/cpu-features.h +++ b/arch/sh/include/asm/cpu-features.h @@ -21,5 +21,6 @@ #define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */ #define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */ #define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */ +#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */ #endif /* __ASM_SH_CPU_FEATURES_H */ diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 5d9157bd474..2a9c55f1a83 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -19,13 +19,18 @@ * (a) TLB cache version (or round, cycle whatever expression you like) * (b) ASID (Address Space IDentifier) */ +#ifdef CONFIG_CPU_HAS_PTEAEX +#define MMU_CONTEXT_ASID_MASK 0x0000ffff +#else #define MMU_CONTEXT_ASID_MASK 0x000000ff -#define MMU_CONTEXT_VERSION_MASK 0xffffff00 -#define MMU_CONTEXT_FIRST_VERSION 0x00000100 -#define NO_CONTEXT 0UL +#endif -/* ASID is 8-bit value, so it can't be 0x100 */ -#define MMU_NO_ASID 0x100 +#define MMU_CONTEXT_VERSION_MASK (~0UL & ~MMU_CONTEXT_ASID_MASK) +#define MMU_CONTEXT_FIRST_VERSION (MMU_CONTEXT_ASID_MASK + 1) + +/* Impossible ASID value, to differentiate from NO_CONTEXT. */ +#define MMU_NO_ASID MMU_CONTEXT_FIRST_VERSION +#define NO_CONTEXT 0UL #define asid_cache(cpu) (cpu_data[cpu].asid_cache) diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h index f4f9aebd68b..8ef800c549a 100644 --- a/arch/sh/include/asm/mmu_context_32.h +++ b/arch/sh/include/asm/mmu_context_32.h @@ -10,6 +10,17 @@ static inline void destroy_context(struct mm_struct *mm) /* Do nothing */ } +#ifdef CONFIG_CPU_HAS_PTEAEX +static inline void set_asid(unsigned long asid) +{ + __raw_writel(asid, MMU_PTEAEX); +} + +static inline unsigned long get_asid(void) +{ + return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK; +} +#else static inline void set_asid(unsigned long asid) { unsigned long __dummy; @@ -33,6 +44,7 @@ static inline unsigned long get_asid(void) asid &= MMU_CONTEXT_ASID_MASK; return asid; } +#endif /* CONFIG_CPU_HAS_PTEAEX */ /* MMU_TTB is used for optimizing the fault handling. 
*/ static inline void set_TTB(pgd_t *pgd) diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h index 9ea8eb27b18..3ce7ef6c297 100644 --- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h +++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h @@ -14,28 +14,35 @@ #define MMU_PTEL 0xFF000004 /* Page table entry register LOW */ #define MMU_TTB 0xFF000008 /* Translation table base register */ #define MMU_TEA 0xFF00000C /* TLB Exception Address */ -#define MMU_PTEA 0xFF000034 /* Page table entry assistance register */ +#define MMU_PTEA 0xFF000034 /* PTE assistance register */ +#define MMU_PTEAEX 0xFF00007C /* PTE ASID extension register */ #define MMUCR 0xFF000010 /* MMU Control Register */ -#define MMU_ITLB_ADDRESS_ARRAY 0xF2000000 #define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 +#define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000 #define MMU_PAGE_ASSOC_BIT 0x80 #define MMUCR_TI (1<<2) -#ifdef CONFIG_X2TLB -#define MMUCR_ME (1 << 7) -#else -#define MMUCR_ME (0) -#endif - #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40) #define MMUCR_SE (1 << 4) #else #define MMUCR_SE (0) #endif +#ifdef CONFIG_CPU_HAS_PTEAEX +#define MMUCR_AEX (1 << 6) +#else +#define MMUCR_AEX (0) +#endif + +#ifdef CONFIG_X2TLB +#define MMUCR_ME (1 << 7) +#else +#define MMUCR_ME (0) +#endif + #ifdef CONFIG_SH_STORE_QUEUES #define MMUCR_SQMD (1 << 9) #else @@ -43,17 +50,7 @@ #endif #define MMU_NTLB_ENTRIES 64 -#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE) - -#define MMU_ITLB_DATA_ARRAY 0xF3000000 -#define MMU_UTLB_DATA_ARRAY 0xF7000000 - -#define MMU_UTLB_ENTRIES 64 -#define MMU_U_ENTRY_SHIFT 8 -#define MMU_UTLB_VALID 0x100 -#define MMU_ITLB_ENTRIES 4 -#define MMU_I_ENTRY_SHIFT 8 -#define MMU_ITLB_VALID 0x100 +#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE|MMUCR_AEX) #define TRA 0xff000020 #define EXPEVT 0xff000024 diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 2bd0ec96263..3d3a3c4425a 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -134,7 +134,7 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | - CPU_HAS_LLSC; + CPU_HAS_LLSC | CPU_HAS_PTEAEX; break; case 0x3008: boot_cpu_data.icache.ways = 4; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 61ab2a7f864..24c60251f68 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -449,7 +449,7 @@ EXPORT_SYMBOL(get_cpu_subtype); /* Symbolic CPU flags, keep in sync with asm/cpu-features.h */ static const char *cpu_flags[] = { "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr", - "ptea", "llsc", "l2", "op32", NULL + "ptea", "llsc", "l2", "op32", "pteaex", NULL }; static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c) diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 469ff167245..986a1e05583 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -25,8 +25,10 @@ obj-$(CONFIG_CPU_SH4) += cache-debugfs.o endif ifdef CONFIG_MMU -obj-$(CONFIG_CPU_SH3) += tlb-sh3.o -obj-$(CONFIG_CPU_SH4) += tlb-sh4.o +tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o +tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o +tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o +obj-y += $(tlb-y) ifndef CONFIG_CACHE_OFF obj-$(CONFIG_CPU_SH4) += pg-sh4.o obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c new file mode 100644 index 00000000000..5c9b2d781e0 --- /dev/null +++ 
b/arch/sh/mm/tlb-pteaex.c @@ -0,0 +1,99 @@ +/* + * arch/sh/mm/tlb-pteaex.c + * + * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions. + * + * Copyright (C) 2009 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include + +void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ + unsigned long flags; + unsigned long pteval; + unsigned long vpn; + + /* Ptrace may call this routine. */ + if (vma && current->active_mm != vma->vm_mm) + return; + +#ifndef CONFIG_CACHE_OFF + { + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + + if (!test_bit(PG_mapped, &page->flags)) { + unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; + __flush_wback_region((void *)P1SEGADDR(phys), + PAGE_SIZE); + __set_bit(PG_mapped, &page->flags); + } + } + } +#endif + + local_irq_save(flags); + + /* Set PTEH register */ + vpn = address & MMU_VPN_MASK; + __raw_writel(vpn, MMU_PTEH); + + /* Set PTEAEX */ + __raw_writel(get_asid(), MMU_PTEAEX); + + pteval = pte.pte_low; + + /* Set PTEA register */ +#ifdef CONFIG_X2TLB + /* + * For the extended mode TLB this is trivial, only the ESZ and + * EPR bits need to be written out to PTEA, with the remainder of + * the protection bits (with the exception of the compat-mode SZ + * and PR bits, which are cleared) being written out in PTEL. + */ + __raw_writel(pte.pte_high, MMU_PTEA); +#else + /* TODO: make this look less hacky */ + __raw_writel(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA); +#endif + + /* Set PTEL register */ + pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ +#ifdef CONFIG_CACHE_WRITETHROUGH + pteval |= _PAGE_WT; +#endif + /* conveniently, we want all the software flags to be 0 anyway */ + __raw_writel(pteval, MMU_PTEL); + + /* Load the TLB */ + asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); + local_irq_restore(flags); +} + +/* + * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB + * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped + * address arrays. In compat mode the second array is inaccessible, while + * in extended mode, the legacy 8-bit ASID field in address array 1 has + * undefined behaviour. + */ +void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, + unsigned long page) +{ + jump_to_uncached(); + __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); + __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); + back_to_cached(); +} -- cgit v1.2.3 From c54a43e90b80993b2e0772d678563cb2bc6a1b3b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 17:58:33 +0900 Subject: sh: tlb-pteaex: Kill off legacy PTEA updates. While harmless, PTEA has different semantics on these parts, and is only used in extended TLB mode. Kill off the legacy support. Signed-off-by: Paul Mundt --- arch/sh/mm/tlb-pteaex.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 5c9b2d781e0..2aab3ea934d 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c @@ -64,9 +64,6 @@ void update_mmu_cache(struct vm_area_struct * vma, * and PR bits, which are cleared) being written out in PTEL. 
*/ __raw_writel(pte.pte_high, MMU_PTEA); -#else - /* TODO: make this look less hacky */ - __raw_writel(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA); #endif /* Set PTEL register */ -- cgit v1.2.3 From 3a3b311ca375a37b29bb78b030f96bf97dee97f5 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 17:59:31 +0900 Subject: sh: Update debugfs ASID dumping for 16-bit ASID support. Signed-off-by: Paul Mundt --- arch/sh/mm/asids-debugfs.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c index 8e912a15e94..cd8c3bf39b5 100644 --- a/arch/sh/mm/asids-debugfs.c +++ b/arch/sh/mm/asids-debugfs.c @@ -37,10 +37,8 @@ static int asids_seq_show(struct seq_file *file, void *iter) continue; if (p->mm) - seq_printf(file, "%5d : %02lx\n", pid, + seq_printf(file, "%5d : %04lx\n", pid, cpu_asid(smp_processor_id(), p->mm)); - else - seq_printf(file, "%5d : (none)\n", pid); } read_unlock(&tasklist_lock); -- cgit v1.2.3 From c20351846efcb755ba849d9fb701fbd9a1ffb7c2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 17 Mar 2009 21:19:49 +0900 Subject: sh: Flush only the needed range when unmapping a VMA. This follows the ARM change from Aaro Koskinen: When unmapping N pages (e.g. shared memory) the amount of TLB flushes done can be (N*PAGE_SIZE/ZAP_BLOCK_SIZE)*N although it should be N at maximum. With PREEMPT kernel ZAP_BLOCK_SIZE is 8 pages, so there is a noticeable performance penalty when unmapping a large VMA and the system is spending its time in flush_tlb_range(). The problem is that tlb_end_vma() is always flushing the full VMA range. The subrange that needs to be flushed can be calculated by tlb_remove_tlb_entry(). This approach was suggested by Hugh Dickins, and is also used by other arches. The speed increase is roughly 3x for 8M mappings and for larger mappings even more. Bits and peices are taken from the ARM patch as well as the existing arch/um implementation that is quite similar. The end result is a significant reduction in both partial and full TLB flushes initiated through flush_tlb_range(). At the same time, the nommu implementation was broken, had a superfluous cache flush, and subsequently would have triggered a BUG_ON() if a code-path had triggered it. Tidy this up for correctness and provide a nopped-out implementation there. More background on the initial discussion can be found at: http://marc.info/?t=123609820900002&r=1&w=2 http://marc.info/?t=123660375800003&r=1&w=2 Signed-off-by: Paul Mundt --- arch/sh/include/asm/tlb.h | 100 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 92 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 88ff1ae8a6b..9c16f737074 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -6,22 +6,106 @@ #endif #ifndef __ASSEMBLY__ +#include + +#ifdef CONFIG_MMU +#include +#include + +/* + * TLB handling. This allows us to remove pages from the page + * tables, and efficiently handle the TLB issues. 
+ */ +struct mmu_gather { + struct mm_struct *mm; + unsigned int fullmm; + unsigned long start, end; +}; -#define tlb_start_vma(tlb, vma) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end) +DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); -#define tlb_end_vma(tlb, vma) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end) +static inline void init_tlb_gather(struct mmu_gather *tlb) +{ + tlb->start = TASK_SIZE; + tlb->end = 0; -#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) + if (tlb->fullmm) { + tlb->start = 0; + tlb->end = TASK_SIZE; + } +} + +static inline struct mmu_gather * +tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) +{ + struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); + + tlb->mm = mm; + tlb->fullmm = full_mm_flush; + + init_tlb_gather(tlb); + + return tlb; +} + +static inline void +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + if (tlb->fullmm) + flush_tlb_mm(tlb->mm); + + /* keep the page table cache within bounds */ + check_pgt_cache(); + + put_cpu_var(mmu_gathers); +} + +static inline void +tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) +{ + if (tlb->start > address) + tlb->start = address; + if (tlb->end < address + PAGE_SIZE) + tlb->end = address + PAGE_SIZE; +} /* - * Flush whole TLBs for MM + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +static inline void +tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (!tlb->fullmm) + flush_cache_range(vma, vma->vm_start, vma->vm_end); +} + +static inline void +tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (!tlb->fullmm && tlb->end) { + flush_tlb_range(vma, tlb->start, tlb->end); + init_tlb_gather(tlb); + } +} + +#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) +#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) +#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) +#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp) + +#define tlb_migrate_finish(mm) do { } while (0) + +#else /* CONFIG_MMU */ + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) +#define tlb_flush(tlb) do { } while (0) -#include #include +#endif /* CONFIG_MMU */ #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_TLB_H */ -- cgit v1.2.3 From c9310920e6e7ae0a5c0accbd57d34c194cb31780 Mon Sep 17 00:00:00 2001 From: Piotr Ziecik Date: Tue, 17 Mar 2009 09:17:50 -0600 Subject: powerpc/5200: Enable CPU_FTR_NEED_COHERENT for MPC52xx BestComm, a DMA engine in MPC52xx SoC, requires snooping when CPU caches are enabled to work properly. Adding CPU_FTR_NEED_COHERENT fixes NFS problems on MPC52xx machines introduced by 'powerpc/mm: Fix handling of _PAGE_COHERENT in BAT setup code' (sha1: 4c456a67f501b8b15542c7c21c28812bf88f484b). 
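For illustration only (this helper is hypothetical and not part of the patch; cpu_has_feature() and _PAGE_COHERENT are existing powerpc kernel symbols): a feature bit such as CPU_FTR_NEED_COHERENT is typically consumed by mapping code that ORs the M (memory coherence) bit into the page flags when the bit is present, which is what keeps cached data coherent with DMA engines like BestComm. The change below simply makes the 52xx family select that feature.

/* Hypothetical sketch of how a coherency feature bit is typically
 * consumed by mapping code. cpu_has_feature() and _PAGE_COHERENT are
 * real powerpc kernel symbols; this helper itself is illustrative. */
static unsigned long apply_coherency(unsigned long flags)
{
	if (cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags |= _PAGE_COHERENT;	/* set the M (memory coherence) bit */
	return flags;
}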
Signed-off-by: Piotr Ziecik Signed-off-by: Grant Likely --- arch/powerpc/include/asm/cputable.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 4911104791c..21172badd70 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -241,9 +241,11 @@ extern const char *powerpc_base_platform; /* We need to mark all pages as being coherent if we're SMP or we have a * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II * require it for PCI "streaming/prefetch" to work properly. + * This is also required by 52xx family. */ #if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \ - || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) + || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \ + || defined(CONFIG_PPC_MPC52xx) #define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT #else #define CPU_FTR_COMMON 0 -- cgit v1.2.3 From a4bd6a93c3f14691c8a29e53eb04dc734b27f0db Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 17 Mar 2009 09:17:50 -0600 Subject: powerpc/mm: Respect _PAGE_COHERENT on classic ppc32 SW Since we now set _PAGE_COHERENT in the Linux PTE we shouldn't be clearing it out before we setup the SW TLB. Today all the SW TLB machines (603/e300) that we support are non-SMP, however there are some errata on some devices that cause us to set _PAGE_COHERENT via CPU_FTR_NEED_COHERENT. Signed-off-by: Kumar Gala Signed-off-by: Grant Likely --- arch/powerpc/kernel/head_32.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index a1c4cfd25de..7db2e42d97a 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -511,7 +511,7 @@ InstructionTLBMiss: and r1,r1,r2 /* writable if _RW and _DIRTY */ rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ - ori r1,r1,0xe14 /* clear out reserved bits and M */ + ori r1,r1,0xe04 /* clear out reserved bits */ andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ mtspr SPRN_RPA,r1 mfspr r3,SPRN_IMISS @@ -585,7 +585,7 @@ DataLoadTLBMiss: and r1,r1,r2 /* writable if _RW and _DIRTY */ rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ - ori r1,r1,0xe14 /* clear out reserved bits and M */ + ori r1,r1,0xe04 /* clear out reserved bits */ andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ mtspr SPRN_RPA,r1 mfspr r3,SPRN_DMISS @@ -653,7 +653,7 @@ DataStoreTLBMiss: stw r3,0(r2) /* update PTE (accessed/dirty bits) */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ - li r1,0xe15 /* clear out reserved bits and M */ + li r1,0xe05 /* clear out reserved bits & PP lsb */ andc r1,r3,r1 /* PP = user? 2: 0 */ mtspr SPRN_RPA,r1 mfspr r3,SPRN_DMISS -- cgit v1.2.3 From 9aac397525dc7945b1582a80cef5860516bca452 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Fri, 13 Mar 2009 06:52:22 +0000 Subject: powerpc/ps3: ps3_defconfig updates Update ps3_defconfig. 
Sets these options: CONFIG_PS3_VRAM=m CONFIG_BLK_DEV_DM=m CONFIG_USB_HIDDEV=y CONFIG_EXT4_FS=y Signed-off-by: Geoff Levand Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/configs/ps3_defconfig | 250 ++++++++++++++++++++++++++++--------- 1 file changed, 190 insertions(+), 60 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index b6eee7c93cd..ac14f5245d2 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27-rc3 -# Wed Aug 20 08:16:53 2008 +# Linux kernel version: 2.6.29-rc8 +# Fri Mar 13 09:28:45 2009 # CONFIG_PPC64=y @@ -16,13 +16,14 @@ CONFIG_PPC_FPU=y CONFIG_ALTIVEC=y # CONFIG_VSX is not set CONFIG_PPC_STD_MMU=y +CONFIG_PPC_STD_MMU_64=y CONFIG_PPC_MM_SLICES=y CONFIG_VIRT_CPU_ACCOUNTING=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_64BIT=y CONFIG_WORD_SIZE=64 -CONFIG_PPC_MERGE=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y CONFIG_MMU=y CONFIG_GENERIC_CMOS_UPDATE=y CONFIG_GENERIC_TIME=y @@ -46,7 +47,7 @@ CONFIG_PPC=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y -CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_PPC_OF=y CONFIG_OF=y @@ -74,10 +75,19 @@ CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=17 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set @@ -86,11 +96,13 @@ CONFIG_NAMESPACES=y # CONFIG_IPC_NS is not set # CONFIG_USER_NS is not set # CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y -# CONFIG_EMBEDDED is not set +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y @@ -99,37 +111,36 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y +CONFIG_MARKERS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y # CONFIG_KPROBES is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_SYSCALL_WRAPPERS=y CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_DMA_ATTRS=y CONFIG_USE_GENERIC_SMP_HELPERS=y -# CONFIG_HAVE_CLK is not set -CONFIG_PROC_PAGE_MONITOR=y # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -137,7 +148,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y # CONFIG_BLK_DEV_IO_TRACE is not set @@ -157,7 +167,7 @@ 
CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y +# CONFIG_FREEZER is not set # # Platform support @@ -183,18 +193,20 @@ CONFIG_PS3_STORAGE=y CONFIG_PS3_DISK=y CONFIG_PS3_ROM=y CONFIG_PS3_FLASH=y -CONFIG_OPROFILE_PS3=y +CONFIG_PS3_VRAM=m CONFIG_PS3_LPM=m CONFIG_PPC_CELL=y # CONFIG_PPC_CELL_NATIVE is not set # CONFIG_PPC_IBM_CELL_BLADE is not set # CONFIG_PPC_CELLEB is not set +# CONFIG_PPC_CELL_QPACE is not set # # Cell Broadband Engine options # CONFIG_SPU_FS=y CONFIG_SPU_FS_64K_LS=y +# CONFIG_SPU_TRACE is not set CONFIG_SPU_BASE=y # CONFIG_PQ2ADS is not set # CONFIG_IPIC is not set @@ -210,6 +222,7 @@ CONFIG_SPU_BASE=y # CONFIG_GENERIC_IOMAP is not set # CONFIG_CPU_FREQ is not set # CONFIG_FSL_ULI1575 is not set +# CONFIG_SIMPLE_GPIO is not set # # Kernel options @@ -229,6 +242,8 @@ CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT is not set CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set CONFIG_BINFMT_MISC=y CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y # CONFIG_IOMMU_VMERGE is not set @@ -251,7 +266,6 @@ CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SPARSEMEM_MANUAL=y CONFIG_SPARSEMEM=y CONFIG_HAVE_MEMORY_PRESENT=y -# CONFIG_SPARSEMEM_STATIC is not set CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y # CONFIG_SPARSEMEM_VMEMMAP is not set @@ -261,11 +275,14 @@ CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MIGRATION=y -CONFIG_RESOURCES_64BIT=y +CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y +CONFIG_UNEVICTABLE_LRU=y CONFIG_ARCH_MEMORY_PROBE=y CONFIG_PPC_HAS_HASH_64K=y +CONFIG_PPC_4K_PAGES=y +# CONFIG_PPC_16K_PAGES is not set # CONFIG_PPC_64K_PAGES is not set CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_SCHED_SMT=y @@ -299,6 +316,7 @@ CONFIG_NET=y # # Networking options # +CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -361,6 +379,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set # CONFIG_LLC2 is not set @@ -371,6 +390,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -392,39 +412,37 @@ CONFIG_BT_HIDP=m # # Bluetooth device drivers # -CONFIG_BT_HCIUSB=m -CONFIG_BT_HCIUSB_SCO=y +CONFIG_BT_HCIBTUSB=m # CONFIG_BT_HCIUART is not set # CONFIG_BT_HCIBCM203X is not set # CONFIG_BT_HCIBPA10X is not set # CONFIG_BT_HCIBFUSB is not set # CONFIG_BT_HCIVHCI is not set # CONFIG_AF_RXRPC is not set - -# -# Wireless -# +# CONFIG_PHONET is not set +CONFIG_WIRELESS=y CONFIG_CFG80211=m +# CONFIG_CFG80211_REG_DEBUG is not set CONFIG_NL80211=y +# CONFIG_WIRELESS_OLD_REGULATORY is not set CONFIG_WIRELESS_EXT=y # CONFIG_WIRELESS_EXT_SYSFS is not set +# CONFIG_LIB80211 is not set CONFIG_MAC80211=m # # Rate control algorithm selection # CONFIG_MAC80211_RC_PID=y +# CONFIG_MAC80211_RC_MINSTREL is not set CONFIG_MAC80211_RC_DEFAULT_PID=y +# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set CONFIG_MAC80211_RC_DEFAULT="pid" # CONFIG_MAC80211_MESH is not set # CONFIG_MAC80211_LEDS is not set # CONFIG_MAC80211_DEBUGFS is not set # CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_IEEE80211=m -# CONFIG_IEEE80211_DEBUG is not set -CONFIG_IEEE80211_CRYPT_WEP=m -CONFIG_IEEE80211_CRYPT_CCMP=m 
-CONFIG_IEEE80211_CRYPT_TKIP=m +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -450,6 +468,7 @@ CONFIG_MTD_DEBUG=y CONFIG_MTD_DEBUG_VERBOSE=0 # CONFIG_MTD_CONCAT is not set # CONFIG_MTD_PARTITIONS is not set +# CONFIG_MTD_TESTS is not set # # User Modules And Translation Layers @@ -494,7 +513,6 @@ CONFIG_MTD_CFI_I2=y # # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set -CONFIG_MTD_PS3VRAM=y # CONFIG_MTD_MTDRAM is not set # CONFIG_MTD_BLOCK2MTD is not set @@ -507,6 +525,11 @@ CONFIG_MTD_PS3VRAM=y # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -528,8 +551,13 @@ CONFIG_BLK_DEV_RAM_SIZE=65535 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -575,7 +603,17 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_DH is not set # CONFIG_ATA is not set -# CONFIG_MD is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +# CONFIG_DM_CRYPT is not set +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set # CONFIG_MACINTOSH_DRIVERS is not set CONFIG_NETDEVICES=y # CONFIG_DUMMY is not set @@ -591,6 +629,9 @@ CONFIG_MII=m # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set CONFIG_NETDEV_1000=y CONFIG_GELIC_NET=y @@ -604,6 +645,7 @@ CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y # CONFIG_WLAN_PRE80211 is not set CONFIG_WLAN_80211=y # CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_USB_ZD1201 is not set # CONFIG_USB_NET_RNDIS_WLAN is not set # CONFIG_RTL8187 is not set @@ -615,13 +657,11 @@ CONFIG_WLAN_80211=y # CONFIG_B43LEGACY is not set CONFIG_ZD1211RW=m # CONFIG_ZD1211RW_DEBUG is not set -CONFIG_RT2X00=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -# CONFIG_RT2500USB is not set -CONFIG_RT73USB=m -# CONFIG_RT2X00_DEBUG is not set +# CONFIG_RT2X00 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# # # USB Network Adapters @@ -634,6 +674,7 @@ CONFIG_USB_USBNET=m CONFIG_USB_NET_AX8817X=m # CONFIG_USB_NET_CDCETHER is not set # CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SMSC95XX is not set # CONFIG_USB_NET_GL620A is not set # CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_PLUSB is not set @@ -664,7 +705,7 @@ CONFIG_SLHC=m # Input device support # CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_POLLDEV is not set # @@ -735,8 +776,10 @@ CONFIG_DEVKMEM=y # Non-8250 serial port support # CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=16 +# CONFIG_HVC_UDBG is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -753,11 +796,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y # CONFIG_THERMAL is not set # CONFIG_THERMAL_HWMON is not set # 
CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -767,6 +810,7 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set # # Multimedia devices @@ -792,6 +836,7 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set # CONFIG_FB_CFB_FILLRECT is not set # CONFIG_FB_CFB_COPYAREA is not set # CONFIG_FB_CFB_IMAGEBLIT is not set @@ -817,6 +862,8 @@ CONFIG_FB_SYS_FOPS=y CONFIG_FB_PS3=y CONFIG_FB_PS3_DEFAULT_SIZE_M=9 # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -841,6 +888,7 @@ CONFIG_FB_LOGO_EXTRA=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_LOGO_LINUX_CLUT224=y CONFIG_SOUND=m +# CONFIG_SOUND_OSS_CORE is not set CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m @@ -849,6 +897,7 @@ CONFIG_SND_RAWMIDI=m # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_MIXER_OSS is not set # CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y @@ -873,15 +922,40 @@ CONFIG_HIDRAW=y # USB Input Devices # CONFIG_USB_HID=m -# CONFIG_USB_HIDINPUT_POWERBOOK is not set -# CONFIG_HID_FF is not set -# CONFIG_USB_HIDDEV is not set +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y # # USB HID Boot Protocol drivers # # CONFIG_USB_KBD is not set # CONFIG_USB_MOUSE is not set + +# +# Special HID drivers +# +# CONFIG_HID_COMPAT is not set +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_GREENASIA_FF is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_ZEROPLUS_FF is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -898,7 +972,11 @@ CONFIG_USB_DEVICEFS=y # CONFIG_USB_DYNAMIC_MINORS is not set CONFIG_USB_SUSPEND=y # CONFIG_USB_OTG is not set -CONFIG_USB_MON=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_MON=m +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set # # USB Host Controller Drivers @@ -909,6 +987,7 @@ CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_TT_NEWSCHED is not set CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set +# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set CONFIG_USB_OHCI_HCD=m @@ -918,6 +997,7 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y CONFIG_USB_OHCI_LITTLE_ENDIAN=y # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set # # Enable Host or Gadget support to see Inventra options @@ -929,20 +1009,20 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y # CONFIG_USB_ACM is not set # CONFIG_USB_PRINTER is not set # CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE enables SCSI, and 
'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# may also be needed; see USB_STORAGE Help for more information +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set @@ -950,7 +1030,6 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_ALAUDA is not set # CONFIG_USB_STORAGE_ONETOUCH is not set # CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_SIERRA is not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set # CONFIG_USB_LIBUSUAL is not set @@ -971,6 +1050,7 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_EMI62 is not set # CONFIG_USB_EMI26 is not set # CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set @@ -988,7 +1068,12 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1014,12 +1099,15 @@ CONFIG_RTC_INTF_DEV=y # Platform RTC drivers # # CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # @@ -1028,6 +1116,7 @@ CONFIG_RTC_INTF_DEV=y CONFIG_RTC_DRV_PPC=m # CONFIG_DMADEVICES is not set # CONFIG_UIO is not set +# CONFIG_STAGING is not set # # File systems @@ -1035,26 +1124,35 @@ CONFIG_RTC_DRV_PPC=m CONFIG_EXT2_FS=m # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=y +CONFIG_EXT3_FS=m CONFIG_EXT3_FS_XATTR=y # CONFIG_EXT3_FS_POSIX_ACL is not set # CONFIG_EXT3_FS_SECURITY is not set -# CONFIG_EXT4DEV_FS is not set -CONFIG_JBD=y +CONFIG_EXT4_FS=y +# CONFIG_EXT4DEV_COMPAT is not set +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +CONFIG_JBD=m # CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y CONFIG_QUOTA=y # CONFIG_QUOTA_NETLINK_INTERFACE is not set CONFIG_PRINT_QUOTA_WARNING=y +CONFIG_QUOTA_TREE=y # CONFIG_QFMT_V1 is not set CONFIG_QFMT_V2=y CONFIG_QUOTACTL=y @@ -1087,16 +1185,14 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1106,6 +1202,7 @@ CONFIG_HUGETLB_PAGE=y # CONFIG_EFS_FS 
is not set # CONFIG_JFFS2_FS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1126,6 +1223,7 @@ CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -1190,9 +1288,9 @@ CONFIG_NLS_ISO8859_1=y # Library routines # CONFIG_BITREVERSE=y -# CONFIG_GENERIC_FIND_FIRST_BIT is not set +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m -# CONFIG_CRC16 is not set +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=m CONFIG_CRC32=y @@ -1250,27 +1348,44 @@ CONFIG_DEBUG_WRITECOUNT=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_SG is not set -CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_NOTIFIERS is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_HAVE_FTRACE=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_DYNAMIC_FTRACE=y -# CONFIG_FTRACE is not set +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_RING_BUFFER=y +CONFIG_TRACING=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set +CONFIG_PRINT_STACK_DEPTH=64 CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_CODE_PATCHING_SELFTEST is not set # CONFIG_FTR_FIXUP_SELFTEST is not set +# CONFIG_MSI_BITMAP_SELFTEST is not set # CONFIG_XMON is not set CONFIG_IRQSTACKS=y # CONFIG_VIRQ_DEBUG is not set @@ -1282,16 +1397,26 @@ CONFIG_IRQSTACKS=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_CRYPTO=y # # Crypto core or helper # +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_GF128MUL=m # CONFIG_CRYPTO_NULL is not set # CONFIG_CRYPTO_CRYPTD is not set @@ -1363,6 +1488,11 @@ CONFIG_CRYPTO_SALSA20=m # # CONFIG_CRYPTO_DEFLATE is not set CONFIG_CRYPTO_LZO=m + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y # CONFIG_PPC_CLOCK is not set # CONFIG_VIRTUALIZATION is not set -- cgit v1.2.3 From 7be5c55af0cc58e54e42e1702d837527e15b8414 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 18 Mar 2009 08:47:31 +0000 Subject: sh: simplify kexec vbr code Setup the vbr register in machine_kexec(). This instead of passing values to the assembly snippet. 
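Condensed from the diff that follows (not new code): the VBR setup now happens in C via inline assembly inside machine_kexec(), right before jumping to the relocation code, rather than being passed in a register to the assembly snippet.

/* Condensed view of the machine_kexec() hunk below: load the debugger
 * VBR from C instead of handing vbr_reg to relocate_new_kernel. */
#if defined(CONFIG_SH_STANDARD_BIOS)
	asm volatile("ldc %0, vbr"
		     : /* no outputs */
		     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
		     : "memory");
#endif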
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/machine_kexec.c | 17 ++++++++--------- arch/sh/kernel/relocate_kernel.S | 4 ---- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 94df56b0d1f..d3318f99256 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -23,8 +23,7 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)( unsigned long indirection_page, unsigned long reboot_code_buffer, - unsigned long start_address, - unsigned long vbr_reg) ATTRIB_NORET; + unsigned long start_address) ATTRIB_NORET; extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; @@ -76,14 +75,8 @@ void machine_kexec(struct kimage *image) unsigned long page_list; unsigned long reboot_code_buffer; - unsigned long vbr_reg; relocate_new_kernel_t rnk; -#if defined(CONFIG_SH_STANDARD_BIOS) - vbr_reg = ((unsigned long )gdb_vbr_vector) - 0x100; -#else - vbr_reg = 0x80000000; // dummy -#endif /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); @@ -100,9 +93,15 @@ void machine_kexec(struct kimage *image) kexec_info(image); flush_cache_all(); + set_bl_bit(); +#if defined(CONFIG_SH_STANDARD_BIOS) + asm volatile("ldc %0, vbr" : + : "r" (((unsigned long) gdb_vbr_vector) - 0x100) + : "memory"); +#endif /* now call it */ rnk = (relocate_new_kernel_t) reboot_code_buffer; - (*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start), vbr_reg); + (*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start)); } void arch_crash_save_vmcoreinfo(void) diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index c66cb3209db..8b50b2c873a 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S @@ -16,7 +16,6 @@ relocate_new_kernel: /* r4 = indirection_page */ /* r5 = reboot_code_buffer */ /* r6 = start_address */ - /* r7 = vbr_reg */ mov.l 10f,r8 /* PAGE_SIZE */ mov.l 11f,r9 /* P2SEG */ @@ -80,9 +79,6 @@ relocate_new_kernel: bra 0b nop 6: -#ifdef CONFIG_SH_STANDARD_BIOS - ldc r7, vbr -#endif jmp @r6 nop -- cgit v1.2.3 From e4e063d0c288bd65c56dd855337780a541ed928d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 18 Mar 2009 08:49:45 +0000 Subject: sh: rework kexec segment code Rework the kexec code to avoid using P2SEG. Instead we walk the page list in machine_kexec() and convert the addresses from physical to virtual using C. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/machine_kexec.c | 17 ++++++++++++++++- arch/sh/kernel/relocate_kernel.S | 6 +----- 2 files changed, 17 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index d3318f99256..25b4748fdc7 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -76,6 +76,21 @@ void machine_kexec(struct kimage *image) unsigned long page_list; unsigned long reboot_code_buffer; relocate_new_kernel_t rnk; + unsigned long entry; + unsigned long *ptr; + + /* + * Nicked from the mips version of machine_kexec(): + * The generic kexec code builds a page list with physical + * addresses. Use phys_to_virt() to convert them to virtual. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? 
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); @@ -101,7 +116,7 @@ void machine_kexec(struct kimage *image) #endif /* now call it */ rnk = (relocate_new_kernel_t) reboot_code_buffer; - (*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start)); + (*rnk)(page_list, reboot_code_buffer, image->start); } void arch_crash_save_vmcoreinfo(void) diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index 8b50b2c873a..2a6630be668 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S @@ -18,7 +18,6 @@ relocate_new_kernel: /* r6 = start_address */ mov.l 10f,r8 /* PAGE_SIZE */ - mov.l 11f,r9 /* P2SEG */ /* stack setting */ add r8,r5 @@ -29,9 +28,8 @@ relocate_new_kernel: 0: mov.l @r4+,r0 /* cmd = *ind++ */ -1: /* addr = (cmd | P2SEG) & 0xfffffff0 */ +1: /* addr = cmd & 0xfffffff0 */ mov r0,r2 - or r9,r2 mov #-16,r1 and r1,r2 @@ -85,8 +83,6 @@ relocate_new_kernel: .align 2 10: .long PAGE_SIZE -11: - .long P2SEG relocate_new_kernel_end: -- cgit v1.2.3 From b7cf6ddc13186f9272438a97aa75972d496d0b0a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 18 Mar 2009 08:51:29 +0000 Subject: sh: add kexec jump support Add kexec jump support to the SuperH architecture. Similar to the x86 implementation, with the following exceptions: - Instead of separating the assembly code flow into two parts for regular kexec and kexec jump we use a single code path. In the assembly snippet regular kexec is just kexec jump that never comes back. - Instead of using a swap page when moving data between pages the page copy assembly routine has been modified to exchange the data between the pages using registers. - We walk the page list twice in machine_kexec() to do and undo physical to virtual address conversion. 
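As a rough C equivalent of the second point above (illustration only; the patch implements this in SH assembly in relocate_kernel.S, 16 bytes at a time through registers):

/* Illustration only, not code from the patch: exchange the contents of
 * the source and destination pages in place, so no separate swap page
 * is needed for the kexec jump case. */
static void swap_page_contents(unsigned long *dst, unsigned long *src)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		unsigned long tmp = dst[i];

		dst[i] = src[i];
		src[i] = tmp;
	}
}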
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 ++ arch/sh/kernel/machine_kexec.c | 32 ++++++- arch/sh/kernel/relocate_kernel.S | 195 +++++++++++++++++++++++++++++++++------ 3 files changed, 202 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 6c56495fd15..8d50d527c59 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -559,6 +559,13 @@ config CRASH_DUMP For more details see Documentation/kdump/kdump.txt +config KEXEC_JUMP + bool "kexec jump (EXPERIMENTAL)" + depends on SUPERH32 && KEXEC && HIBERNATION && EXPERIMENTAL + help + Jump between original kernel and kexeced kernel and invoke + code via KEXEC + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" depends on PROC_FS diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 25b4748fdc7..c44efb73ab1 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -14,20 +14,21 @@ #include #include #include +#include #include #include #include #include #include -typedef NORET_TYPE void (*relocate_new_kernel_t)( - unsigned long indirection_page, - unsigned long reboot_code_buffer, - unsigned long start_address) ATTRIB_NORET; +typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, + unsigned long reboot_code_buffer, + unsigned long start_address); extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; extern void *gdb_vbr_vector; +extern void *vbr_base; void machine_shutdown(void) { @@ -72,7 +73,6 @@ static void kexec_info(struct kimage *image) */ void machine_kexec(struct kimage *image) { - unsigned long page_list; unsigned long reboot_code_buffer; relocate_new_kernel_t rnk; @@ -92,6 +92,11 @@ void machine_kexec(struct kimage *image) *ptr = (unsigned long) phys_to_virt(*ptr); } +#ifdef CONFIG_KEXEC_JUMP + if (image->preserve_context) + save_processor_state(); +#endif + /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); @@ -117,6 +122,23 @@ void machine_kexec(struct kimage *image) /* now call it */ rnk = (relocate_new_kernel_t) reboot_code_buffer; (*rnk)(page_list, reboot_code_buffer, image->start); + +#ifdef CONFIG_KEXEC_JUMP + asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); + local_irq_disable(); + clear_bl_bit(); + if (image->preserve_context) + restore_processor_state(); + + /* Convert page list back to physical addresses, what a mess. */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (*ptr & IND_INDIRECTION) ? + phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = virt_to_phys(*ptr); + } +#endif } void arch_crash_save_vmcoreinfo(void) diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index 2a6630be668..fcc9934fb97 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S @@ -4,6 +4,8 @@ * * LANDISK/sh4 is supported. Maybe, SH archtecture works well. * + * 2009-03-18 Magnus Damm - Added Kexec Jump support + * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ @@ -17,14 +19,135 @@ relocate_new_kernel: /* r5 = reboot_code_buffer */ /* r6 = start_address */ - mov.l 10f,r8 /* PAGE_SIZE */ + mov.l 10f, r0 /* PAGE_SIZE */ + add r5, r0 /* setup new stack at end of control page */ + + /* save r15->r8 to new stack */ + mov.l r15, @-r0 + mov r0, r15 + mov.l r14, @-r15 + mov.l r13, @-r15 + mov.l r12, @-r15 + mov.l r11, @-r15 + mov.l r10, @-r15 + mov.l r9, @-r15 + mov.l r8, @-r15 + + /* save other random registers */ + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + stc.l ssr, @-r15 + stc.l sr, @-r15 + sts.l pr, @-r15 + stc.l spc, @-r15 + + /* switch to bank1 and save r7->r0 */ + mov.l 12f, r9 + stc sr, r8 + or r9, r8 + ldc r8, sr + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + mov.l r0, @-r15 + + /* switch to bank0 and save r7->r0 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + mov.l r0, @-r15 + + mov.l r4, @-r15 /* save indirection page again */ + + bsr swap_pages /* swap pages before jumping to new kernel */ + nop + + mova 11f, r0 + mov.l r15, @r0 /* save pointer to stack */ + + jsr @r6 /* hand over control to new kernel */ + nop + + mov.l 11f, r15 /* get pointer to stack */ + mov.l @r15+, r4 /* restore r4 to get indirection page */ - /* stack setting */ - add r8,r5 - mov r5,r15 + bsr swap_pages /* swap pages back to previous state */ + nop + /* make sure bank0 is active and restore r0->r7 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + /* switch to bank1 and restore r0->r7 */ + mov.l 12f, r9 + stc sr, r8 + or r9, r8 + ldc r8, sr + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + /* switch back to bank0 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + + /* restore other random registers */ + ldc.l @r15+, spc + lds.l @r15+, pr + ldc.l @r15+, sr + ldc.l @r15+, ssr + ldc.l @r15+, gbr + lds.l @r15+, mach + lds.l @r15+, macl + + /* restore r8->r15 */ + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + mov.l @r15+, r15 + rts + nop + +swap_pages: bra 1f - mov r4,r0 /* cmd = indirection_page */ + mov r4,r0 /* cmd = indirection_page */ 0: mov.l @r4+,r0 /* cmd = *ind++ */ @@ -37,52 +160,70 @@ relocate_new_kernel: tst #1,r0 bt 2f bra 0b - mov r2,r5 + mov r2,r5 2: /* else if(cmd & IND_INDIRECTION) ind = addr */ tst #2,r0 bt 3f bra 0b - mov r2,r4 + mov r2,r4 -3: /* else if(cmd & IND_DONE) goto 6 */ +3: /* else if(cmd & IND_DONE) return */ tst #4,r0 bt 4f - bra 6f - nop + rts + nop 4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */ tst #8,r0 bt 0b - mov r8,r3 + mov.l 10f,r3 /* PAGE_SIZE */ shlr2 r3 shlr2 r3 5: dt r3 - mov.l @r2+,r1 /* 16n+0 */ - mov.l r1,@r5 - add #4,r5 - mov.l @r2+,r1 /* 16n+4 */ - mov.l r1,@r5 - add #4,r5 - mov.l @r2+,r1 /* 16n+8 */ - mov.l r1,@r5 - add #4,r5 - mov.l @r2+,r1 /* 16n+12 */ - mov.l r1,@r5 - add #4,r5 + + /* regular kexec just overwrites the destination page + * with the contents of the source page. + * for the kexec jump case we need to swap the contents + * of the pages. 
+ * to keep it simple swap the contents for both cases. + */ + mov.l @(0, r2), r8 + mov.l @(0, r5), r1 + mov.l r8, @(0, r5) + mov.l r1, @(0, r2) + + mov.l @(4, r2), r8 + mov.l @(4, r5), r1 + mov.l r8, @(4, r5) + mov.l r1, @(4, r2) + + mov.l @(8, r2), r8 + mov.l @(8, r5), r1 + mov.l r8, @(8, r5) + mov.l r1, @(8, r2) + + mov.l @(12, r2), r8 + mov.l @(12, r5), r1 + mov.l r8, @(12, r5) + mov.l r1, @(12, r2) + + add #16,r5 + add #16,r2 bf 5b bra 0b - nop -6: - jmp @r6 - nop + nop .align 2 10: .long PAGE_SIZE +11: + .long 0 +12: + .long 0x20000000 ! RB=1 relocate_new_kernel_end: -- cgit v1.2.3 From a6bab7b5c18501e4dd3201ae8ac1dc6da5f07acc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 18 Mar 2009 19:06:15 +0900 Subject: sh: kexec: Drop SR.BL bit toggling. For the time being, this creates far more problems than it solves, evident by the second local_irq_disable(). Kill all of this off and rely on IRQ disabling to protect against the VBR reload. Signed-off-by: Paul Mundt --- arch/sh/kernel/machine_kexec.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index c44efb73ab1..69268c0d806 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -110,23 +110,22 @@ void machine_kexec(struct kimage *image) memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); - kexec_info(image); + kexec_info(image); flush_cache_all(); - set_bl_bit(); #if defined(CONFIG_SH_STANDARD_BIOS) asm volatile("ldc %0, vbr" : : "r" (((unsigned long) gdb_vbr_vector) - 0x100) : "memory"); #endif + /* now call it */ rnk = (relocate_new_kernel_t) reboot_code_buffer; (*rnk)(page_list, reboot_code_buffer, image->start); #ifdef CONFIG_KEXEC_JUMP asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); - local_irq_disable(); - clear_bl_bit(); + if (image->preserve_context) restore_processor_state(); -- cgit v1.2.3 From 7e6b6f2b949a52382f59a93ecbe86e32e4fcec7c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 18 Mar 2009 19:07:16 +0900 Subject: sh: kexec jump: fix for ftrace. Save and restore ftrace state when returning from kexec jump in machine_kexec(). Follows the x86 change. Signed-off-by: Paul Mundt --- arch/sh/kernel/machine_kexec.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 69268c0d806..cc7c29b0dc8 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -78,6 +79,7 @@ void machine_kexec(struct kimage *image) relocate_new_kernel_t rnk; unsigned long entry; unsigned long *ptr; + int save_ftrace_enabled; /* * Nicked from the mips version of machine_kexec(): @@ -97,6 +99,8 @@ void machine_kexec(struct kimage *image) save_processor_state(); #endif + save_ftrace_enabled = __ftrace_enabled_save(); + /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); @@ -138,6 +142,8 @@ void machine_kexec(struct kimage *image) *ptr = virt_to_phys(*ptr); } #endif + + __ftrace_enabled_restore(save_ftrace_enabled); } void arch_crash_save_vmcoreinfo(void) -- cgit v1.2.3 From 4fa81ed27781a12f6303b9263056635ae74e3e21 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 18 Mar 2009 13:27:32 +0100 Subject: [S390] __div64_31 broken for CONFIG_MARCH_G5 The implementation of __div64_31 for G5 machines is broken. 
The comments in __div64_31 are correct, only the code does not do what the comments say. The part "If the remainder has overflown subtract base and increase the quotient" is only partially realized, the base is subtracted correctly but the quotient is only increased if the dividend had the last bit set. Using the correct instruction fixes the problem. Cc: stable@kernel.org Reported-by: Frans Pop Tested-by: Frans Pop Signed-off-by: Martin Schwidefsky --- arch/s390/lib/div64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c index a5f8300bf3e..d9e62c0b576 100644 --- a/arch/s390/lib/div64.c +++ b/arch/s390/lib/div64.c @@ -61,7 +61,7 @@ static uint32_t __div64_31(uint64_t *n, uint32_t base) " clr %0,%3\n" " jl 0f\n" " slr %0,%3\n" - " alr %1,%2\n" + " ahi %1,1\n" "0:\n" : "+d" (reg2), "+d" (reg3), "=d" (tmp) : "d" (base), "2" (1UL) : "cc" ); -- cgit v1.2.3 From f55d63854e426e95d7858c2662c2353262a5af70 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 18 Mar 2009 13:27:33 +0100 Subject: [S390] topology: define SD_MC_INIT to fix performance regression The default values for SD_MC_INIT cause an additional cpu usage of up to 40% on some network benchmarks compared to the plain SD_CPU_INIT values. So just define SD_MC_INIT to SD_CPU_INIT. More tuning needs to be done. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/topology.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c93eb50e1d0..c979c3b56ab 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -30,6 +30,8 @@ static inline void s390_init_cpu_topology(void) }; #endif +#define SD_MC_INIT SD_CPU_INIT + #include #endif /* _ASM_S390_TOPOLOGY_H */ -- cgit v1.2.3 From cf087343805ebfea2b1234b06fd5f12273e865b1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 18 Mar 2009 13:27:34 +0100 Subject: [S390] ftrace/mcount: fix kernel stack backchain With packed stack the backchain is at a different location. Just use __SF_BACKCHAIN as an offset to store the backchain. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/mcount.S | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 397d131a345..80641224a09 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -5,6 +5,8 @@ * */ +#include + #ifndef CONFIG_64BIT .globl _mcount _mcount: @@ -14,7 +16,7 @@ _mcount: ahi %r15,-96 l %r3,100(%r15) la %r2,0(%r14) - st %r1,0(%r15) + st %r1,__SF_BACKCHAIN(%r15) la %r3,0(%r3) bras %r14,0f .long ftrace_trace_function @@ -38,7 +40,7 @@ _mcount: stg %r14,112(%r15) lgr %r1,%r15 aghi %r15,-160 - stg %r1,0(%r15) + stg %r1,__SF_BACKCHAIN(%r15) lgr %r2,%r14 lg %r3,168(%r15) larl %r14,ftrace_trace_function -- cgit v1.2.3 From 2887fc5aa60803b9d6d38c79248805f08d8b0e28 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Wed, 18 Mar 2009 13:27:35 +0100 Subject: [S390] Dont check for pfn_valid() in uaccess_pt.c pfn_valid() actually checks for a valid struct page and not for a valid pfn. Using xip mappings w/o struct pages, this will result in -EFAULT returned by the (page table walk) user copy functions, even though there is valid memory. Those user copy functions don't need a struct page, so this patch just removes the pfn_valid() check. 
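A simplified sketch of what the page-table-walking copy routines do (this is not the actual uaccess_pt.c code; it assumes an identity-mapped kernel as on s390 and omits locking and fault handling): the usable address is derived from the pfn alone, so no struct page, and therefore no pfn_valid() check, is needed.

/* Simplified sketch, not the actual uaccess_pt.c code. Assumes the
 * kernel is identity-mapped (as on s390); locking and fault handling
 * are omitted. Only the pfn is needed, never a struct page. */
static void *resolve_user_addr(struct mm_struct *mm, unsigned long uaddr)
{
	pgd_t *pgd = pgd_offset(mm, uaddr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	void *kaddr = NULL;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, uaddr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, uaddr);
	if (pmd_none(*pmd))
		return NULL;
	pte = pte_offset_map(pmd, uaddr);
	if (pte_present(*pte))
		kaddr = (void *)((pte_pfn(*pte) << PAGE_SHIFT) +
				 (uaddr & (PAGE_SIZE - 1)));
	pte_unmap(pte);
	return kaddr;
}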
Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/lib/uaccess_pt.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch') diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index d66215b0fde..b0b84c35b0a 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -119,8 +119,6 @@ retry: goto fault; pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) - goto out; offset = uaddr & (PAGE_SIZE - 1); size = min(n - done, PAGE_SIZE - offset); @@ -135,7 +133,6 @@ retry: done += size; uaddr += size; } while (done < n); -out: spin_unlock(&mm->page_table_lock); return n - done; fault: @@ -163,9 +160,6 @@ retry: goto fault; pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) - goto out; - ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); out: return ret; @@ -244,11 +238,6 @@ retry: goto fault; pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) { - done = -1; - goto out; - } - offset = uaddr & (PAGE_SIZE-1); addr = (char *)(pfn << PAGE_SHIFT) + offset; len = min(count - done, PAGE_SIZE - offset); @@ -256,7 +245,6 @@ retry: done += len_str; uaddr += len_str; } while ((len_str == len) && (done < count)); -out: spin_unlock(&mm->page_table_lock); return done + 1; fault: @@ -325,12 +313,7 @@ retry: } pfn_from = pte_pfn(*pte_from); - if (!pfn_valid(pfn_from)) - goto out; pfn_to = pte_pfn(*pte_to); - if (!pfn_valid(pfn_to)) - goto out; - offset_from = uaddr_from & (PAGE_SIZE-1); offset_to = uaddr_from & (PAGE_SIZE-1); offset_max = max(offset_from, offset_to); @@ -342,7 +325,6 @@ retry: uaddr_from += size; uaddr_to += size; } while (done < n); -out: spin_unlock(&mm->page_table_lock); return n - done; fault: -- cgit v1.2.3 From f481bfafd36e621d6cbc62d4b25f74811410aef7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 18 Mar 2009 13:27:36 +0100 Subject: [S390] make page table walking more robust Make page table walking on s390 more robust. The current code requires that the pgd/pud/pmd/pte loop is only done for address ranges that are below the end address of the last vma of the address space. But this is not always true, e.g. the generic page table walker does not guarantee this. Change TASK_SIZE/TASK_SIZE_OF to reflect the current size of the address space. This makes the generic page table walker happy but it breaks the upgrade of a 3 level page table to a 4 level page table. To make the upgrade work again another fix is required. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/processor.h | 5 ++--- arch/s390/mm/mmap.c | 4 ++-- arch/s390/mm/pgtable.c | 2 ++ 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 066b99502e0..db4523fe38a 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -61,7 +61,7 @@ extern void print_cpu_info(struct cpuinfo_S390 *); extern int get_cpu_capability(unsigned int *); /* - * User space process size: 2GB for 31 bit, 4TB for 64 bit. + * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. */ #ifndef __s390x__ @@ -70,8 +70,7 @@ extern int get_cpu_capability(unsigned int *); #else /* __s390x__ */ -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk,TIF_31BIT) ? \ - (1UL << 31) : (1UL << 53)) +#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? 
\ (1UL << 30) : (1UL << 41)) #define TASK_SIZE TASK_SIZE_OF(current) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 5932a824547..346dd0c5cbd 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -35,7 +35,7 @@ * Leave an at least ~128 MB hole. */ #define MIN_GAP (128*1024*1024) -#define MAX_GAP (TASK_SIZE/6*5) +#define MAX_GAP (STACK_TOP/6*5) static inline unsigned long mmap_base(void) { @@ -46,7 +46,7 @@ static inline unsigned long mmap_base(void) else if (gap > MAX_GAP) gap = MAX_GAP; - return TASK_SIZE - (gap & PAGE_MASK); + return STACK_TOP - (gap & PAGE_MASK); } static inline int mmap_is_legacy(void) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0767827540b..6b6ddc4ea02 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -117,6 +117,7 @@ repeat: crst_table_init(table, entry); pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); mm->pgd = (pgd_t *) table; + mm->task_size = mm->context.asce_limit; table = NULL; } spin_unlock(&mm->page_table_lock); @@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) BUG(); } mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); + mm->task_size = mm->context.asce_limit; crst_table_free(mm, (unsigned long *) pgd); } update_mm(mm, current); -- cgit v1.2.3 From 0fb1d9bcbcf701a45835aa150c57ca54ea685bfa Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 18 Mar 2009 13:27:37 +0100 Subject: [S390] make page table upgrade work again After TASK_SIZE now gives the current size of the address space the upgrade of a 64 bit process from 3 to 4 levels of page table needs to use the arch_mmap_check hook to catch large mmap lengths. The get_unmapped_area* functions need to check for -ENOMEM from the arch_get_unmapped_area*, upgrade the page table and retry. 
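The shape of the resulting wrapper, condensed from the non-topdown variant in the diff below (the function name here is just a placeholder for the sketch):

/* Condensed from the s390_get_unmapped_area() hunk below: retry the
 * search after upgrading to a 4-level page table (53-bit address
 * space) when the first attempt fails with -ENOMEM. */
static unsigned long
s390_get_unmapped_area_sketch(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;		/* page-aligned address: success */
	if (area == -ENOMEM &&
	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
		rc = crst_table_upgrade(mm, 1UL << 53);	/* grow to 4 levels */
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}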
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/mman.h | 5 +++++ arch/s390/mm/mmap.c | 44 ++++++++++++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 7839767d837..da01432e8f4 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -22,4 +22,9 @@ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) +int s390_mmap_check(unsigned long addr, unsigned long len); +#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) +#endif + #endif /* __S390_MMAN_H__ */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 346dd0c5cbd..e008d236cc1 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); #else +int s390_mmap_check(unsigned long addr, unsigned long len) +{ + if (!test_thread_flag(TIF_31BIT) && + len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) + return crst_table_upgrade(current->mm, 1UL << 53); + return 0; +} + static unsigned long s390_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; + unsigned long area; int rc; - addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags); - if (addr & ~PAGE_MASK) - return addr; - if (unlikely(mm->context.asce_limit < addr + len)) { - rc = crst_table_upgrade(mm, addr + len); + area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); + if (!(area & ~PAGE_MASK)) + return area; + if (area == -ENOMEM && + !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + /* Upgrade the page table to 4 levels and retry. */ + rc = crst_table_upgrade(mm, 1UL << 53); if (rc) return (unsigned long) rc; + area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); } - return addr; + return area; } static unsigned long -s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct mm_struct *mm = current->mm; - unsigned long addr = addr0; + unsigned long area; int rc; - addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); - if (addr & ~PAGE_MASK) - return addr; - if (unlikely(mm->context.asce_limit < addr + len)) { - rc = crst_table_upgrade(mm, addr + len); + area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); + if (!(area & ~PAGE_MASK)) + return area; + if (area == -ENOMEM && + !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + /* Upgrade the page table to 4 levels and retry. */ + rc = crst_table_upgrade(mm, 1UL << 53); if (rc) return (unsigned long) rc; + area = arch_get_unmapped_area_topdown(filp, addr, len, + pgoff, flags); } - return addr; + return area; } /* * This function, called very early during the creation of a new -- cgit v1.2.3 From 42cc77c861e8e850e86252bb5b1e12e006261973 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 18 Mar 2009 23:51:57 -0700 Subject: sparc64: Reschedule KGDB capture to a software interrupt. Otherwise it might interrupt switch_to() midstream and use half-cooked register window state. Reported-by: Chris Torek Signed-off-by: David S. 
Miller --- arch/sparc/include/asm/pil.h | 1 + arch/sparc/kernel/kgdb_64.c | 2 +- arch/sparc/kernel/ttable.S | 7 ++++++- arch/sparc/mm/ultra.S | 24 ++---------------------- 4 files changed, 10 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h index 32a7efe76d0..26693703054 100644 --- a/arch/sparc/include/asm/pil.h +++ b/arch/sparc/include/asm/pil.h @@ -24,6 +24,7 @@ #define PIL_DEVICE_IRQ 5 #define PIL_SMP_CALL_FUNC_SNGL 6 #define PIL_DEFERRED_PCR_WORK 7 +#define PIL_KGDB_CAPTURE 8 #define PIL_NORMAL_MAX 14 #define PIL_NMI 15 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index fefbe6dc51b..f5a0fd490b5 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -108,7 +108,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) } #ifdef CONFIG_SMP -void smp_kgdb_capture_client(struct pt_regs *regs) +void smp_kgdb_capture_client(int irq, struct pt_regs *regs) { unsigned long flags; diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable.S index d9bdfb9d5c1..76d837fc47d 100644 --- a/arch/sparc/kernel/ttable.S +++ b/arch/sparc/kernel/ttable.S @@ -64,7 +64,12 @@ tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6) tl0_irq6: BTRAP(0x46) #endif tl0_irq7: TRAP_IRQ(deferred_pcr_work_irq, 7) -tl0_irq8: BTRAP(0x48) BTRAP(0x49) +#ifdef CONFIG_KGDB +tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8) +#else +tl0_irq8: BTRAP(0x48) +#endif +tl0_irq9: BTRAP(0x49) tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) tl0_irq14: TRAP_IRQ(timer_interrupt, 14) tl0_irq15: TRAP_NMI_IRQ(perfctr_irq, 15) diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 80c788ec7c3..b57a5942ba6 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -679,28 +679,8 @@ xcall_new_mmu_context_version: #ifdef CONFIG_KGDB .globl xcall_kgdb_capture xcall_kgdb_capture: -661: rdpr %pstate, %g2 - wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - .section .sun4v_2insn_patch, "ax" - .word 661b - nop - nop - .previous - - rdpr %pil, %g2 - wrpr %g0, PIL_NORMAL_MAX, %pil - sethi %hi(109f), %g7 - ba,pt %xcc, etrap_irq -109: or %g7, %lo(109b), %g7 -#ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off - nop -#endif - call smp_kgdb_capture_client - add %sp, PTREGS_OFF, %o0 - /* Has to be a non-v9 branch due to the large distance. */ - ba rtrap_xcall - ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 + wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint + retry #endif #endif /* CONFIG_SMP */ -- cgit v1.2.3 From 192d7a4667c6d11d1a174ec4cad9a3c5d5f9043c Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 18 Mar 2009 23:53:16 -0700 Subject: sparc64: Fix crash with /proc/iomem When you compile a kernel on Sparc64 with heap memory checking and type "cat /proc/iomem", you get a crash, because pointers in struct resource are uninitialized. Most code fills struct resource with zeros, so I assume that it is the responsibility of the caller of request_resource to initialize it, not the responsibility of the request_resource function. After 2.6.29 is out, there could be a check for uninitialized fields added to request_resource to avoid crashes like this. Signed-off-by: Mikulas Patocka Signed-off-by: David S. 
Miller --- arch/sparc/kernel/pci_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 64e6edf17b9..b775658a927 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -368,7 +368,7 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) const u32 *vdma = of_get_property(pbm->op->node, "virtual-dma", NULL); if (vdma) { - struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL); + struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { prom_printf("Cannot allocate IOMMU resource.\n"); -- cgit v1.2.3 From 615e73b3cd8876262f61ea28b4147c8de38a043a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 19 Mar 2009 10:04:29 +0000 Subject: sh: disallow kexec virtual entry Older versions of kexec-tools have a zImage loader that passes a virtual address as the entry point. The ELF loader, on the other hand, passes a physical address as the entry point, and pages are always passed as physical addresses as well. Only allow physical addresses from now on. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/machine_kexec.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index cc7c29b0dc8..7ea2704ea03 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -46,6 +46,12 @@ void machine_crash_shutdown(struct pt_regs *regs) */ int machine_kexec_prepare(struct kimage *image) { + /* older versions of kexec-tools are passing + * the zImage entry point as a virtual address. + */ + if (image->start != PHYSADDR(image->start)) + return -EINVAL; /* upgrade your kexec-tools */ + return 0; } @@ -125,7 +131,8 @@ void machine_kexec(struct kimage *image) /* now call it */ rnk = (relocate_new_kernel_t) reboot_code_buffer; - (*rnk)(page_list, reboot_code_buffer, image->start); + (*rnk)(page_list, reboot_code_buffer, + (unsigned long)phys_to_virt(image->start)); #ifdef CONFIG_KEXEC_JUMP asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); -- cgit v1.2.3 From 3bf509230a626d11cba0e0145f552918092f586d Mon Sep 17 00:00:00 2001 From: Rafael Ignacio Zurita Date: Fri, 20 Mar 2009 02:08:22 +0000 Subject: sh: fix the HD64461 level-triggered interrupts handling Rework the hd64461 demuxer code to fix the HD64461 level-triggered interrupts handling, using handle_level_irq() as needed. 
Signed-off-by: Rafael Ignacio Zurita Signed-off-by: Paul Mundt --- arch/sh/boards/mach-hp6xx/setup.c | 1 - arch/sh/cchips/hd6446x/hd64461.c | 30 +++++++++++++++++------------- arch/sh/include/asm/hd64461.h | 1 - 3 files changed, 17 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-hp6xx/setup.c b/arch/sh/boards/mach-hp6xx/setup.c index 746742bdc01..8f305b36358 100644 --- a/arch/sh/boards/mach-hp6xx/setup.c +++ b/arch/sh/boards/mach-hp6xx/setup.c @@ -115,7 +115,6 @@ static struct sh_machine_vector mv_hp6xx __initmv = { .mv_setup = hp6xx_setup, /* IRQ's : CPU(64) + CCHIP(16) + FREE_TO_USE(6) */ .mv_nr_irqs = HD64461_IRQBASE + HD64461_IRQ_NUM + 6, - .mv_irq_demux = hd64461_irq_demux, /* Enable IRQ0 -> IRQ3 in IRQ_MODE */ .mv_init_irq = hp6xx_init_irq, }; diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index 27ceeb948bb..25ef9106152 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ b/arch/sh/cchips/hd6446x/hd64461.c @@ -53,21 +53,22 @@ static struct irq_chip hd64461_irq_chip = { .unmask = hd64461_unmask_irq, }; -int hd64461_irq_demux(int irq) +static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) { - if (irq == CONFIG_HD64461_IRQ) { - unsigned short bit; - unsigned short nirr = inw(HD64461_NIRR); - unsigned short nimr = inw(HD64461_NIMR); - int i; - - nirr &= ~nimr; - for (bit = 1, i = 0; i < 16; bit <<= 1, i++) - if (nirr & bit) - break; - irq = HD64461_IRQBASE + i; + unsigned short intv = ctrl_inw(HD64461_NIRR); + struct irq_desc *ext_desc; + unsigned int ext_irq = HD64461_IRQBASE; + + intv &= (1 << HD64461_IRQ_NUM) - 1; + + while (intv) { + if (intv & 1) { + ext_desc = irq_desc + ext_irq; + handle_level_irq(ext_irq, ext_desc); + } + intv >>= 1; + ext_irq++; } - return irq; } int __init setup_hd64461(void) @@ -93,6 +94,9 @@ int __init setup_hd64461(void) set_irq_chip_and_handler(i, &hd64461_irq_chip, handle_level_irq); + set_irq_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); + set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); + #ifdef CONFIG_HD64461_ENABLER printk(KERN_INFO "HD64461: enabling PCMCIA devices\n"); __raw_writeb(0x4c, HD64461_PCC1CSCIER); diff --git a/arch/sh/include/asm/hd64461.h b/arch/sh/include/asm/hd64461.h index 8c1353baf00..52b4b623827 100644 --- a/arch/sh/include/asm/hd64461.h +++ b/arch/sh/include/asm/hd64461.h @@ -242,7 +242,6 @@ #include /* arch/sh/cchips/hd6446x/hd64461/setup.c */ -int hd64461_irq_demux(int irq); void hd64461_register_irq_demux(int irq, int (*demux) (int irq, void *dev), void *dev); void hd64461_unregister_irq_demux(int irq); -- cgit v1.2.3 From eaeed5d31d8ded02fa0a4b608f57418cc0e65b07 Mon Sep 17 00:00:00 2001 From: Steve Glendinning Date: Fri, 20 Mar 2009 14:16:29 +0000 Subject: sh: add support for SMSC Polaris platform Polaris is an SMSC reference platform with a SH7709S CPU and LAN9118 ethernet controller. This patch adds support for it. Updated following feedback from Nobuhiro Iwamatsu. 
Signed-off-by: Steve Glendinning Reviewed-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt --- arch/sh/boards/Kconfig | 7 + arch/sh/boards/Makefile | 1 + arch/sh/boards/board-polaris.c | 149 ++++++ arch/sh/configs/polaris_defconfig | 969 ++++++++++++++++++++++++++++++++++++++ arch/sh/tools/mach-types | 1 + 5 files changed, 1127 insertions(+) create mode 100644 arch/sh/boards/board-polaris.c create mode 100644 arch/sh/configs/polaris_defconfig (limited to 'arch') diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 48c043bf45c..dcc1af8a2cf 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -261,6 +261,13 @@ config SH_CAYMAN depends on CPU_SUBTYPE_SH5_101 || CPU_SUBTYPE_SH5_103 select SYS_SUPPORTS_PCI +config SH_POLARIS + bool "SMSC Polaris" + select CPU_HAS_IPR_IRQ + depends on CPU_SUBTYPE_SH7709 + help + Select if configuring for an SMSC Polaris development board + endmenu source "arch/sh/boards/mach-r2d/Kconfig" diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index d0daebce8b3..7baa2109023 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_SH_URQUELL) += board-urquell.o obj-$(CONFIG_SH_SHMIN) += board-shmin.o obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o obj-$(CONFIG_SH_ESPT) += board-espt.o +obj-$(CONFIG_SH_POLARIS) += board-polaris.o diff --git a/arch/sh/boards/board-polaris.c b/arch/sh/boards/board-polaris.c new file mode 100644 index 00000000000..62607eb5100 --- /dev/null +++ b/arch/sh/boards/board-polaris.c @@ -0,0 +1,149 @@ +/* + * June 2006 steve.glendinning@smsc.com + * + * Polaris-specific resource declaration + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BCR2 (0xFFFFFF62) +#define WCR2 (0xFFFFFF66) +#define AREA5_WAIT_CTRL (0x1C00) +#define WAIT_STATES_10 (0x7) + +static struct resource smsc911x_resources[] = { + [0] = { + .name = "smsc911x-memory", + .start = PA_EXT5, + .end = PA_EXT5 + 0x1fff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "smsc911x-irq", + .start = IRQ0_IRQ, + .end = IRQ0_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct smsc911x_platform_config smsc911x_config = { + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, + .flags = SMSC911X_USE_32BIT, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + +static struct platform_device smsc911x_device = { + .name = "smsc911x", + .id = 0, + .num_resources = ARRAY_SIZE(smsc911x_resources), + .resource = smsc911x_resources, + .dev = { + .platform_data = &smsc911x_config, + }, +}; + +static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 }; + +static struct heartbeat_data heartbeat_data = { + .bit_pos = heartbeat_bit_pos, + .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), + .regsize = 8, +}; + +static struct resource heartbeat_resources[] = { + [0] = { + .start = PORT_PCDR, + .end = PORT_PCDR, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device heartbeat_device = { + .name = "heartbeat", + .id = -1, + .dev = { + .platform_data = &heartbeat_data, + }, + .num_resources = ARRAY_SIZE(heartbeat_resources), + .resource = heartbeat_resources, +}; + +static struct platform_device *polaris_devices[] __initdata = { + &smsc911x_device, + &heartbeat_device, +}; + +static int __init polaris_initialise(void) +{ + u16 wcr, bcr_mask; + + printk(KERN_INFO "Configuring Polaris external bus\n"); + + /* Configure area 5 with 2 wait states */ + wcr = ctrl_inw(WCR2); + wcr &= (~AREA5_WAIT_CTRL); + wcr |= 
(WAIT_STATES_10 << 10); + ctrl_outw(wcr, WCR2); + + /* Configure area 5 for 32-bit access */ + bcr_mask = ctrl_inw(BCR2); + bcr_mask |= 1 << 10; + ctrl_outw(bcr_mask, BCR2); + + return platform_add_devices(polaris_devices, + ARRAY_SIZE(polaris_devices)); +} +arch_initcall(polaris_initialise); + +static struct ipr_data ipr_irq_table[] = { + /* External IRQs */ + { IRQ0_IRQ, 0, 0, 1, }, /* IRQ0 */ + { IRQ1_IRQ, 0, 4, 1, }, /* IRQ1 */ +}; + +static unsigned long ipr_offsets[] = { + INTC_IPRC +}; + +static struct ipr_desc ipr_irq_desc = { + .ipr_offsets = ipr_offsets, + .nr_offsets = ARRAY_SIZE(ipr_offsets), + + .ipr_data = ipr_irq_table, + .nr_irqs = ARRAY_SIZE(ipr_irq_table), + .chip = { + .name = "sh7709-ext", + }, +}; + +static void __init init_polaris_irq(void) +{ + /* Disable all interrupts */ + ctrl_outw(0, BCR_ILCRA); + ctrl_outw(0, BCR_ILCRB); + ctrl_outw(0, BCR_ILCRC); + ctrl_outw(0, BCR_ILCRD); + ctrl_outw(0, BCR_ILCRE); + ctrl_outw(0, BCR_ILCRF); + ctrl_outw(0, BCR_ILCRG); + + register_ipr_controller(&ipr_irq_desc); +} + +static struct sh_machine_vector mv_polaris __initmv = { + .mv_name = "Polaris", + .mv_nr_irqs = 61, + .mv_init_irq = init_polaris_irq, +}; diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig new file mode 100644 index 00000000000..320def233b2 --- /dev/null +++ b/arch/sh/configs/polaris_defconfig @@ -0,0 +1,969 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.29-rc4 +# Wed Feb 11 18:41:59 2009 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +# CONFIG_GENERIC_GPIO is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y 
+CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH3=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +CONFIG_CPU_SUBTYPE_SH7709=y +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x0C000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_UNEVICTABLE_LRU=y + +# +# Cache configuration +# +# 
CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU_EMU=y +CONFIG_SH_ADC=y +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_IPR_IRQ=y +CONFIG_CPU_HAS_SR_RB=y + +# +# Board support +# +# CONFIG_SH_SOLUTION_ENGINE is not set +# CONFIG_SH_HP6XX is not set +CONFIG_SH_POLARIS=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=33000000 +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +CONFIG_SH_DMA_API=y +CONFIG_SH_DMA=y +CONFIG_NR_ONCHIP_DMA_CHANNELS=4 +# CONFIG_NR_DMA_CHANNELS_BOOL is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +CONFIG_HEARTBEAT=y +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +CONFIG_HZ_100=y +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=100 +CONFIG_SCHED_HRTICK=y +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_SECCOMP is not set +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_GUSA=y +# CONFIG_GUSA_RB is not set + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC1,115200 root=/dev/mtdblock2 rootfstype=jffs2 mem=63M mtdparts=physmap-flash.0:0x00100000(bootloader)ro,0x00500000(Kernel)ro,0x00A00000(Filesystem)" + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +# CONFIG_PM is not set +# CONFIG_CPU_IDLE is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_COMPAT_NET_DEV_OPS=y +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# 
Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_COMPAT=y +CONFIG_MTD_PHYSMAP_START=0x00000000 +CONFIG_MTD_PHYSMAP_LEN=0x01000000 +CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_QINFO_PROBE is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_93CX6 is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not 
set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +CONFIG_SMSC_PHY=y +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_AX88796 is not set +# CONFIG_STNIC is not set +# CONFIG_SMC91X is not set +# CONFIG_SMC911X is not set +CONFIG_SMSC911X=y +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set +# CONFIG_IWLWIFI_LEDS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVKMEM=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_N_HDLC is not set +# CONFIG_RISCOM8 is not set +# CONFIG_SPECIALIX is not set +# CONFIG_RIO is not set +# CONFIG_STALDRV is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=3 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE 
is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_SH=y +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_WRITEBUFFER is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set 
+CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_SUNRPC_REGISTER_V4 is not set +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_NLS is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_DEBUG_PREEMPT=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_PI_LIST=y +# CONFIG_RT_MUTEX_TESTER is not set +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_LOCK_ALLOC=y +# CONFIG_PROVE_LOCKING is not set +CONFIG_LOCKDEP=y +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +CONFIG_DEBUG_SG=y +# CONFIG_DEBUG_NOTIFIERS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_SH_STANDARD_BIOS is not set +CONFIG_EARLY_SCIF_CONSOLE=y +CONFIG_EARLY_SCIF_CONSOLE_PORT=0x00000000 +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_BOOTMEM is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_4KSTACKS is not set +# CONFIG_IRQSTACKS is not set +CONFIG_DUMP_CODE=y +# CONFIG_SH_NO_BSS_INIT is not set +# CONFIG_MORE_COMPILE_OPTIONS is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# 
CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 4932705603b..8477b5d884f 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -54,3 +54,4 @@ SH7763RDP SH_SH7763RDP SH7785LCR SH_SH7785LCR URQUELL SH_URQUELL ESPT SH_ESPT +POLARIS SH_POLARIS -- cgit v1.2.3 From 949abe574739848b1e68271fbac86c3cb4506aad Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 21 Mar 2009 21:12:19 +0800 Subject: crypto: sha512-s390 - Add missing block size I missed the block size when converting sha512-s390 to shash. Tested-by: Heiko Carstens Signed-off-by: Herbert Xu --- arch/s390/crypto/sha512_s390.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 420acf41b72..83192bfc804 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -84,6 +84,7 @@ static struct shash_alg sha384_alg = { .cra_driver_name= "sha384-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha_ctx), .cra_module = THIS_MODULE, } -- cgit v1.2.3 From e84665c9cb4db963393fafad6fefe5efdd7e4a09 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Fri, 20 Mar 2009 09:52:09 +0000 Subject: dsa: add switch chip cascading support The initial version of the DSA driver only supported a single switch chip per network interface, while DSA-capable switch chips can be interconnected to form a tree of switch chips. This patch adds support for multiple switch chips on a network interface. An example topology for a 16-port device with an embedded CPU is as follows: +-----+ +--------+ +--------+ | |eth0 10| switch |9 10| switch | | CPU +----------+ +-------+ | | | | chip 0 | | chip 1 | +-----+ +---++---+ +---++---+ || || || || ||1000baseT ||1000baseT ||ports 1-8 ||ports 9-16 This requires a couple of interdependent changes in the DSA layer: - The dsa platform driver data needs to be extended: there is still only one netdevice per DSA driver instance (eth0 in the example above), but each of the switch chips in the tree needs its own mii_bus device pointer, MII management bus address, and port name array. (include/net/dsa.h) The existing in-tree dsa users need some small changes to deal with this. (arch/arm) - The DSA and Ethertype DSA tagging modules need to be extended to use the DSA device ID field on receive and demultiplex the packet accordingly, and fill in the DSA device ID field on transmit according to which switch chip the packet is heading to. (net/dsa/tag_{dsa,edsa}.c) - The concept of "CPU port", which is the switch chip port that the CPU is connected to (port 10 on switch chip 0 in the example), needs to be extended with the concept of "upstream port", which is the port on the switch chip that will bring us one hop closer to the CPU (port 10 for both switch chips in the example above). - The dsa platform data needs to specify which ports on which switch chips are links to other switch chips, so that we can enable DSA tagging mode on them. (For inter-switch links, we always use non-EtherType DSA tagging, since it has lower overhead. The CPU link uses dsa or edsa tagging depending on what the 'root' switch chip supports.) 
This is done by specifying "dsa" for the given port in the port array. - The dsa platform data needs to be extended with information on via which port to reach any given switch chip from any given switch chip. This info is specified via the per-switch chip data struct ->rtable[] array, which gives the nexthop ports for each of the other switches in the tree. For the example topology above, the dsa platform data would look something like this: static struct dsa_chip_data sw[2] = { { .mii_bus = &foo, .sw_addr = 1, .port_names[0] = "p1", .port_names[1] = "p2", .port_names[2] = "p3", .port_names[3] = "p4", .port_names[4] = "p5", .port_names[5] = "p6", .port_names[6] = "p7", .port_names[7] = "p8", .port_names[9] = "dsa", .port_names[10] = "cpu", .rtable = (s8 []){ -1, 9, }, }, { .mii_bus = &foo, .sw_addr = 2, .port_names[0] = "p9", .port_names[1] = "p10", .port_names[2] = "p11", .port_names[3] = "p12", .port_names[4] = "p13", .port_names[5] = "p14", .port_names[6] = "p15", .port_names[7] = "p16", .port_names[10] = "dsa", .rtable = (s8 []){ 10, -1, }, }, }, static struct dsa_platform_data pd = { .netdev = &foo, .nr_switches = 2, .sw = sw, }; Signed-off-by: Lennert Buytenhek Tested-by: Gary Thomas Signed-off-by: David S. Miller --- arch/arm/mach-kirkwood/common.c | 5 ++++- arch/arm/mach-kirkwood/rd88f6281-setup.c | 13 +++++++++---- arch/arm/mach-orion5x/common.c | 5 ++++- arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c | 9 +++++++-- arch/arm/mach-orion5x/rd88f5181l-ge-setup.c | 10 ++++++++-- arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c | 10 ++++++++-- arch/arm/mach-orion5x/wrt350n-v2-setup.c | 9 +++++++-- 7 files changed, 47 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index b3404b7775b..0d2074f51a5 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -231,14 +231,17 @@ static struct platform_device kirkwood_switch_device = { void __init kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq) { + int i; + if (irq != NO_IRQ) { kirkwood_switch_resources[0].start = irq; kirkwood_switch_resources[0].end = irq; kirkwood_switch_device.num_resources = 1; } - d->mii_bus = &kirkwood_ge00_shared.dev; d->netdev = &kirkwood_ge00.dev; + for (i = 0; i < d->nr_chips; i++) + d->chip[i].mii_bus = &kirkwood_ge00_shared.dev; kirkwood_switch_device.dev.platform_data = d; platform_device_register(&kirkwood_switch_device); diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index 9a0e905d10c..e1c0516c4df 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -75,7 +75,7 @@ static struct mv643xx_eth_platform_data rd88f6281_ge00_data = { .duplex = DUPLEX_FULL, }; -static struct dsa_platform_data rd88f6281_switch_data = { +static struct dsa_chip_data rd88f6281_switch_chip_data = { .port_names[0] = "lan1", .port_names[1] = "lan2", .port_names[2] = "lan3", @@ -83,6 +83,11 @@ static struct dsa_platform_data rd88f6281_switch_data = { .port_names[5] = "cpu", }; +static struct dsa_platform_data rd88f6281_switch_plat_data = { + .nr_chips = 1, + .chip = &rd88f6281_switch_chip_data, +}; + static struct mv643xx_eth_platform_data rd88f6281_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(11), }; @@ -105,12 +110,12 @@ static void __init rd88f6281_init(void) kirkwood_ge00_init(&rd88f6281_ge00_data); kirkwood_pcie_id(&dev, &rev); if (rev == MV88F6281_REV_A0) { - rd88f6281_switch_data.sw_addr = 10; + 
rd88f6281_switch_chip_data.sw_addr = 10; kirkwood_ge01_init(&rd88f6281_ge01_data); } else { - rd88f6281_switch_data.port_names[4] = "wan"; + rd88f6281_switch_chip_data.port_names[4] = "wan"; } - kirkwood_ge00_switch_init(&rd88f6281_switch_data, NO_IRQ); + kirkwood_ge00_switch_init(&rd88f6281_switch_plat_data, NO_IRQ); kirkwood_rtc_init(); kirkwood_sata_init(&rd88f6281_sata_data); diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 0a623379789..a4ecf625981 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -219,14 +219,17 @@ static struct platform_device orion5x_switch_device = { void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq) { + int i; + if (irq != NO_IRQ) { orion5x_switch_resources[0].start = irq; orion5x_switch_resources[0].end = irq; orion5x_switch_device.num_resources = 1; } - d->mii_bus = &orion5x_eth_shared.dev; d->netdev = &orion5x_eth.dev; + for (i = 0; i < d->nr_chips; i++) + d->chip[i].mii_bus = &orion5x_eth_shared.dev; orion5x_switch_device.dev.platform_data = d; platform_device_register(&orion5x_switch_device); diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c index 15f53235ee3..9c1ca41730b 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c @@ -94,7 +94,7 @@ static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { .duplex = DUPLEX_FULL, }; -static struct dsa_platform_data rd88f5181l_fxo_switch_data = { +static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = { .port_names[0] = "lan2", .port_names[1] = "lan1", .port_names[2] = "wan", @@ -103,6 +103,11 @@ static struct dsa_platform_data rd88f5181l_fxo_switch_data = { .port_names[7] = "lan3", }; +static struct dsa_platform_data rd88f5181l_fxo_switch_plat_data = { + .nr_chips = 1, + .chip = &rd88f5181l_fxo_switch_chip_data, +}; + static void __init rd88f5181l_fxo_init(void) { /* @@ -117,7 +122,7 @@ static void __init rd88f5181l_fxo_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_fxo_eth_data); - orion5x_eth_switch_init(&rd88f5181l_fxo_switch_data, NO_IRQ); + orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data, NO_IRQ); orion5x_uart0_init(); orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c index 8ad3934399d..ee1399ff0ce 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c @@ -95,7 +95,7 @@ static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = { .duplex = DUPLEX_FULL, }; -static struct dsa_platform_data rd88f5181l_ge_switch_data = { +static struct dsa_chip_data rd88f5181l_ge_switch_chip_data = { .port_names[0] = "lan2", .port_names[1] = "lan1", .port_names[2] = "wan", @@ -104,6 +104,11 @@ static struct dsa_platform_data rd88f5181l_ge_switch_data = { .port_names[7] = "lan3", }; +static struct dsa_platform_data rd88f5181l_ge_switch_plat_data = { + .nr_chips = 1, + .chip = &rd88f5181l_ge_switch_chip_data, +}; + static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = { I2C_BOARD_INFO("ds1338", 0x68), }; @@ -122,7 +127,8 @@ static void __init rd88f5181l_ge_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_ge_eth_data); - orion5x_eth_switch_init(&rd88f5181l_ge_switch_data, gpio_to_irq(8)); + orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data, + gpio_to_irq(8)); orion5x_i2c_init(); 
orion5x_uart0_init(); diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c index 262e25e4dac..7737cf9a8f5 100644 --- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c @@ -35,7 +35,7 @@ static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = { .duplex = DUPLEX_FULL, }; -static struct dsa_platform_data rd88f6183ap_ge_switch_data = { +static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = { .port_names[0] = "lan1", .port_names[1] = "lan2", .port_names[2] = "lan3", @@ -44,6 +44,11 @@ static struct dsa_platform_data rd88f6183ap_ge_switch_data = { .port_names[5] = "cpu", }; +static struct dsa_platform_data rd88f6183ap_ge_switch_plat_data = { + .nr_chips = 1, + .chip = &rd88f6183ap_ge_switch_chip_data, +}; + static struct mtd_partition rd88f6183ap_ge_partitions[] = { { .name = "kernel", @@ -89,7 +94,8 @@ static void __init rd88f6183ap_ge_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f6183ap_ge_eth_data); - orion5x_eth_switch_init(&rd88f6183ap_ge_switch_data, gpio_to_irq(3)); + orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data, + gpio_to_irq(3)); spi_register_board_info(rd88f6183ap_ge_spi_slave_info, ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); orion5x_spi_init(); diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c index cc8f8920086..1b4ad9d5e2e 100644 --- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c +++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c @@ -106,7 +106,7 @@ static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = { .duplex = DUPLEX_FULL, }; -static struct dsa_platform_data wrt350n_v2_switch_data = { +static struct dsa_chip_data wrt350n_v2_switch_chip_data = { .port_names[0] = "lan2", .port_names[1] = "lan1", .port_names[2] = "wan", @@ -115,6 +115,11 @@ static struct dsa_platform_data wrt350n_v2_switch_data = { .port_names[7] = "lan4", }; +static struct dsa_platform_data wrt350n_v2_switch_plat_data = { + .nr_chips = 1, + .chip = &wrt350n_v2_switch_chip_data, +}; + static void __init wrt350n_v2_init(void) { /* @@ -129,7 +134,7 @@ static void __init wrt350n_v2_init(void) */ orion5x_ehci0_init(); orion5x_eth_init(&wrt350n_v2_eth_data); - orion5x_eth_switch_init(&wrt350n_v2_switch_data, NO_IRQ); + orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data, NO_IRQ); orion5x_uart0_init(); orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE, -- cgit v1.2.3 From 345953cf9a44b19c98f8c0fe6ca7724202bcdb94 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Sat, 14 Mar 2009 09:23:03 -0500 Subject: powerpc/mm: Fix Respect _PAGE_COHERENT on classic ppc32 SW TLB load machines Grant picked up the wrong version of "Respect _PAGE_COHERENT on classic ppc32 SW" (commit a4bd6a93c3f14691c8a29e53eb04dc734b27f0db) It was missing the code to actually deal with the fixup of _PAGE_COHERENT based on the CPU feature. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_32.S | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 7db2e42d97a..d794a637e42 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -513,6 +513,9 @@ InstructionTLBMiss: rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ ori r1,r1,0xe04 /* clear out reserved bits */ andc r1,r3,r1 /* PP = user? (rw&dirty? 
2: 3): 0 */ +BEGIN_FTR_SECTION + rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ +END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 mfspr r3,SPRN_IMISS tlbli r3 @@ -587,6 +590,9 @@ DataLoadTLBMiss: rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ ori r1,r1,0xe04 /* clear out reserved bits */ andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ +BEGIN_FTR_SECTION + rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ +END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 mfspr r3,SPRN_DMISS tlbld r3 @@ -655,6 +661,9 @@ DataStoreTLBMiss: rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ li r1,0xe05 /* clear out reserved bits & PP lsb */ andc r1,r3,r1 /* PP = user? 2: 0 */ +BEGIN_FTR_SECTION + rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ +END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 mfspr r3,SPRN_DMISS tlbld r3 -- cgit v1.2.3 From d7001198366bffce4506ba21b7b0fee2de194f73 Mon Sep 17 00:00:00 2001 From: Jan Nikitenko Date: Fri, 28 Nov 2008 08:52:58 +0100 Subject: MIPS: Fix oops in dma_unmap_page on not coherent mips platforms dma_cache_wback_inv() expects virtual address, but physical was provided due to translation via plat_dma_addr_to_phys(). If replaced with dma_addr_to_virt(), page fault oops from dma_unmap_page() is gone on au1550 platform. Signed-off-by: Jan Nikitenko Acked-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/mm/dma-default.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 546e6977d4f..bed56f1ac83 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -225,7 +225,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) { unsigned long addr; - addr = plat_dma_addr_to_phys(dma_address); + addr = dma_addr_to_virt(dma_address); dma_cache_wback_inv(addr, size); } -- cgit v1.2.3 From 5864810bc50de57e1b4757850d3208f69579af7f Mon Sep 17 00:00:00 2001 From: Shinya Kuribayashi Date: Wed, 18 Mar 2009 09:04:01 +0900 Subject: MIPS: VR5500: Enable prefetch Signed-off-by: Shinya Kuribayashi Signed-off-by: Ralf Baechle --- arch/mips/mm/c-r4k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index c43f4b26a69..871e828bc62 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -780,7 +780,7 @@ static void __cpuinit probe_pcache(void) c->dcache.ways = 2; c->dcache.waybit = 0; - c->options |= MIPS_CPU_CACHE_CDEX_P; + c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH; break; case CPU_TX49XX: -- cgit v1.2.3 From 5484879c0a50de7f60d403d375faff41cbd4ab01 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Sat, 21 Mar 2009 13:50:48 +0800 Subject: MIPS: compat: Remove duplicated #include Remove duplicated #include in arch/mips/kernel/linux32.c. 
Signed-off-by: Huang Weiyi Signed-off-by: Ralf Baechle --- arch/mips/kernel/linux32.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 1a86f84fa94..49aac6e17df 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 89e18eb331cc83fb4923bbc9a93beb5cb53eca0a Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 23 Mar 2009 22:14:55 +0100 Subject: MIPS: Change {set,clear,change}_c0_ to return old value. This is more standard and useful and need for the following fix to work correctly. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/mipsregs.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 0417516503f..526f327475c 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1391,11 +1391,11 @@ static inline void tlb_write_random(void) static inline unsigned int \ set_c0_##name(unsigned int set) \ { \ - unsigned int res; \ + unsigned int res, new; \ \ res = read_c0_##name(); \ - res |= set; \ - write_c0_##name(res); \ + new = res | set; \ + write_c0_##name(new); \ \ return res; \ } \ @@ -1403,24 +1403,24 @@ set_c0_##name(unsigned int set) \ static inline unsigned int \ clear_c0_##name(unsigned int clear) \ { \ - unsigned int res; \ + unsigned int res, new; \ \ res = read_c0_##name(); \ - res &= ~clear; \ - write_c0_##name(res); \ + new = res & ~clear; \ + write_c0_##name(new); \ \ return res; \ } \ \ static inline unsigned int \ -change_c0_##name(unsigned int change, unsigned int new) \ +change_c0_##name(unsigned int change, unsigned int val) \ { \ - unsigned int res; \ + unsigned int res, new; \ \ res = read_c0_##name(); \ - res &= ~change; \ - res |= (new & change); \ - write_c0_##name(res); \ + new = res & ~change; \ + new |= (val & change); \ + write_c0_##name(new); \ \ return res; \ } -- cgit v1.2.3 From 9fb4c2b9e06c0a197d867b34865b113a47370bd5 Mon Sep 17 00:00:00 2001 From: Chris Dearman Date: Fri, 20 Mar 2009 15:33:55 -0700 Subject: MIPS: R2: Fix problem with code that incorrectly modifies ebase. Commit 566f74f6b2f8b85d5b8d6caaf97e5672cecd3e3e had a change that incorrectly modified ebase. This backs out the lines that modified ebase. In addition, the ebase exception vector is now allocated with correct alignment and the ebase register updated according to the architecture specification. Based on original patch by David VomLehn . 
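Condensed into a freestanding sketch (not part of the patch itself), the allocation rule and the protected EBase write end up looking like this; VECTORSPACING, fls() and __alloc_bootmem() are the kernel symbols visible in the hunks below, the rest is illustrative:

	unsigned long size  = 0x200 + VECTORSPACING * 64;
	unsigned long align = 1UL << fls(size);	/* power of two covering the whole vector area */

	ebase = (unsigned long)__alloc_bootmem(size, align, 0);

	/* per CPU: only touch the EBase register while Status.BEV is set */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase(ebase);
	write_c0_status(sr);

Rounding the area up to a power of two keeps the low-order bits of the base clear, which is what lets the EBase update conform to the architecture specification the changelog refers to.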
Signed-off-by: David VomLehn Signed-off-by: Chris Dearman Signed-off-by: Ralf Baechle --- arch/mips/kernel/traps.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b2d7041341b..29fadaccecd 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1520,7 +1520,9 @@ void __cpuinit per_cpu_trap_init(void) #endif /* CONFIG_MIPS_MT_SMTC */ if (cpu_has_veic || cpu_has_vint) { + unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); + write_c0_status(sr); /* Setting vector spacing enables EI/VI mode */ change_c0_intctl(0x3e0, VECTORSPACING); } @@ -1602,8 +1604,6 @@ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, #ifdef CONFIG_64BIT unsigned long uncached_ebase = TO_UNCAC(ebase); #endif - if (cpu_has_mips_r2) - uncached_ebase += (read_c0_ebase() & 0x3ffff000); if (!addr) panic(panic_null_cerr); @@ -1635,9 +1635,11 @@ void __init trap_init(void) return; /* Already done */ #endif - if (cpu_has_veic || cpu_has_vint) - ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64); - else { + if (cpu_has_veic || cpu_has_vint) { + unsigned long size = 0x200 + VECTORSPACING*64; + ebase = (unsigned long) + __alloc_bootmem(size, 1 << fls(size), 0); + } else { ebase = CAC_BASE; if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); -- cgit v1.2.3 From 5201b0a47b83aba66e1e348d18f3f9a7bc17d612 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Sat, 21 Mar 2009 13:50:58 +0800 Subject: avr32: remove duplicated #include Remove duplicated #include in arch/avr32/boards/hammerhead/flash.c. Signed-off-by: Huang Weiyi Acked-by: Alex Raimondi Signed-off-by: Haavard Skinnemoen --- arch/avr32/boards/hammerhead/flash.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/avr32/boards/hammerhead/flash.c b/arch/avr32/boards/hammerhead/flash.c index a98c6dd3a02..559bbcb03f9 100644 --- a/arch/avr32/boards/hammerhead/flash.c +++ b/arch/avr32/boards/hammerhead/flash.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From f0b85051d0f0891378a83db22c0786f1d756fbff Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:01 +0100 Subject: KVM: SVM: Clean up VINTR setting The current VINTR intercept setters don't look clean to me. To make the code easier to read and enable the possibilty to trap on a VINTR set, this uses a helper function to set the VINTR intercept. 
v2 uses two distinct functions for setting and clearing the bit Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a9e769e4e25..33407d95761 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -718,6 +718,16 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) to_svm(vcpu)->vmcb->save.rflags = rflags; } +static void svm_set_vintr(struct vcpu_svm *svm) +{ + svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR; +} + +static void svm_clear_vintr(struct vcpu_svm *svm) +{ + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); +} + static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; @@ -1380,7 +1390,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm, { KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler); - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); + svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; /* * If the user space waits to inject interrupts, exit as soon as @@ -1593,7 +1603,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { /* unable to deliver irq, set pending irq */ - vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); + svm_set_vintr(svm); svm_inject_irq(svm, 0x0); goto out; } @@ -1652,9 +1662,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, */ if (!svm->vcpu.arch.interrupt_window_open && (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window)) - control->intercept |= 1ULL << INTERCEPT_VINTR; - else - control->intercept &= ~(1ULL << INTERCEPT_VINTR); + svm_set_vintr(svm); + else + svm_clear_vintr(svm); } static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) -- cgit v1.2.3 From 9962d032bbff0268f22068787831405f8468c8b4 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:02 +0100 Subject: KVM: SVM: Move EFER and MSR constants to generic x86 code MSR_EFER_SVME_MASK, MSR_VM_CR and MSR_VM_HSAVE_PA are set in KVM specific headers. Linux does have nice header files to collect EFER bits and MSR IDs, so IMHO we should put them there. While at it, I also changed the naming scheme to match that of the other defines. 
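For orientation, a minimal sketch of the relocated constants in use; hsave_pa and the error handling are made up for the example, while the values themselves (EFER bit 12, MSRs 0xc0010114/0xc0010117, VM_CR bit 4) are the ones visible in the diff below:

	u64 vm_cr, efer;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1ULL << SVM_VM_CR_SVM_DISABLE))
		return -ENODEV;			/* SVM switched off by the BIOS */

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);	/* enable the extension */
	wrmsrl(MSR_VM_HSAVE_PA, hsave_pa);	/* host save area used by VMRUN */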
(introduced in v6) Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/asm/msr-index.h | 7 +++++++ arch/x86/include/asm/svm.h | 4 ---- arch/x86/include/asm/virtext.h | 2 +- arch/x86/kvm/svm.c | 6 +++--- 5 files changed, 12 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 730843d1d2f..2998efe8927 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -22,6 +22,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS 16 #define KVM_MEMORY_SLOTS 32 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 358acc59ae0..46e9646e7a6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -18,11 +18,13 @@ #define _EFER_LME 8 /* Long mode enable */ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ +#define _EFER_SVME 12 /* Enable virtualization */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) +#define EFER_SVME (1<<_EFER_SVME) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_PERFCTR0 0x000000c1 @@ -360,4 +362,9 @@ #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c +/* AMD-V MSRs */ + +#define MSR_VM_CR 0xc0010114 +#define MSR_VM_HSAVE_PA 0xc0010117 + #endif /* _ASM_X86_MSR_INDEX_H */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 1b8afa78e86..82ada75f3eb 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -174,10 +174,6 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CPUID_FEATURE_SHIFT 2 #define SVM_CPUID_FUNC 0x8000000a -#define MSR_EFER_SVME_MASK (1ULL << 12) -#define MSR_VM_CR 0xc0010114 -#define MSR_VM_HSAVE_PA 0xc0010117ULL - #define SVM_VM_CR_SVM_DISABLE 4 #define SVM_SELECTOR_S_SHIFT 4 diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 59363627523..e0f9aa16358 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -118,7 +118,7 @@ static inline void cpu_svm_disable(void) wrmsrl(MSR_VM_HSAVE_PA, 0); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); + wrmsrl(MSR_EFER, efer & ~EFER_SVME); } /** Makes sure SVM is disabled, if it is supported on the CPU diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 33407d95761..e4eb3fd91b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -198,7 +198,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if (!npt_enabled && !(efer & EFER_LMA)) efer &= ~EFER_LME; - to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; + to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; vcpu->arch.shadow_efer = efer; } @@ -292,7 +292,7 @@ static void svm_hardware_enable(void *garbage) svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK); + wrmsrl(MSR_EFER, efer | EFER_SVME); wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(svm_data->save_area) << PAGE_SHIFT); @@ -559,7 +559,7 @@ static void init_vmcb(struct vcpu_svm *svm) init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); - save->efer = MSR_EFER_SVME_MASK; + save->efer = EFER_SVME; save->dr6 = 0xffff0ff0; save->dr7 = 0x400; save->rflags = 2; -- cgit v1.2.3 From 
c0725420cfdcf6dd9705b164a8c6cba86684080d Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:03 +0100 Subject: KVM: SVM: Add helper functions for nested SVM These are helpers for the nested SVM implementation. - nsvm_printk implements a debug printk variant - nested_svm_do calls a handler that can accesses gpa-based memory v3 makes use of the new permission checker v6 changes: - streamline nsvm_debug() - remove printk(KERN_ERR) - SVME check before CPL check - give GP error code - use new EFER constant Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e4eb3fd91b9..87debdcd1b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -50,6 +50,15 @@ MODULE_LICENSE("GPL"); #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) +/* Turn on to get debugging output*/ +/* #define NESTED_DEBUG */ + +#ifdef NESTED_DEBUG +#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args) +#else +#define nsvm_printk(fmt, args...) do {} while(0) +#endif + /* enable NPT for AMD64 and X86 with PAE */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static bool npt_enabled = true; @@ -1149,6 +1158,82 @@ static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return 1; } +static int nested_svm_check_permissions(struct vcpu_svm *svm) +{ + if (!(svm->vcpu.arch.shadow_efer & EFER_SVME) + || !is_paging(&svm->vcpu)) { + kvm_queue_exception(&svm->vcpu, UD_VECTOR); + return 1; + } + + if (svm->vmcb->save.cpl) { + kvm_inject_gp(&svm->vcpu, 0); + return 1; + } + + return 0; +} + +static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) +{ + struct page *page; + + down_read(¤t->mm->mmap_sem); + page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); + up_read(¤t->mm->mmap_sem); + + if (is_error_page(page)) { + printk(KERN_INFO "%s: could not find page at 0x%llx\n", + __func__, gpa); + kvm_release_page_clean(page); + kvm_inject_gp(&svm->vcpu, 0); + return NULL; + } + return page; +} + +static int nested_svm_do(struct vcpu_svm *svm, + u64 arg1_gpa, u64 arg2_gpa, void *opaque, + int (*handler)(struct vcpu_svm *svm, + void *arg1, + void *arg2, + void *opaque)) +{ + struct page *arg1_page; + struct page *arg2_page = NULL; + void *arg1; + void *arg2 = NULL; + int retval; + + arg1_page = nested_svm_get_page(svm, arg1_gpa); + if(arg1_page == NULL) + return 1; + + if (arg2_gpa) { + arg2_page = nested_svm_get_page(svm, arg2_gpa); + if(arg2_page == NULL) { + kvm_release_page_clean(arg1_page); + return 1; + } + } + + arg1 = kmap_atomic(arg1_page, KM_USER0); + if (arg2_gpa) + arg2 = kmap_atomic(arg2_page, KM_USER1); + + retval = handler(svm, arg1, arg2, opaque); + + kunmap_atomic(arg1, KM_USER0); + if (arg2_gpa) + kunmap_atomic(arg2, KM_USER1); + + kvm_release_page_dirty(arg1_page); + if (arg2_gpa) + kvm_release_page_dirty(arg2_page); + + return retval; +} + static int invalid_op_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { -- cgit v1.2.3 From 1371d90460189d02bf1bcca19dbfe6bd10dc6031 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:04 +0100 Subject: KVM: SVM: Implement GIF, clgi and stgi This patch implements the GIF flag and the clgi and stgi instructions that set this flag. Only if the flag is set (default), interrupts can be received by the CPU. 
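Reduced to a sketch (the real logic is spread over several hunks below), the delivery gate that GIF adds amounts to:

	/* an interrupt may only be injected when all three conditions hold */
	svm->vcpu.arch.interrupt_window_open =
		(svm->vcpu.arch.hflags & HF_GIF_MASK) &&	/* stgi ran, clgi did not */
		(svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
		!(svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);

clgi clears HF_GIF_MASK (and drops any pending V_IRQ), stgi sets it again.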
To keep the information about that somewhere, this patch adds a new hidden flags vector. that is used to store information that does not go into the vmcb, but is SVM specific. I tried to write some code to make -no-kvm-irqchip work too, but the first level guest won't even boot with that atm, so I ditched it. v2 moves the hflags to x86 generic code v3 makes use of the new permission helper v6 only enables interrupt_window if GIF=1 Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/svm.c | 47 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2998efe8927..29e4157732d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -259,6 +259,7 @@ struct kvm_vcpu_arch { unsigned long cr3; unsigned long cr4; unsigned long cr8; + u32 hflags; u64 pdptrs[4]; /* pae */ u64 shadow_efer; u64 apic_base; @@ -738,6 +739,8 @@ enum { TASK_SWITCH_GATE = 3, }; +#define HF_GIF_MASK (1 << 0) + /* * Hardware virtualization extension instructions may fault if a * reboot turns off virtualization while processes are running. diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 87debdcd1b9..79cc06bfe57 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -251,7 +251,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) kvm_rip_write(vcpu, svm->next_rip); svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; - vcpu->arch.interrupt_window_open = 1; + vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK); } static int has_svm(void) @@ -600,6 +600,8 @@ static void init_vmcb(struct vcpu_svm *svm) save->cr4 = 0; } force_new_asid(&svm->vcpu); + + svm->vcpu.arch.hflags = HF_GIF_MASK; } static int svm_vcpu_reset(struct kvm_vcpu *vcpu) @@ -1234,6 +1236,36 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + svm->vcpu.arch.hflags |= HF_GIF_MASK; + + return 1; +} + +static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + svm->vcpu.arch.hflags &= ~HF_GIF_MASK; + + /* After a CLGI no interrupts should come */ + svm_clear_vintr(svm); + svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; + + return 1; +} + static int invalid_op_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { @@ -1535,8 +1567,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = invalid_op_interception, [SVM_EXIT_VMSAVE] = invalid_op_interception, - [SVM_EXIT_STGI] = invalid_op_interception, - [SVM_EXIT_CLGI] = invalid_op_interception, + [SVM_EXIT_STGI] = stgi_interception, + [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = invalid_op_interception, [SVM_EXIT_WBINVD] = emulate_on_interception, [SVM_EXIT_MONITOR] = invalid_op_interception, @@ -1684,6 +1716,9 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) if (!kvm_cpu_has_interrupt(vcpu)) goto out; + if (!(svm->vcpu.arch.hflags & HF_GIF_MASK)) + goto out; + if (!(vmcb->save.rflags & X86_EFLAGS_IF) || (vmcb->control.int_state 
& SVM_INTERRUPT_SHADOW_MASK) || (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { @@ -1710,7 +1745,8 @@ static void kvm_reput_irq(struct vcpu_svm *svm) } svm->vcpu.arch.interrupt_window_open = - !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); + !(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && + (svm->vcpu.arch.hflags & HF_GIF_MASK); } static void svm_do_inject_vector(struct vcpu_svm *svm) @@ -1734,7 +1770,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, svm->vcpu.arch.interrupt_window_open = (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && - (svm->vmcb->save.rflags & X86_EFLAGS_IF)); + (svm->vmcb->save.rflags & X86_EFLAGS_IF) && + (svm->vcpu.arch.hflags & HF_GIF_MASK)); if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary) /* -- cgit v1.2.3 From b286d5d8b0836e76832dafcc5a18b0e8e5a3bc5e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:05 +0100 Subject: KVM: SVM: Implement hsave Implement the hsave MSR, that gives the VCPU a GPA to save the old guest state in. v2 allows userspace to save/restore hsave v4 dummys out the hsave MSR, so we use a host page v6 remembers the guest's hsave and exports the MSR Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/kvm_svm.h | 2 ++ arch/x86/kvm/svm.c | 13 +++++++++++++ arch/x86/kvm/x86.c | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 8e5ee99551f..a0877cac7b9 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -41,6 +41,8 @@ struct vcpu_svm { unsigned long host_dr7; u32 *msrpm; + struct vmcb *hsave; + u64 hsave_msr; }; #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 79cc06bfe57..59aaff1c959 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -626,6 +626,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; + struct page *hsave_page; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); @@ -651,6 +652,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); + hsave_page = alloc_page(GFP_KERNEL); + if (!hsave_page) + goto uninit; + svm->hsave = page_address(hsave_page); + svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; @@ -680,6 +686,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); + __free_page(virt_to_page(svm->hsave)); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } @@ -1377,6 +1384,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_IA32_LASTINTTOIP: *data = svm->vmcb->save.last_excp_to; break; + case MSR_VM_HSAVE_PA: + *data = svm->hsave_msr; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } @@ -1470,6 +1480,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) */ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data); + break; + case MSR_VM_HSAVE_PA: + svm->hsave_msr = data; break; default: return kvm_set_msr_common(vcpu, ecx, data); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 758b7a155ae..99165a961f0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -456,7 +456,7 @@ static u32 msrs_to_save[] = { MSR_CSTAR, 
MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT + MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; -- cgit v1.2.3 From 5542675baa7e62ca4d18278c8758b6a4ec410639 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:06 +0100 Subject: KVM: SVM: Add VMLOAD and VMSAVE handlers This implements the VMLOAD and VMSAVE instructions, that usually surround the VMRUN instructions. Both instructions load / restore the same elements, so we only need to implement them once. v2 fixes CPL checking and replaces memcpy by assignments v3 makes use of the new permission checking Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 59aaff1c959..a83c94eb577 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1243,6 +1243,62 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) +{ + to_vmcb->save.fs = from_vmcb->save.fs; + to_vmcb->save.gs = from_vmcb->save.gs; + to_vmcb->save.tr = from_vmcb->save.tr; + to_vmcb->save.ldtr = from_vmcb->save.ldtr; + to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base; + to_vmcb->save.star = from_vmcb->save.star; + to_vmcb->save.lstar = from_vmcb->save.lstar; + to_vmcb->save.cstar = from_vmcb->save.cstar; + to_vmcb->save.sfmask = from_vmcb->save.sfmask; + to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; + to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; + to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; + + return 1; +} + +static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque) +{ + return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb); +} + +static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque) +{ + return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb); +} + +static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload); + + return 1; +} + +static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave); + + return 1; +} + static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { if (nested_svm_check_permissions(svm)) @@ -1578,8 +1634,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_SHUTDOWN] = shutdown_interception, [SVM_EXIT_VMRUN] = invalid_op_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, - [SVM_EXIT_VMLOAD] = invalid_op_interception, - [SVM_EXIT_VMSAVE] = invalid_op_interception, + [SVM_EXIT_VMLOAD] = vmload_interception, + [SVM_EXIT_VMSAVE] = vmsave_interception, [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = invalid_op_interception, -- cgit v1.2.3 From 
3d6368ef580a4dff012960834bba4e28d3c1430c Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:07 +0100 Subject: KVM: SVM: Add VMRUN handler This patch implements VMRUN. VMRUN enters a virtual CPU and runs that in the same context as the normal guest CPU would run. So basically it is implemented the same way, a normal CPU would do it. We also prepare all intercepts that get OR'ed with the original intercepts, as we do not allow a level 2 guest to be intercepted less than the first level guest. v2 implements the following improvements: - fixes the CPL check - does not allocate iopm when not used - remembers the host's IF in the HIF bit in the hflags v3: - make use of the new permission checking - add support for V_INTR_MASKING_MASK v4: - use host page backed hsave v5: - remove IOPM merging code v6: - save cr4 so PAE l1 guests work v7: - return 0 on vmrun so we check the MSRs too - fix MSR check to use the correct variable Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/kvm_svm.h | 8 ++ arch/x86/kvm/svm.c | 157 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 165 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 29e4157732d..53779309514 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -740,6 +740,8 @@ enum { }; #define HF_GIF_MASK (1 << 0) +#define HF_HIF_MASK (1 << 1) +#define HF_VINTR_MASK (1 << 2) /* * Hardware virtualization extension instructions may fault if a diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index a0877cac7b9..91673413d8f 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -43,6 +43,14 @@ struct vcpu_svm { u32 *msrpm; struct vmcb *hsave; u64 hsave_msr; + + u64 nested_vmcb; + + /* These are the merged vectors */ + u32 *nested_msrpm; + + /* gpa pointers to the real vectors */ + u64 nested_vmcb_msrpm; }; #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a83c94eb577..fad187cbfab 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -77,6 +77,11 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) return container_of(vcpu, struct vcpu_svm, vcpu); } +static inline bool is_nested(struct vcpu_svm *svm) +{ + return svm->nested_vmcb; +} + static unsigned long iopm_base; struct kvm_ldttss_desc { @@ -601,6 +606,7 @@ static void init_vmcb(struct vcpu_svm *svm) } force_new_asid(&svm->vcpu); + svm->nested_vmcb = 0; svm->vcpu.arch.hflags = HF_GIF_MASK; } @@ -627,6 +633,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) struct page *page; struct page *msrpm_pages; struct page *hsave_page; + struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); @@ -649,6 +656,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto uninit; + + nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); + if (!nested_msrpm_pages) + goto uninit; + svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); @@ -657,6 +669,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) goto uninit; svm->hsave = page_address(hsave_page); + svm->nested_msrpm = page_address(nested_msrpm_pages); + svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; @@ -687,6 +701,7 
@@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->hsave)); + __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } @@ -1243,6 +1258,123 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } + +static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + int i; + u32 *nested_msrpm = (u32*)arg1; + for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++) + svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i]; + svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm); + + return 0; +} + +static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + struct vmcb *hsave = svm->hsave; + + /* nested_vmcb is our indicator if nested SVM is activated */ + svm->nested_vmcb = svm->vmcb->save.rax; + + /* Clear internal status */ + svm->vcpu.arch.exception.pending = false; + + /* Save the old vmcb, so we don't need to pick what we save, but + can restore everything when a VMEXIT occurs */ + memcpy(hsave, svm->vmcb, sizeof(struct vmcb)); + /* We need to remember the original CR3 in the SPT case */ + if (!npt_enabled) + hsave->save.cr3 = svm->vcpu.arch.cr3; + hsave->save.cr4 = svm->vcpu.arch.cr4; + hsave->save.rip = svm->next_rip; + + if (svm->vmcb->save.rflags & X86_EFLAGS_IF) + svm->vcpu.arch.hflags |= HF_HIF_MASK; + else + svm->vcpu.arch.hflags &= ~HF_HIF_MASK; + + /* Load the nested guest state */ + svm->vmcb->save.es = nested_vmcb->save.es; + svm->vmcb->save.cs = nested_vmcb->save.cs; + svm->vmcb->save.ss = nested_vmcb->save.ss; + svm->vmcb->save.ds = nested_vmcb->save.ds; + svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; + svm->vmcb->save.idtr = nested_vmcb->save.idtr; + svm->vmcb->save.rflags = nested_vmcb->save.rflags; + svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); + svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); + svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); + if (npt_enabled) { + svm->vmcb->save.cr3 = nested_vmcb->save.cr3; + svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; + } else { + kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); + kvm_mmu_reset_context(&svm->vcpu); + } + svm->vmcb->save.cr2 = nested_vmcb->save.cr2; + kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); + kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); + kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); + /* In case we don't even reach vcpu_run, the fields are not updated */ + svm->vmcb->save.rax = nested_vmcb->save.rax; + svm->vmcb->save.rsp = nested_vmcb->save.rsp; + svm->vmcb->save.rip = nested_vmcb->save.rip; + svm->vmcb->save.dr7 = nested_vmcb->save.dr7; + svm->vmcb->save.dr6 = nested_vmcb->save.dr6; + svm->vmcb->save.cpl = nested_vmcb->save.cpl; + + /* We don't want a nested guest to be more powerful than the guest, + so all intercepts are ORed */ + svm->vmcb->control.intercept_cr_read |= + nested_vmcb->control.intercept_cr_read; + svm->vmcb->control.intercept_cr_write |= + nested_vmcb->control.intercept_cr_write; + svm->vmcb->control.intercept_dr_read |= + nested_vmcb->control.intercept_dr_read; + svm->vmcb->control.intercept_dr_write |= + nested_vmcb->control.intercept_dr_write; + svm->vmcb->control.intercept_exceptions |= + nested_vmcb->control.intercept_exceptions; + + svm->vmcb->control.intercept 
|= nested_vmcb->control.intercept; + + svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; + + force_new_asid(&svm->vcpu); + svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info; + svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err; + svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; + if (nested_vmcb->control.int_ctl & V_IRQ_MASK) { + nsvm_printk("nSVM Injecting Interrupt: 0x%x\n", + nested_vmcb->control.int_ctl); + } + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) + svm->vcpu.arch.hflags |= HF_VINTR_MASK; + else + svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; + + nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n", + nested_vmcb->control.exit_int_info, + nested_vmcb->control.int_state); + + svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; + svm->vmcb->control.int_state = nested_vmcb->control.int_state; + svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; + if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID) + nsvm_printk("Injecting Event: 0x%x\n", + nested_vmcb->control.event_inj); + svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; + svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; + + svm->vcpu.arch.hflags |= HF_GIF_MASK; + + return 0; +} + static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) { to_vmcb->save.fs = from_vmcb->save.fs; @@ -1299,6 +1431,26 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return 1; } +static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + nsvm_printk("VMrun\n"); + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + if (nested_svm_do(svm, svm->vmcb->save.rax, 0, + NULL, nested_svm_vmrun)) + return 1; + + if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0, + NULL, nested_svm_vmrun_msrpm)) + return 1; + + return 1; +} + static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { if (nested_svm_check_permissions(svm)) @@ -1632,7 +1784,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_MSR] = msr_interception, [SVM_EXIT_TASK_SWITCH] = task_switch_interception, [SVM_EXIT_SHUTDOWN] = shutdown_interception, - [SVM_EXIT_VMRUN] = invalid_op_interception, + [SVM_EXIT_VMRUN] = vmrun_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = vmload_interception, [SVM_EXIT_VMSAVE] = vmsave_interception, @@ -1939,7 +2091,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) svm->host_cr2 = kvm_read_cr2(); svm->host_dr6 = read_dr6(); svm->host_dr7 = read_dr7(); - svm->vmcb->save.cr2 = vcpu->arch.cr2; + if (!is_nested(svm)) + svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ if (npt_enabled) svm->vmcb->save.cr3 = vcpu->arch.cr3; -- cgit v1.2.3 From cf74a78b229d07f77416d2fe1f029f183a8a31df Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:08 +0100 Subject: KVM: SVM: Add VMEXIT handler and intercepts This adds the #VMEXIT intercept, so we return to the level 1 guest when something happens in the level 2 guest that should return to the level 1 guest. 
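Stripped of the debug output, the core decision this adds to handle_exit() is the following (taken from the hunks below, shortened for readability):

	if (is_nested(svm) && nested_svm_exit_handled(svm, true)) {
		/* the level 1 guest intercepts this event: reflect it into
		 * the level 1 hypervisor as a #VMEXIT instead of handling
		 * it here */
		nested_svm_vmexit(svm);
		return 1;
	}
	/* otherwise KVM handles the exit on behalf of the level 2 guest */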
v2 implements HIF handling and cleans up exception interception v3 adds support for V_INTR_MASKING_MASK v4 uses the host page hsave v5 removes IOPM merging code v6 moves mmu code out of the atomic section Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 293 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index fad187cbfab..4cb2920b152 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -72,6 +72,13 @@ module_param(npt, int, S_IRUGO); static void kvm_reput_irq(struct vcpu_svm *svm); static void svm_flush_tlb(struct kvm_vcpu *vcpu); +static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override); +static int nested_svm_vmexit(struct vcpu_svm *svm); +static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque); +static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, + bool has_error_code, u32 error_code); + static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_svm, vcpu); @@ -221,6 +228,11 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, { struct vcpu_svm *svm = to_svm(vcpu); + /* If we are within a nested VM we'd better #VMEXIT and let the + guest handle the exception */ + if (nested_svm_check_exception(svm, nr, has_error_code, error_code)) + return; + svm->vmcb->control.event_inj = nr | SVM_EVTINJ_VALID | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) @@ -1198,6 +1210,46 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm) return 0; } +static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, + bool has_error_code, u32 error_code) +{ + if (is_nested(svm)) { + svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; + svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_info_1 = error_code; + svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; + if (nested_svm_exit_handled(svm, false)) { + nsvm_printk("VMexit -> EXCP 0x%x\n", nr); + + nested_svm_vmexit(svm); + return 1; + } + } + + return 0; +} + +static inline int nested_svm_intr(struct vcpu_svm *svm) +{ + if (is_nested(svm)) { + if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) + return 0; + + if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) + return 0; + + svm->vmcb->control.exit_code = SVM_EXIT_INTR; + + if (nested_svm_exit_handled(svm, false)) { + nsvm_printk("VMexit -> INTR\n"); + nested_svm_vmexit(svm); + return 1; + } + } + + return 0; +} + static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) { struct page *page; @@ -1258,6 +1310,228 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int nested_svm_exit_handled_real(struct vcpu_svm *svm, + void *arg1, + void *arg2, + void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + bool kvm_overrides = *(bool *)opaque; + u32 exit_code = svm->vmcb->control.exit_code; + + if (kvm_overrides) { + switch (exit_code) { + case SVM_EXIT_INTR: + case SVM_EXIT_NMI: + return 0; + /* For now we are always handling NPFs when using them */ + case SVM_EXIT_NPF: + if (npt_enabled) + return 0; + break; + /* When we're shadowing, trap PFs */ + case SVM_EXIT_EXCP_BASE + PF_VECTOR: + if (!npt_enabled) + return 0; + break; + default: + break; + } + } + + switch (exit_code) { + case SVM_EXIT_READ_CR0 ... 
SVM_EXIT_READ_CR8: { + u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0); + if (nested_vmcb->control.intercept_cr_read & cr_bits) + return 1; + break; + } + case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: { + u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0); + if (nested_vmcb->control.intercept_cr_write & cr_bits) + return 1; + break; + } + case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: { + u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0); + if (nested_vmcb->control.intercept_dr_read & dr_bits) + return 1; + break; + } + case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: { + u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0); + if (nested_vmcb->control.intercept_dr_write & dr_bits) + return 1; + break; + } + case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { + u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); + if (nested_vmcb->control.intercept_exceptions & excp_bits) + return 1; + break; + } + default: { + u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); + nsvm_printk("exit code: 0x%x\n", exit_code); + if (nested_vmcb->control.intercept & exit_bits) + return 1; + } + } + + return 0; +} + +static int nested_svm_exit_handled_msr(struct vcpu_svm *svm, + void *arg1, void *arg2, + void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + u8 *msrpm = (u8 *)arg2; + u32 t0, t1; + u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; + u32 param = svm->vmcb->control.exit_info_1 & 1; + + if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT))) + return 0; + + switch(msr) { + case 0 ... 0x1fff: + t0 = (msr * 2) % 8; + t1 = msr / 8; + break; + case 0xc0000000 ... 0xc0001fff: + t0 = (8192 + msr - 0xc0000000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + case 0xc0010000 ... 0xc0011fff: + t0 = (16384 + msr - 0xc0010000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + default: + return 1; + break; + } + if (msrpm[t1] & ((1 << param) << t0)) + return 1; + + return 0; +} + +static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override) +{ + bool k = kvm_override; + + switch (svm->vmcb->control.exit_code) { + case SVM_EXIT_MSR: + return nested_svm_do(svm, svm->nested_vmcb, + svm->nested_vmcb_msrpm, NULL, + nested_svm_exit_handled_msr); + default: break; + } + + return nested_svm_do(svm, svm->nested_vmcb, 0, &k, + nested_svm_exit_handled_real); +} + +static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + struct vmcb *hsave = svm->hsave; + u64 nested_save[] = { nested_vmcb->save.cr0, + nested_vmcb->save.cr3, + nested_vmcb->save.cr4, + nested_vmcb->save.efer, + nested_vmcb->control.intercept_cr_read, + nested_vmcb->control.intercept_cr_write, + nested_vmcb->control.intercept_dr_read, + nested_vmcb->control.intercept_dr_write, + nested_vmcb->control.intercept_exceptions, + nested_vmcb->control.intercept, + nested_vmcb->control.msrpm_base_pa, + nested_vmcb->control.iopm_base_pa, + nested_vmcb->control.tsc_offset }; + + /* Give the current vmcb to the guest */ + memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb)); + nested_vmcb->save.cr0 = nested_save[0]; + if (!npt_enabled) + nested_vmcb->save.cr3 = nested_save[1]; + nested_vmcb->save.cr4 = nested_save[2]; + nested_vmcb->save.efer = nested_save[3]; + nested_vmcb->control.intercept_cr_read = nested_save[4]; + nested_vmcb->control.intercept_cr_write = nested_save[5]; + nested_vmcb->control.intercept_dr_read = nested_save[6]; + nested_vmcb->control.intercept_dr_write = nested_save[7]; + nested_vmcb->control.intercept_exceptions = 
nested_save[8]; + nested_vmcb->control.intercept = nested_save[9]; + nested_vmcb->control.msrpm_base_pa = nested_save[10]; + nested_vmcb->control.iopm_base_pa = nested_save[11]; + nested_vmcb->control.tsc_offset = nested_save[12]; + + /* We always set V_INTR_MASKING and remember the old value in hflags */ + if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) + nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; + + if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) && + (nested_vmcb->control.int_vector)) { + nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n", + nested_vmcb->control.int_vector); + } + + /* Restore the original control entries */ + svm->vmcb->control = hsave->control; + + /* Kill any pending exceptions */ + if (svm->vcpu.arch.exception.pending == true) + nsvm_printk("WARNING: Pending Exception\n"); + svm->vcpu.arch.exception.pending = false; + + /* Restore selected save entries */ + svm->vmcb->save.es = hsave->save.es; + svm->vmcb->save.cs = hsave->save.cs; + svm->vmcb->save.ss = hsave->save.ss; + svm->vmcb->save.ds = hsave->save.ds; + svm->vmcb->save.gdtr = hsave->save.gdtr; + svm->vmcb->save.idtr = hsave->save.idtr; + svm->vmcb->save.rflags = hsave->save.rflags; + svm_set_efer(&svm->vcpu, hsave->save.efer); + svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); + svm_set_cr4(&svm->vcpu, hsave->save.cr4); + if (npt_enabled) { + svm->vmcb->save.cr3 = hsave->save.cr3; + svm->vcpu.arch.cr3 = hsave->save.cr3; + } else { + kvm_set_cr3(&svm->vcpu, hsave->save.cr3); + } + kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); + kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); + kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); + svm->vmcb->save.dr7 = 0; + svm->vmcb->save.cpl = 0; + svm->vmcb->control.exit_int_info = 0; + + svm->vcpu.arch.hflags &= ~HF_GIF_MASK; + /* Exit nested SVM mode */ + svm->nested_vmcb = 0; + + return 0; +} + +static int nested_svm_vmexit(struct vcpu_svm *svm) +{ + nsvm_printk("VMexit\n"); + if (nested_svm_do(svm, svm->nested_vmcb, 0, + NULL, nested_svm_vmexit_real)) + return 1; + + kvm_mmu_reset_context(&svm->vcpu); + kvm_mmu_load(&svm->vcpu); + + return 0; +} static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1, void *arg2, void *opaque) @@ -1805,6 +2079,17 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip, (u32)((u64)svm->vmcb->save.rip >> 32), entryexit); + if (is_nested(svm)) { + nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n", + exit_code, svm->vmcb->control.exit_info_1, + svm->vmcb->control.exit_info_2, svm->vmcb->save.rip); + if (nested_svm_exit_handled(svm, true)) { + nested_svm_vmexit(svm); + nsvm_printk("-> #VMEXIT\n"); + return 1; + } + } + if (npt_enabled) { int mmu_reload = 0; if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { @@ -1892,6 +2177,8 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq) { struct vcpu_svm *svm = to_svm(vcpu); + nested_svm_intr(svm); + svm_inject_irq(svm, irq); } @@ -1937,6 +2224,9 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) if (!kvm_cpu_has_interrupt(vcpu)) goto out; + if (nested_svm_intr(svm)) + goto out; + if (!(svm->vcpu.arch.hflags & HF_GIF_MASK)) goto out; @@ -1989,6 +2279,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; + if (nested_svm_intr(svm)) + return; + svm->vcpu.arch.interrupt_window_open = (!(control->int_state & 
SVM_INTERRUPT_SHADOW_MASK) && (svm->vmcb->save.rflags & X86_EFLAGS_IF) && -- cgit v1.2.3 From eb6f302edff9da963e687bf6c15dcf88843b1c3b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 25 Nov 2008 20:17:09 +0100 Subject: KVM: SVM: Allow read access to MSR_VM_VR KVM tries to read the VM_CR MSR to find out if SVM was disabled by the BIOS. So implement read support for this MSR to make nested SVM running. Signed-off-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4cb2920b152..df5b4119266 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1869,6 +1869,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_VM_HSAVE_PA: *data = svm->hsave_msr; break; + case MSR_VM_CR: + *data = 0; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } -- cgit v1.2.3 From 236de05553a7ef8f6940de8686ae9bf1272cd2cf Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:10 +0100 Subject: KVM: SVM: Allow setting the SVME bit Normally setting the SVME bit in EFER is not allowed, as we did not support SVM. Not since we do, we should also allow enabling SVM mode. v2 comes as last patch, so we don't enable half-ready code v4 introduces a module option to enable SVM v6 warns that nesting is enabled Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index df5b4119266..0fbbde54eca 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -69,6 +69,9 @@ static int npt = 1; module_param(npt, int, S_IRUGO); +static int nested = 0; +module_param(nested, int, S_IRUGO); + static void kvm_reput_irq(struct vcpu_svm *svm); static void svm_flush_tlb(struct kvm_vcpu *vcpu); @@ -443,6 +446,11 @@ static __init int svm_hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); + if (nested) { + printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); + kvm_enable_efer_bits(EFER_SVME); + } + for_each_online_cpu(cpu) { r = svm_cpu_init(cpu); if (r) -- cgit v1.2.3 From d80174745ba3938bc6abb8f95ed670ac0631a182 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:11 +0100 Subject: KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set Userspace has to tell the kernel module somehow that nested SVM should be used. The easiest way that doesn't break anything I could think of is to implement if (cpuid & svm) allow write to efer else deny write to efer Old userspaces mask the SVM capability bit, so they don't break. In order to find out that the SVM capability is set, I had to split the kvm_emulate_cpuid into a finding and an emulating part. 
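From the userspace side this means a nesting-aware VMM has to expose the SVM bit in the guest CPUID before EFER.SVME becomes writable. A hypothetical sketch, with vcpu_fd and the kvm_cpuid2 buffer assumed to be set up already:

	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		e = &cpuid->entries[i];
		if (e->function == 0x80000001)
			e->ecx |= 1u << 2;	/* CPUID.80000001H:ECX[2] = SVM */
	}
	if (ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid) < 0)
		err(1, "KVM_SET_CPUID2");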
(introduced in v6) Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 58 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 99165a961f0..b5e9932e0f6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -69,6 +69,8 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); +struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, + u32 function, u32 index); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); @@ -173,6 +175,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, u32 error_code) { ++vcpu->stat.pf_guest; + if (vcpu->arch.exception.pending) { if (vcpu->arch.exception.nr == PF_VECTOR) { printk(KERN_DEBUG "kvm: inject_page_fault:" @@ -442,6 +445,11 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_get_cr8); +static inline u32 bit(int bitno) +{ + return 1 << (bitno & 31); +} + /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. @@ -481,6 +489,17 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) return; } + if (efer & EFER_SVME) { + struct kvm_cpuid_entry2 *feat; + + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { + printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); + kvm_inject_gp(vcpu, 0); + return; + } + } + kvm_x86_ops->set_efer(vcpu, efer); efer &= ~EFER_LMA; @@ -1181,11 +1200,6 @@ out: return r; } -static inline u32 bit(int bitno) -{ - return 1 << (bitno & 31); -} - static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { @@ -1228,7 +1242,8 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, const u32 kvm_supported_word3_x86_features = bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16); const u32 kvm_supported_word6_x86_features = - bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY); + bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) | + bit(X86_FEATURE_SVM); /* all func 2 cpuid_count() should be called on the same cpu */ get_cpu(); @@ -2832,20 +2847,15 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, return 1; } -void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, + u32 function, u32 index) { int i; - u32 function, index; - struct kvm_cpuid_entry2 *e, *best; + struct kvm_cpuid_entry2 *best = NULL; - function = kvm_register_read(vcpu, VCPU_REGS_RAX); - index = kvm_register_read(vcpu, VCPU_REGS_RCX); - kvm_register_write(vcpu, VCPU_REGS_RAX, 0); - kvm_register_write(vcpu, VCPU_REGS_RBX, 0); - kvm_register_write(vcpu, VCPU_REGS_RCX, 0); - kvm_register_write(vcpu, VCPU_REGS_RDX, 0); - best = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { + struct kvm_cpuid_entry2 *e; + e = &vcpu->arch.cpuid_entries[i]; if (is_matching_cpuid_entry(e, function, index)) { if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) @@ -2860,6 +2870,22 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) if (!best || e->function > best->function) best = e; } + + return best; +} + +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +{ + u32 function, index; + struct kvm_cpuid_entry2 *best; + + function = kvm_register_read(vcpu, VCPU_REGS_RAX); + index = 
kvm_register_read(vcpu, VCPU_REGS_RCX); + kvm_register_write(vcpu, VCPU_REGS_RAX, 0); + kvm_register_write(vcpu, VCPU_REGS_RBX, 0); + kvm_register_write(vcpu, VCPU_REGS_RCX, 0); + kvm_register_write(vcpu, VCPU_REGS_RDX, 0); + best = kvm_find_cpuid_entry(vcpu, function, index); if (best) { kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); -- cgit v1.2.3 From 8ab2d2e231062814bd89bba2d6d92563190aa2bb Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: VMX: Support for injecting software exceptions VMX differentiates between processor and software generated exceptions when injecting them into the guest. Extend vmx_queue_exception accordingly (and refactor related constants) so that we can use this service reliably for the new guest debugging framework. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 3 ++- arch/x86/kvm/vmx.c | 35 ++++++++++++++++++++--------------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index d0238e6151d..32159f034ef 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -270,8 +270,9 @@ enum vmcs_field { #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ -#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ +#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ +#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ #define GUEST_INTR_STATE_STI 0x00000001 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7611af57682..1d974c1eaa7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -189,21 +189,21 @@ static inline int is_page_fault(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_no_device(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_invalid_opcode(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_external_interrupt(u32 intr_info) @@ -747,29 +747,33 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 intr_info = nr | INTR_INFO_VALID_MASK; - if (has_error_code) + if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); + intr_info |= INTR_INFO_DELIVER_CODE_MASK; + } if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = nr; vmx->rmode.irq.rip = kvm_rip_read(vcpu); - if (nr == BP_VECTOR) + if (nr == BP_VECTOR || nr == OF_VECTOR) vmx->rmode.irq.rip++; - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - nr | INTR_TYPE_SOFT_INTR - | (has_error_code ? 
INTR_INFO_DELIVER_CODE_MASK : 0) - | INTR_INFO_VALID_MASK); + intr_info |= INTR_TYPE_SOFT_INTR; + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); return; } - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - nr | INTR_TYPE_EXCEPTION - | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) - | INTR_INFO_VALID_MASK); + if (nr == BP_VECTOR || nr == OF_VECTOR) { + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); + intr_info |= INTR_TYPE_SOFT_EXCEPTION; + } else + intr_info |= INTR_TYPE_HARD_EXCEPTION; + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_exception_injected(struct kvm_vcpu *vcpu) @@ -2650,7 +2654,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == - (INTR_TYPE_EXCEPTION | 1)) { + (INTR_TYPE_HARD_EXCEPTION | 1)) { kvm_run->exit_reason = KVM_EXIT_DEBUG; return 0; } @@ -3238,7 +3242,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx) vmx->vcpu.arch.nmi_injected = false; } kvm_clear_exception_queue(&vmx->vcpu); - if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { + if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION || + type == INTR_TYPE_SOFT_EXCEPTION)) { if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { error = vmcs_read32(IDT_VECTORING_ERROR_CODE); kvm_queue_exception_e(&vmx->vcpu, vector, error); -- cgit v1.2.3 From d0bfb940ecabf0b44fb1fd80d8d60594e569e5ec Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: New guest debug interface This rips out the support for KVM_DEBUG_GUEST and introduces a new IOCTL instead: KVM_SET_GUEST_DEBUG. The IOCTL payload consists of a generic part, controlling the "main switch" and the single-step feature. The arch specific part adds an x86 interface for intercepting both types of debug exceptions separately and re-injecting them when the host was not interested. Moveover, the foundation for guest debugging via debug registers is layed. To signal breakpoint events properly back to userland, an arch-specific data block is now returned along KVM_EXIT_DEBUG. For x86, the arch block contains the PC, the debug exception, and relevant debug registers to tell debug events properly apart. The availability of this new interface is signaled by KVM_CAP_SET_GUEST_DEBUG. Empty stubs for not yet supported archs are provided. Note that both SVM and VTX are supported, but only the latter was tested yet. Based on the experience with all those VTX corner case, I would be fairly surprised if SVM will work out of the box. 
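For reference, a minimal userspace sketch of the new ABI; vcpu_fd and the mmap'ed kvm_run area are assumed to exist, and KVM_GUESTDBG_ENABLE / KVM_GUESTDBG_SINGLESTEP are the generic control bits this series adds outside arch/:

	struct kvm_guest_debug dbg = { 0 };

	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		err(1, "KVM_SET_GUEST_DEBUG");

	/* ... after the next KVM_RUN ... */
	if (run->exit_reason == KVM_EXIT_DEBUG)
		printf("debug exit at rip 0x%llx, vector %u\n",
		       (unsigned long long)run->debug.arch.pc,
		       run->debug.arch.exception);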
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 7 ++++ arch/ia64/kvm/kvm-ia64.c | 4 +- arch/powerpc/include/asm/kvm.h | 7 ++++ arch/powerpc/kvm/powerpc.c | 4 +- arch/s390/include/asm/kvm.h | 7 ++++ arch/s390/kvm/kvm-s390.c | 4 +- arch/x86/include/asm/kvm.h | 18 ++++++++ arch/x86/include/asm/kvm_host.h | 9 +--- arch/x86/kvm/svm.c | 50 +++++++++++++++++++++- arch/x86/kvm/vmx.c | 93 ++++++++++++++++------------------------- arch/x86/kvm/x86.c | 14 ++++--- 11 files changed, 138 insertions(+), 79 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index bfa86b6af7c..be3fdb89121 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -214,4 +214,11 @@ struct kvm_sregs { struct kvm_fpu { }; +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + #endif diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 28f982045f2..de47467a0e6 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1303,8 +1303,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) return -EINVAL; } -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { return -EINVAL; } diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index f993e4198d5..755f1b1948c 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -52,4 +52,11 @@ struct kvm_fpu { __u64 fpr[32]; }; +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 5f81256287f..7c2ad4017d6 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -240,8 +240,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvmppc_core_vcpu_put(vcpu); } -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { int i; diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h index e1f54654e3a..0b2f829f6d5 100644 --- a/arch/s390/include/asm/kvm.h +++ b/arch/s390/include/asm/kvm.h @@ -42,4 +42,11 @@ struct kvm_fpu { __u64 fprs[16]; }; +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + #endif diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0d33893e1e8..cbfe91e1012 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -422,8 +422,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, return -EINVAL; /* not implemented yet */ } -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { return -EINVAL; /* not implemented yet */ } diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 886c9402ec4..32eb96c7ca2 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -212,6 +212,24 @@ struct kvm_pit_channel_state { __s64 count_load_time; }; +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 +#define KVM_GUESTDBG_USE_HW_BP 0x00020000 
+#define KVM_GUESTDBG_INJECT_DB 0x00040000 +#define KVM_GUESTDBG_INJECT_BP 0x00080000 + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { + __u64 debugreg[8]; +}; + struct kvm_pit_state { struct kvm_pit_channel_state channels[3]; }; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 53779309514..c430cd580ee 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -135,12 +135,6 @@ enum { #define KVM_NR_MEM_OBJS 40 -struct kvm_guest_debug { - int enabled; - unsigned long bp[4]; - int singlestep; -}; - /* * We don't want allocation failures within the mmu code, so we preallocate * enough memory for a single page fault in a cache. @@ -448,8 +442,7 @@ struct kvm_x86_ops { void (*vcpu_put)(struct kvm_vcpu *vcpu); int (*set_guest_debug)(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg); - void (*guest_debug_pre)(struct kvm_vcpu *vcpu); + struct kvm_guest_debug *dbg); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0fbbde54eca..88d9062f454 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -968,9 +968,32 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, } -static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) +static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EOPNOTSUPP; + int old_debug = vcpu->guest_debug; + struct vcpu_svm *svm = to_svm(vcpu); + + vcpu->guest_debug = dbg->control; + + svm->vmcb->control.intercept_exceptions &= + ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); + if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + svm->vmcb->control.intercept_exceptions |= + 1 << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + svm->vmcb->control.intercept_exceptions |= + 1 << BP_VECTOR; + } else + vcpu->guest_debug = 0; + + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; + else if (old_debug & KVM_GUESTDBG_SINGLESTEP) + svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); + + return 0; } static int svm_get_irq(struct kvm_vcpu *vcpu) @@ -1094,6 +1117,27 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); } +static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (!(svm->vcpu.guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + kvm_queue_exception(&svm->vcpu, DB_VECTOR); + return 1; + } + kvm_run->exit_reason = KVM_EXIT_DEBUG; + kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; + kvm_run->debug.arch.exception = DB_VECTOR; + return 0; +} + +static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + kvm_run->exit_reason = KVM_EXIT_DEBUG; + kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; + kvm_run->debug.arch.exception = BP_VECTOR; + return 0; +} + static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { int er; @@ -2050,6 +2094,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_WRITE_DR3] = emulate_on_interception, [SVM_EXIT_WRITE_DR5] = emulate_on_interception, [SVM_EXIT_WRITE_DR7] = emulate_on_interception, + [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, + 
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1d974c1eaa7..f55690ddb3a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -480,8 +480,13 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); if (!vcpu->fpu_active) eb |= 1u << NM_VECTOR; - if (vcpu->guest_debug.enabled) - eb |= 1u << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + eb |= 1u << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + eb |= 1u << BP_VECTOR; + } if (vcpu->arch.rmode.active) eb = ~0; if (vm_need_ept()) @@ -1003,40 +1008,23 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) } } -static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) +static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - unsigned long dr7 = 0x400; - int old_singlestep; - - old_singlestep = vcpu->guest_debug.singlestep; - - vcpu->guest_debug.enabled = dbg->enabled; - if (vcpu->guest_debug.enabled) { - int i; - - dr7 |= 0x200; /* exact */ - for (i = 0; i < 4; ++i) { - if (!dbg->breakpoints[i].enabled) - continue; - vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; - dr7 |= 2 << (i*2); /* global enable */ - dr7 |= 0 << (i*4+16); /* execution breakpoint */ - } - - vcpu->guest_debug.singlestep = dbg->singlestep; - } else - vcpu->guest_debug.singlestep = 0; + int old_debug = vcpu->guest_debug; + unsigned long flags; - if (old_singlestep && !vcpu->guest_debug.singlestep) { - unsigned long flags; + vcpu->guest_debug = dbg->control; + if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) + vcpu->guest_debug = 0; - flags = vmcs_readl(GUEST_RFLAGS); + flags = vmcs_readl(GUEST_RFLAGS); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; + else if (old_debug & KVM_GUESTDBG_SINGLESTEP) flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); - vmcs_writel(GUEST_RFLAGS, flags); - } + vmcs_writel(GUEST_RFLAGS, flags); update_exception_bitmap(vcpu); - vmcs_writel(GUEST_DR7, dr7); return 0; } @@ -2540,24 +2528,6 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) -{ - struct kvm_guest_debug *dbg = &vcpu->guest_debug; - - set_debugreg(dbg->bp[0], 0); - set_debugreg(dbg->bp[1], 1); - set_debugreg(dbg->bp[2], 2); - set_debugreg(dbg->bp[3], 3); - - if (dbg->singlestep) { - unsigned long flags; - - flags = vmcs_readl(GUEST_RFLAGS); - flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; - vmcs_writel(GUEST_RFLAGS, flags); - } -} - static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { @@ -2574,9 +2544,17 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, * the required debugging infrastructure rework. 
*/ switch (vec) { - case DE_VECTOR: case DB_VECTOR: + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return 0; + kvm_queue_exception(vcpu, vec); + return 1; case BP_VECTOR: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + return 0; + /* fall through */ + case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: @@ -2593,7 +2571,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 intr_info, error_code; + u32 intr_info, ex_no, error_code; unsigned long cr2, rip; u32 vect_info; enum emulation_result er; @@ -2653,14 +2631,16 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 1; } - if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | 1)) { + ex_no = intr_info & INTR_INFO_VECTOR_MASK; + if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) { kvm_run->exit_reason = KVM_EXIT_DEBUG; - return 0; + kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; + kvm_run->debug.arch.exception = ex_no; + } else { + kvm_run->exit_reason = KVM_EXIT_EXCEPTION; + kvm_run->ex.exception = ex_no; + kvm_run->ex.error_code = error_code; } - kvm_run->exit_reason = KVM_EXIT_EXCEPTION; - kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK; - kvm_run->ex.error_code = error_code; return 0; } @@ -3600,7 +3580,6 @@ static struct kvm_x86_ops vmx_x86_ops = { .vcpu_put = vmx_vcpu_put, .set_guest_debug = set_guest_debug, - .guest_debug_pre = kvm_guest_debug_pre, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b5e9932e0f6..e990d164b56 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3005,9 +3005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) goto out; } - if (vcpu->guest_debug.enabled) - kvm_x86_ops->guest_debug_pre(vcpu); - vcpu->guest_mode = 1; /* * Make sure that guest_mode assignment won't happen after @@ -3218,7 +3215,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) /* * Don't leak debug flags in case they were set for guest debugging */ - if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep) + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); vcpu_put(vcpu); @@ -3837,8 +3834,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { int r; @@ -3846,6 +3843,11 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, r = kvm_x86_ops->set_guest_debug(vcpu, dbg); + if (dbg->control & KVM_GUESTDBG_INJECT_DB) + kvm_queue_exception(vcpu, DB_VECTOR); + else if (dbg->control & KVM_GUESTDBG_INJECT_BP) + kvm_queue_exception(vcpu, BP_VECTOR); + vcpu_put(vcpu); return r; -- cgit v1.2.3 From 55934c0bd3bb232a9cf902820dd63ad18ed65e49 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: VMX: Allow single-stepping when uninterruptible When single-stepping over STI and MOV SS, we must clear the corresponding interruptibility bits in the guest state. Otherwise vmentry fails as it then expects bit 14 (BS) in pending debug exceptions being set, but that's not correct for the guest debugging case. 
Note that clearing those bits is safe as we check for interruptibility based on the original state and do not inject interrupts or NMIs if guest interruptibility was blocked. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f55690ddb3a..c776868ffe4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2478,6 +2478,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, { vmx_update_window_states(vcpu); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); @@ -3244,6 +3249,11 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) vmx_update_window_states(vcpu); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); -- cgit v1.2.3 From 42dbaa5a057736bf8b5c22aa42dbe975bf1080e5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: x86: Virtualize debug registers So far KVM only had basic x86 debug register support, once introduced to realize guest debugging that way. The guest itself was not able to use those registers. This patch now adds (almost) full support for guest self-debugging via hardware registers. It refactors the code, moving generic parts out of SVM (VMX was already cleaned up by the KVM_SET_GUEST_DEBUG patches), and it ensures that the registers are properly switched between host and guest. This patch also prepares debug register usage by the host. The latter will (once wired-up by the following patch) allow for hardware breakpoints/watchpoints in guest code. If this is enabled, the guest will only see faked debug registers without functionality, but with content reflecting the guest's modifications. Tested on Intel only, but SVM /should/ work as well, but who knows... Known limitations: Trapping on tss switch won't work - most probably on Intel. Credits also go to Joerg Roedel - I used his once posted debugging series as platform for this patch. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 22 ++++++++ arch/x86/include/asm/vmx.h | 2 +- arch/x86/kvm/kvm_svm.h | 6 --- arch/x86/kvm/svm.c | 116 +++++++++++++++------------------------- arch/x86/kvm/vmx.c | 114 +++++++++++++++++++++++++++++++++------ arch/x86/kvm/x86.c | 29 ++++++++++ 6 files changed, 193 insertions(+), 96 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c430cd580ee..0a4dab25a91 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -135,6 +135,19 @@ enum { #define KVM_NR_MEM_OBJS 40 +#define KVM_NR_DB_REGS 4 + +#define DR6_BD (1 << 13) +#define DR6_BS (1 << 14) +#define DR6_FIXED_1 0xffff0ff0 +#define DR6_VOLATILE 0x0000e00f + +#define DR7_BP_EN_MASK 0x000000ff +#define DR7_GE (1 << 9) +#define DR7_GD (1 << 13) +#define DR7_FIXED_1 0x00000400 +#define DR7_VOLATILE 0xffff23ff + /* * We don't want allocation failures within the mmu code, so we preallocate * enough memory for a single page fault in a cache. 
@@ -334,6 +347,15 @@ struct kvm_vcpu_arch { struct mtrr_state_type mtrr_state; u32 pat; + + int switch_db_regs; + unsigned long host_db[KVM_NR_DB_REGS]; + unsigned long host_dr6; + unsigned long host_dr7; + unsigned long db[KVM_NR_DB_REGS]; + unsigned long dr6; + unsigned long dr7; + unsigned long eff_db[KVM_NR_DB_REGS]; }; struct kvm_mem_alias { diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 32159f034ef..498f944010b 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -312,7 +312,7 @@ enum vmcs_field { #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ #define TYPE_MOV_TO_DR (0 << 4) #define TYPE_MOV_FROM_DR (1 << 4) -#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ +#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */ /* segment AR */ diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 91673413d8f..ed66e4c078d 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = { }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) -#define NUM_DB_REGS 4 struct kvm_vcpu; @@ -29,16 +28,11 @@ struct vcpu_svm { struct svm_cpu_data *svm_data; uint64_t asid_generation; - unsigned long db_regs[NUM_DB_REGS]; - u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; u64 host_gs_base; unsigned long host_cr2; - unsigned long host_db_regs[NUM_DB_REGS]; - unsigned long host_dr6; - unsigned long host_dr7; u32 *msrpm; struct vmcb *hsave; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 88d9062f454..815f50e425a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -38,9 +38,6 @@ MODULE_LICENSE("GPL"); #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 -#define DR7_GD_MASK (1 << 13) -#define DR6_BD_MASK (1 << 13) - #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 @@ -181,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val) asm volatile ("mov %0, %%cr2" :: "r" (val)); } -static inline unsigned long read_dr6(void) -{ - unsigned long dr6; - - asm volatile ("mov %%dr6, %0" : "=r" (dr6)); - return dr6; -} - -static inline void write_dr6(unsigned long val) -{ - asm volatile ("mov %0, %%dr6" :: "r" (val)); -} - -static inline unsigned long read_dr7(void) -{ - unsigned long dr7; - - asm volatile ("mov %%dr7, %0" : "=r" (dr7)); - return dr7; -} - -static inline void write_dr7(unsigned long val) -{ - asm volatile ("mov %0, %%dr7" :: "r" (val)); -} - static inline void force_new_asid(struct kvm_vcpu *vcpu) { to_svm(vcpu)->asid_generation--; @@ -695,7 +666,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; - memset(svm->db_regs, 0, sizeof(svm->db_regs)); init_vmcb(svm); fx_init(&svm->vcpu); @@ -1035,7 +1005,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) { - unsigned long val = to_svm(vcpu)->db_regs[dr]; + struct vcpu_svm *svm = to_svm(vcpu); + unsigned long val; + + switch (dr) { + case 0 ... 
3: + val = vcpu->arch.db[dr]; + break; + case 6: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + val = vcpu->arch.dr6; + else + val = svm->vmcb->save.dr6; + break; + case 7: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + val = vcpu->arch.dr7; + else + val = svm->vmcb->save.dr7; + break; + default: + val = 0; + } + KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); return val; } @@ -1045,33 +1037,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, { struct vcpu_svm *svm = to_svm(vcpu); - *exception = 0; + KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler); - if (svm->vmcb->save.dr7 & DR7_GD_MASK) { - svm->vmcb->save.dr7 &= ~DR7_GD_MASK; - svm->vmcb->save.dr6 |= DR6_BD_MASK; - *exception = DB_VECTOR; - return; - } + *exception = 0; switch (dr) { case 0 ... 3: - svm->db_regs[dr] = value; + vcpu->arch.db[dr] = value; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + vcpu->arch.eff_db[dr] = value; return; case 4 ... 5: - if (vcpu->arch.cr4 & X86_CR4_DE) { + if (vcpu->arch.cr4 & X86_CR4_DE) *exception = UD_VECTOR; + return; + case 6: + if (value & 0xffffffff00000000ULL) { + *exception = GP_VECTOR; return; } - case 7: { - if (value & ~((1ULL << 32) - 1)) { + vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1; + return; + case 7: + if (value & 0xffffffff00000000ULL) { *exception = GP_VECTOR; return; } - svm->vmcb->save.dr7 = value; + vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { + svm->vmcb->save.dr7 = vcpu->arch.dr7; + vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK); + } return; - } default: + /* FIXME: Possible case? */ printk(KERN_DEBUG "%s: unexpected dr %u\n", __func__, dr); *exception = UD_VECTOR; @@ -2365,22 +2364,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static void save_db_regs(unsigned long *db_regs) -{ - asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); - asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1])); - asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2])); - asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3])); -} - -static void load_db_regs(unsigned long *db_regs) -{ - asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0])); - asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1])); - asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2])); - asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3])); -} - static void svm_flush_tlb(struct kvm_vcpu *vcpu) { force_new_asid(vcpu); @@ -2439,20 +2422,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) gs_selector = kvm_read_gs(); ldt_selector = kvm_read_ldt(); svm->host_cr2 = kvm_read_cr2(); - svm->host_dr6 = read_dr6(); - svm->host_dr7 = read_dr7(); if (!is_nested(svm)) svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ if (npt_enabled) svm->vmcb->save.cr3 = vcpu->arch.cr3; - if (svm->vmcb->save.dr7 & 0xff) { - write_dr7(0); - save_db_regs(svm->host_db_regs); - load_db_regs(svm->db_regs); - } - clgi(); local_irq_enable(); @@ -2528,16 +2503,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) #endif ); - if ((svm->vmcb->save.dr7 & 0xff)) - load_db_regs(svm->host_db_regs); - vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - write_dr6(svm->host_dr6); - write_dr7(svm->host_dr7); kvm_write_cr2(svm->host_cr2); kvm_load_fs(fs_selector); diff --git a/arch/x86/kvm/vmx.c 
b/arch/x86/kvm/vmx.c index c776868ffe4..0989776ee7b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2311,7 +2311,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) kvm_rip_write(vcpu, 0); kvm_register_write(vcpu, VCPU_REGS_RSP, 0); - /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ vmcs_writel(GUEST_DR7, 0x400); vmcs_writel(GUEST_GDTR_BASE, 0); @@ -2577,7 +2576,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info, ex_no, error_code; - unsigned long cr2, rip; + unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; @@ -2637,14 +2636,28 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } ex_no = intr_info & INTR_INFO_VECTOR_MASK; - if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) { + switch (ex_no) { + case DB_VECTOR: + dr6 = vmcs_readl(EXIT_QUALIFICATION); + if (!(vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + vcpu->arch.dr6 = dr6 | DR6_FIXED_1; + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; + kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); + /* fall through */ + case BP_VECTOR: kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; - } else { + break; + default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; + break; } return 0; } @@ -2784,21 +2797,44 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) unsigned long val; int dr, reg; - /* - * FIXME: this code assumes the host is debugging the guest. - * need to deal with guest debugging itself too. - */ + dr = vmcs_readl(GUEST_DR7); + if (dr & DR7_GD) { + /* + * As the vm-exit takes precedence over the debug trap, we + * need to emulate the latter, either for the host or the + * guest debugging itself. + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { + kvm_run->debug.arch.dr6 = vcpu->arch.dr6; + kvm_run->debug.arch.dr7 = dr; + kvm_run->debug.arch.pc = + vmcs_readl(GUEST_CS_BASE) + + vmcs_readl(GUEST_RIP); + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + return 0; + } else { + vcpu->arch.dr7 &= ~DR7_GD; + vcpu->arch.dr6 |= DR6_BD; + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + } + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - dr = exit_qualification & 7; - reg = (exit_qualification >> 8) & 15; - if (exit_qualification & 16) { - /* mov from dr */ + dr = exit_qualification & DEBUG_REG_ACCESS_NUM; + reg = DEBUG_REG_ACCESS_REG(exit_qualification); + if (exit_qualification & TYPE_MOV_FROM_DR) { switch (dr) { + case 0 ... 3: + val = vcpu->arch.db[dr]; + break; case 6: - val = 0xffff0ff0; + val = vcpu->arch.dr6; break; case 7: - val = 0x400; + val = vcpu->arch.dr7; break; default: val = 0; @@ -2806,7 +2842,38 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_register_write(vcpu, reg, val); KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); } else { - /* mov to dr */ + val = vcpu->arch.regs[reg]; + switch (dr) { + case 0 ... 3: + vcpu->arch.db[dr] = val; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + vcpu->arch.eff_db[dr] = val; + break; + case 4 ... 
5: + if (vcpu->arch.cr4 & X86_CR4_DE) + kvm_queue_exception(vcpu, UD_VECTOR); + break; + case 6: + if (val & 0xffffffff00000000ULL) { + kvm_queue_exception(vcpu, GP_VECTOR); + break; + } + vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; + break; + case 7: + if (val & 0xffffffff00000000ULL) { + kvm_queue_exception(vcpu, GP_VECTOR); + break; + } + vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + vcpu->arch.switch_db_regs = + (val & DR7_BP_EN_MASK); + } + break; + } + KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler); } skip_emulated_instruction(vcpu); return 1; @@ -2957,7 +3024,18 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } tss_selector = exit_qualification; - return kvm_task_switch(vcpu, tss_selector, reason); + if (!kvm_task_switch(vcpu, tss_selector, reason)) + return 0; + + /* clear all local breakpoint enable flags */ + vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55); + + /* + * TODO: What about debug traps on tss switch? + * Are we supposed to inject them and update dr6? + */ + + return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) @@ -3342,6 +3420,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) */ vmcs_writel(HOST_CR0, read_cr0()); + set_debugreg(vcpu->arch.dr6, 6); + asm( /* Store host registers */ "push %%"R"dx; push %%"R"bp;" @@ -3436,6 +3516,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); vcpu->arch.regs_dirty = 0; + get_debugreg(vcpu->arch.dr6, 6); + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e990d164b56..300bc4d42ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3025,10 +3025,34 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_guest_enter(); + get_debugreg(vcpu->arch.host_dr6, 6); + get_debugreg(vcpu->arch.host_dr7, 7); + if (unlikely(vcpu->arch.switch_db_regs)) { + get_debugreg(vcpu->arch.host_db[0], 0); + get_debugreg(vcpu->arch.host_db[1], 1); + get_debugreg(vcpu->arch.host_db[2], 2); + get_debugreg(vcpu->arch.host_db[3], 3); + + set_debugreg(0, 7); + set_debugreg(vcpu->arch.eff_db[0], 0); + set_debugreg(vcpu->arch.eff_db[1], 1); + set_debugreg(vcpu->arch.eff_db[2], 2); + set_debugreg(vcpu->arch.eff_db[3], 3); + } KVMTRACE_0D(VMENTRY, vcpu, entryexit); kvm_x86_ops->run(vcpu, kvm_run); + if (unlikely(vcpu->arch.switch_db_regs)) { + set_debugreg(0, 7); + set_debugreg(vcpu->arch.host_db[0], 0); + set_debugreg(vcpu->arch.host_db[1], 1); + set_debugreg(vcpu->arch.host_db[2], 2); + set_debugreg(vcpu->arch.host_db[3], 3); + } + set_debugreg(vcpu->arch.host_dr6, 6); + set_debugreg(vcpu->arch.host_dr7, 7); + vcpu->guest_mode = 0; local_irq_enable(); @@ -4035,6 +4059,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = false; + vcpu->arch.switch_db_regs = 0; + memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); + vcpu->arch.dr6 = DR6_FIXED_1; + vcpu->arch.dr7 = DR7_FIXED_1; + return kvm_x86_ops->vcpu_reset(vcpu); } -- cgit v1.2.3 From ae675ef01cd86014acf8da5dee87876b71122495 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: x86: Wire-up hardware breakpoints for guest debugging Add the 
remaining bits to make use of debug registers also for guest debugging, thus enabling the use of hardware breakpoints and watchpoints. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 5 +++++ arch/x86/kvm/vmx.c | 5 +++++ arch/x86/kvm/x86.c | 14 +++++++++++++- 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 815f50e425a..b3a7a314d55 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -958,6 +958,11 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) } else vcpu->guest_debug = 0; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; + else + svm->vmcb->save.dr7 = vcpu->arch.dr7; + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; else if (old_debug & KVM_GUESTDBG_SINGLESTEP) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0989776ee7b..cee81c9a665 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1017,6 +1017,11 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); + else + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + flags = vmcs_readl(GUEST_RFLAGS); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 300bc4d42ab..2477e87b2f8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3861,10 +3861,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - int r; + int i, r; vcpu_load(vcpu); + if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) == + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) { + for (i = 0; i < KVM_NR_DB_REGS; ++i) + vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; + vcpu->arch.switch_db_regs = + (dbg->arch.debugreg[7] & DR7_BP_EN_MASK); + } else { + for (i = 0; i < KVM_NR_DB_REGS; i++) + vcpu->arch.eff_db[i] = vcpu->arch.db[i]; + vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); + } + r = kvm_x86_ops->set_guest_debug(vcpu, dbg); if (dbg->control & KVM_GUESTDBG_INJECT_DB) -- cgit v1.2.3 From e9a999fe1feaddb71bffbacbbd68e0da8ca8b50b Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 18 Dec 2008 12:17:51 +0100 Subject: KVM: ia64: stack get/restore patch Implement KVM_IA64_VCPU_[GS]ET_STACK ioctl calls. This is required for live migrations. Patch is based on previous implementation that was part of old GET/SET_REGS ioctl calls. 
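As a usage sketch (not part of the patch): the ioctl handler in the diff below first copies a pointer-sized value out of the ioctl argument, so userspace hands in the address of its buffer pointer rather than the buffer itself. vcpu_fd is a placeholder for an existing vcpu descriptor, and the KVM_IA64_VCPU_{GET,SET}_STACK ioctl numbers are assumed to come from the generic <linux/kvm.h> half of the change:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/* Save the vcpu stack area for migration; the buffer is
 * KVM_IA64_VCPU_STACK_SIZE bytes, as defined in asm/kvm.h below. */
static struct kvm_ia64_vcpu_stack *save_vcpu_stack(int vcpu_fd)
{
	struct kvm_ia64_vcpu_stack *buf = calloc(1, sizeof(*buf));

	/* Pass &buf: the kernel reads the destination pointer out of the
	 * ioctl argument before writing the stack contents through it. */
	if (buf && ioctl(vcpu_fd, KVM_IA64_VCPU_GET_STACK, &buf) < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}

/* Restore a previously saved stack on the target vcpu, using the same
 * double-indirection convention. */
static int restore_vcpu_stack(int vcpu_fd, struct kvm_ia64_vcpu_stack *buf)
{
	return ioctl(vcpu_fd, KVM_IA64_VCPU_SET_STACK, &buf);
}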
Signed-off-by: Jes Sorensen Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 7 +++ arch/ia64/include/asm/kvm_host.h | 6 ++- arch/ia64/kvm/kvm-ia64.c | 92 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 101 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index be3fdb89121..b5145784233 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -214,6 +214,13 @@ struct kvm_sregs { struct kvm_fpu { }; +#define KVM_IA64_VCPU_STACK_SHIFT 16 +#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT) + +struct kvm_ia64_vcpu_stack { + unsigned char stack[KVM_IA64_VCPU_STACK_SIZE]; +}; + struct kvm_debug_exit_arch { }; diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 34866366165..7da0c096322 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -112,7 +112,11 @@ #define VCPU_STRUCT_SHIFT 16 #define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) -#define KVM_STK_OFFSET VCPU_STRUCT_SIZE +/* + * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h + */ +#define KVM_STK_SHIFT 16 +#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT) #define KVM_VM_STRUCT_SHIFT 19 #define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index de47467a0e6..1477f91617a 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1421,6 +1421,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) return 0; } +int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, + struct kvm_ia64_vcpu_stack *stack) +{ + memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, + struct kvm_ia64_vcpu_stack *stack) +{ + memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), + sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); + + vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; + return 0; +} + void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { @@ -1430,9 +1447,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) + unsigned int ioctl, unsigned long arg) { - return -EINVAL; + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + struct kvm_ia64_vcpu_stack *stack = NULL; + long r; + + switch (ioctl) { + case KVM_IA64_VCPU_GET_STACK: { + struct kvm_ia64_vcpu_stack __user *user_stack; + void __user *first_p = argp; + + r = -EFAULT; + if (copy_from_user(&user_stack, first_p, sizeof(void *))) + goto out; + + if (!access_ok(VERIFY_WRITE, user_stack, + sizeof(struct kvm_ia64_vcpu_stack))) { + printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " + "Illegal user destination address for stack\n"); + goto out; + } + stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); + if (!stack) { + r = -ENOMEM; + goto out; + } + + r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); + if (r) + goto out; + + if (copy_to_user(user_stack, stack, + sizeof(struct kvm_ia64_vcpu_stack))) + goto out; + + break; + } + case KVM_IA64_VCPU_SET_STACK: { + struct kvm_ia64_vcpu_stack __user *user_stack; + void __user *first_p = argp; + + r = -EFAULT; + if (copy_from_user(&user_stack, first_p, sizeof(void *))) + goto out; + + if (!access_ok(VERIFY_READ, user_stack, + sizeof(struct kvm_ia64_vcpu_stack))) 
{ + printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " + "Illegal user address for stack\n"); + goto out; + } + stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); + if (!stack) { + r = -ENOMEM; + goto out; + } + if (copy_from_user(stack, user_stack, + sizeof(struct kvm_ia64_vcpu_stack))) + goto out; + + r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); + break; + } + + default: + r = -EINVAL; + } + +out: + kfree(stack); + return r; } int kvm_arch_set_memory_region(struct kvm *kvm, @@ -1472,7 +1558,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm) } long kvm_arch_dev_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) + unsigned int ioctl, unsigned long arg) { return -EINVAL; } -- cgit v1.2.3 From 989c0f0ed56468a4aa48711cef5acf122a40d1dd Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Thu, 18 Dec 2008 12:33:18 +0100 Subject: KVM: Remove old kvm_guest_debug structs Remove the remaining arch fragments of the old guest debug interface that now break non-x86 builds. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm_host.h | 2 -- arch/powerpc/include/asm/kvm_host.h | 6 ------ arch/s390/include/asm/kvm_host.h | 3 --- 3 files changed, 11 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 7da0c096322..46f8b0eb568 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -239,8 +239,6 @@ struct kvm_vm_data { struct kvm; struct kvm_vcpu; -struct kvm_guest_debug{ -}; struct kvm_mmio_req { uint64_t addr; /* physical address */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c1e436fe773..a22600537a8 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -200,10 +200,4 @@ struct kvm_vcpu_arch { unsigned long pending_exceptions; }; -struct kvm_guest_debug { - int enabled; - unsigned long bp[4]; - int singlestep; -}; - #endif /* __POWERPC_KVM_HOST_H__ */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3c55e4107dc..c6e674f5fca 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -21,9 +21,6 @@ /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 -struct kvm_guest_debug { -}; - struct sca_entry { atomic_t scn; __u64 reserved; -- cgit v1.2.3 From 22ccb14203d59a8bcf6f3fea76b3594d710569fa Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Thu, 18 Dec 2008 10:23:58 +0800 Subject: KVM: ia64: Code cleanup Remove some unnecessary blank lines to accord with Kernel's coding style. Also remove vcpu_get_itir_on_fault due to no reference to it. 
Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/process.c | 15 --------------- arch/ia64/kvm/vcpu.c | 39 ++------------------------------------- arch/ia64/kvm/vtlb.c | 5 ----- 3 files changed, 2 insertions(+), 57 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index 230eae482f3..f9c9504144f 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c @@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) return (rr1.val); } - /* * Set vIFA & vITIR & vIHA, when vPSR.ic =1 * Parameter: @@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); } - - /* * Data Nested TLB Fault * @ Data Nested TLB Vector @@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); } - /* * Data TLB Fault * @ Data TLB vector @@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) /* If vPSR.ic, IFA, ITIR, IHA*/ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); - - } /* @@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) _vhpt_fault(vcpu, vadr); } - /* * VHPT Data Fault * @ VHPT Translation vector @@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) _vhpt_fault(vcpu, vadr); } - - /* * Deal with: * General Exception vector @@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu) inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); } - /* * Illegal Operation Fault * @ General Exception Vector @@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); } - void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) { __page_not_present(vcpu, vadr); } - void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) { __page_not_present(vcpu, vadr); } - /* Deal with * Data access rights vector */ @@ -703,7 +690,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu) } } - void leave_hypervisor_tail(void) { struct kvm_vcpu *v = current_vcpu; @@ -737,7 +723,6 @@ void leave_hypervisor_tail(void) } } - static inline void handle_lds(struct kvm_pt_regs *regs) { regs->cr_ipsr |= IA64_PSR_ED; diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index ecd526b5532..4d8be4c252f 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c @@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu) return; } - void switch_to_virtual_rid(struct kvm_vcpu *vcpu) { unsigned long psr; @@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, return; } - - /* * In physical mode, insert tc/tr for region 0 and 4 uses * RID[0] and RID[4] which is for physical mode emulation. @@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs, return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); } - /* * The inverse of the above: given bspstore and the number of * registers, calculate ar.bsp. 
@@ -1039,8 +1035,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) return key; } - - void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long thash, vadr; @@ -1050,7 +1044,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); } - void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long tag, vadr; @@ -1131,7 +1124,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr) return IA64_NO_FAULT; } - int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long r1, r3; @@ -1154,7 +1146,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); } - /************************************ * Insert/Purge translation register/cache ************************************/ @@ -1385,7 +1376,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) vcpu_set_itc(vcpu, r2); } - void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long r1; @@ -1393,8 +1383,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) r1 = vcpu_get_itc(vcpu); vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); } + /************************************************************************** - struct kvm_vcpu*protection key register access routines + struct kvm_vcpu protection key register access routines **************************************************************************/ unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) @@ -1407,20 +1398,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) ia64_set_pkr(reg, val); } - -unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa) -{ - union ia64_rr rr, rr1; - - rr.val = vcpu_get_rr(vcpu, ifa); - rr1.val = 0; - rr1.ps = rr.ps; - rr1.rid = rr.rid; - return (rr1.val); -} - - - /******************************** * Moves to privileged registers ********************************/ @@ -1464,8 +1441,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, return (IA64_NO_FAULT); } - - void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long r3, r2; @@ -1510,8 +1485,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) vcpu_set_pkr(vcpu, r3, r2); } - - void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long r3, r1; @@ -1557,7 +1530,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); } - unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) { /* FIXME: This could get called as a result of a rsvd-reg fault */ @@ -1609,7 +1581,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) return 0; } - unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) { unsigned long tgt = inst.M33.r1; @@ -1633,8 +1604,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) return 0; } - - void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) { @@ -1776,9 +1745,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu) } } - - - void vcpu_rfi(struct kvm_vcpu *vcpu) { unsigned long ifs, psr; @@ -1796,7 +1762,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu) regs->cr_iip = VCPU(vcpu, iip); } - /* VPSR can't keep track of below bits of guest PSR This function gets guest PSR diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 6b6307a3bd5..ac94867f826 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -509,7 +509,6 @@ void thash_purge_all(struct kvm_vcpu *v) local_flush_tlb_all(); } - /* * Lookup the hash table and its collision chain to find an entry * covering 
this address rid:va or the entry. @@ -517,7 +516,6 @@ void thash_purge_all(struct kvm_vcpu *v) * INPUT: * in: TLB format for both VHPT & TLB. */ - struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) { struct thash_data *cch; @@ -547,7 +545,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) return NULL; } - /* * Initialize internal control data before service. */ @@ -589,7 +586,6 @@ u64 kvm_gpa_to_mpa(u64 gpa) return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); } - /* * Fetch guest bundle code. * INPUT: @@ -631,7 +627,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) return IA64_NO_FAULT; } - void kvm_init_vhpt(struct kvm_vcpu *v) { v->arch.vhpt.num = VHPT_NUM_ENTRIES; -- cgit v1.2.3 From a770f6f28b1a9287189f3dc8333eb694d9a2f0ab Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:20:09 +0200 Subject: KVM: MMU: Inherit a shadow page's guest level count from vcpu setup Instead of "calculating" it on every shadow page allocation, set it once when switching modes, and copy it when allocating pages. This doesn't buy us much, but sets up the stage for inheriting more information related to the mmu setup. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu.c | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0a4dab25a91..28f875f28f5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -244,6 +244,7 @@ struct kvm_mmu { hpa_t root_hpa; int root_level; int shadow_root_level; + union kvm_mmu_page_role base_role; u64 *pae_root; }; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2d4477c7147..f15023c11fe 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1204,8 +1204,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp; struct hlist_node *node, *tmp; - role.word = 0; - role.glevels = vcpu->arch.mmu.root_level; + role = vcpu->arch.mmu.base_role; role.level = level; role.metaphysical = metaphysical; role.access = access; @@ -2251,17 +2250,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) static int init_kvm_softmmu(struct kvm_vcpu *vcpu) { + int r; + ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (!is_paging(vcpu)) - return nonpaging_init_context(vcpu); + r = nonpaging_init_context(vcpu); else if (is_long_mode(vcpu)) - return paging64_init_context(vcpu); + r = paging64_init_context(vcpu); else if (is_pae(vcpu)) - return paging32E_init_context(vcpu); + r = paging32E_init_context(vcpu); else - return paging32_init_context(vcpu); + r = paging32_init_context(vcpu); + + vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; + + return r; } static int init_kvm_mmu(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 2f0b3d60b2c43aef7cd10169c425c052169c622a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:27:36 +0200 Subject: KVM: MMU: Segregate mmu pages created with different cr4.pge settings Don't allow a vcpu with cr4.pge cleared to use a shadow page created with cr4.pge set; this might cause a cr3 switch not to sync ptes that have the global bit set (the global bit has no effect if !cr4.pge). This can only occur on smp with different cr4.pge settings for different vcpus (since a cr4 change will resync the shadow ptes), but there's no cost to being correct here. 
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 28f875f28f5..c2a01d0513f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -183,6 +183,7 @@ union kvm_mmu_page_role { unsigned metaphysical:1; unsigned access:3; unsigned invalid:1; + unsigned cr4_pge:1; }; }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2477e87b2f8..873602b5edf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -364,6 +364,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } kvm_x86_ops->set_cr4(vcpu, cr4); vcpu->arch.cr4 = cr4; + vcpu->arch.mmu.base_role.cr4_pge = !!(cr4 & X86_CR4_PGE); kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); } -- cgit v1.2.3 From e2078318042569682d0496cfd7bd962a766e82b1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:36:59 +0200 Subject: KVM: MMU: Initialize a shadow page's global attribute from cr4.pge If cr4.pge is cleared, we ought to treat any ptes in the page as non-global. This allows us to remove the check from set_spte(). Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f15023c11fe..5f03ec324e3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -797,7 +797,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, ASSERT(is_empty_shadow_page(sp->spt)); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; - sp->global = 1; sp->parent_pte = parent_pte; --vcpu->kvm->arch.n_free_mmu_pages; return sp; @@ -1241,6 +1240,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word); sp->gfn = gfn; sp->role = role; + sp->global = role.cr4_pge; hlist_add_head(&sp->hash_link, bucket); if (!metaphysical) { if (rmap_write_protect(vcpu->kvm, gfn)) @@ -1668,8 +1668,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, u64 mt_mask = shadow_mt_mask; struct kvm_mmu_page *sp = page_header(__pa(shadow_pte)); - if (!(vcpu->arch.cr4 & X86_CR4_PGE)) - global = 0; if (!global && sp->global) { sp->global = 0; if (sp->unsync) { -- cgit v1.2.3 From 2b3d2a206037b1471de6a6dc51427af034cfdb47 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 23 Dec 2008 19:46:01 +0200 Subject: KVM: Fix vmload and friends misinterpreted as lidt The AMD SVM instruction family all overload the 0f 01 /3 opcode, further multiplexing on the three r/m bits. But the code decided that anything that isn't a vmmcall must be an lidt (which shares the 0f 01 /3 opcode, for the case that mod = 3). Fix by aborting emulation if this isn't a vmmcall. 
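For reference, the sub-opcode layout the decoder has to distinguish here: with 0f 01 and reg field /3, mod == 3 selects the SVM group rather than lidt, and the low three ModRM bits pick the instruction. The tiny lookup helper below is not from the patch; the encodings are recalled from the AMD manuals, so double-check them before relying on this:

/* 0f 01 /3 with mod == 3: ModRM bytes d8..df form the AMD SVM group. */
static const char *svm_0f01_group3_name(unsigned int rm)
{
	static const char * const names[8] = {
		"vmrun",	/* 0f 01 d8 */
		"vmmcall",	/* 0f 01 d9 */
		"vmload",	/* 0f 01 da */
		"vmsave",	/* 0f 01 db */
		"stgi",		/* 0f 01 dc */
		"clgi",		/* 0f 01 dd */
		"skinit",	/* 0f 01 de */
		"invlpga",	/* 0f 01 df */
	};

	return names[rm & 7];
}

Only the vmmcall slot (rm == 1) can be emulated sensibly, which is why the change in the diff below bails out to cannot_emulate for every other r/m value instead of falling through to the lidt path.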
Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index d174db7a337..54fb09889a8 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1908,11 +1908,16 @@ twobyte_insn: c->dst.type = OP_NONE; break; case 3: /* lidt/vmmcall */ - if (c->modrm_mod == 3 && c->modrm_rm == 1) { - rc = kvm_fix_hypercall(ctxt->vcpu); - if (rc) - goto done; - kvm_emulate_hypercall(ctxt->vcpu); + if (c->modrm_mod == 3) { + switch (c->modrm_rm) { + case 1: + rc = kvm_fix_hypercall(ctxt->vcpu); + if (rc) + goto done; + break; + default: + goto cannot_emulate; + } } else { rc = read_descriptor(ctxt, ops, c->src.ptr, &size, &address, -- cgit v1.2.3 From 2d11123a77e54d5cea262c958e8498f4a08bce3d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 14:39:47 +0200 Subject: KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to walk_shadow() Using a for_each loop style removes the need to write callback and nasty casts. Implement the walk_shadow() using the for_each_shadow_entry(). Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 69 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5f03ec324e3..c669f2af1d1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -150,6 +150,20 @@ struct kvm_shadow_walk { u64 addr, u64 *spte, int level); }; +struct kvm_shadow_walk_iterator { + u64 addr; + hpa_t shadow_addr; + int level; + u64 *sptep; + unsigned index; +}; + +#define for_each_shadow_entry(_vcpu, _addr, _walker) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)); \ + shadow_walk_next(&(_walker))) + + struct kvm_unsync_walk { int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk); }; @@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, return sp; } +static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, + struct kvm_vcpu *vcpu, u64 addr) +{ + iterator->addr = addr; + iterator->shadow_addr = vcpu->arch.mmu.root_hpa; + iterator->level = vcpu->arch.mmu.shadow_root_level; + if (iterator->level == PT32E_ROOT_LEVEL) { + iterator->shadow_addr + = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; + iterator->shadow_addr &= PT64_BASE_ADDR_MASK; + --iterator->level; + if (!iterator->shadow_addr) + iterator->level = 0; + } +} + +static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) +{ + if (iterator->level < PT_PAGE_TABLE_LEVEL) + return false; + iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); + iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; + return true; +} + +static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +{ + iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK; + --iterator->level; +} + static int walk_shadow(struct kvm_shadow_walk *walker, struct kvm_vcpu *vcpu, u64 addr) { - hpa_t shadow_addr; - int level; + struct kvm_shadow_walk_iterator iterator; int r; - u64 *sptep; - unsigned index; - - shadow_addr = vcpu->arch.mmu.root_hpa; - level = vcpu->arch.mmu.shadow_root_level; - if (level == PT32E_ROOT_LEVEL) { - shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; - shadow_addr &= PT64_BASE_ADDR_MASK; - if (!shadow_addr) - return 1; - --level; - } - while (level >= PT_PAGE_TABLE_LEVEL) { - index 
= SHADOW_PT_INDEX(addr, level); - sptep = ((u64 *)__va(shadow_addr)) + index; - r = walker->entry(walker, vcpu, addr, sptep, level); + for_each_shadow_entry(vcpu, addr, iterator) { + r = walker->entry(walker, vcpu, addr, + iterator.sptep, iterator.level); if (r) return r; - shadow_addr = *sptep & PT64_BASE_ADDR_MASK; - --level; } return 0; } -- cgit v1.2.3 From 9f652d21c3f887075a33abae85bf53fec64e67b1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 14:54:25 +0200 Subject: KVM: MMU: Use for_each_shadow_entry() in __direct_map() Eliminating a callback and a useless structure. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 83 +++++++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c669f2af1d1..a25e1adb5cf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { } -struct direct_shadow_walk { - struct kvm_shadow_walk walker; - pfn_t pfn; - int write; - int largepage; - int pt_write; -}; - -static int direct_map_entry(struct kvm_shadow_walk *_walk, - struct kvm_vcpu *vcpu, - u64 addr, u64 *sptep, int level) +static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, + int largepage, gfn_t gfn, pfn_t pfn) { - struct direct_shadow_walk *walk = - container_of(_walk, struct direct_shadow_walk, walker); + struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; + int pt_write = 0; gfn_t pseudo_gfn; - gfn_t gfn = addr >> PAGE_SHIFT; - - if (level == PT_PAGE_TABLE_LEVEL - || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { - mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, - 0, walk->write, 1, &walk->pt_write, - walk->largepage, 0, gfn, walk->pfn, false); - ++vcpu->stat.pf_fixed; - return 1; - } - if (*sptep == shadow_trap_nonpresent_pte) { - pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; - sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1, - 1, ACC_ALL, sptep); - if (!sp) { - pgprintk("nonpaging_map: ENOMEM\n"); - kvm_release_pfn_clean(walk->pfn); - return -ENOMEM; + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { + if (iterator.level == PT_PAGE_TABLE_LEVEL + || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) { + mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, + 0, write, 1, &pt_write, + largepage, 0, gfn, pfn, false); + ++vcpu->stat.pf_fixed; + break; } - set_shadow_pte(sptep, - __pa(sp->spt) - | PT_PRESENT_MASK | PT_WRITABLE_MASK - | shadow_user_mask | shadow_x_mask); - } - return 0; -} + if (*iterator.sptep == shadow_trap_nonpresent_pte) { + pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; + sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, + iterator.level - 1, + 1, ACC_ALL, iterator.sptep); + if (!sp) { + pgprintk("nonpaging_map: ENOMEM\n"); + kvm_release_pfn_clean(pfn); + return -ENOMEM; + } -static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, - int largepage, gfn_t gfn, pfn_t pfn) -{ - int r; - struct direct_shadow_walk walker = { - .walker = { .entry = direct_map_entry, }, - .pfn = pfn, - .largepage = largepage, - .write = write, - .pt_write = 0, - }; - - r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT); - if (r < 0) - return r; - return walker.pt_write; + set_shadow_pte(iterator.sptep, + __pa(sp->spt) + | PT_PRESENT_MASK | PT_WRITABLE_MASK + | shadow_user_mask | shadow_x_mask); + } + } + return pt_write; } static int nonpaging_map(struct kvm_vcpu 
*vcpu, gva_t v, int write, gfn_t gfn) -- cgit v1.2.3 From e7a04c99b54ad9acb98a56113ec3163bc1039e13 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:10:50 +0200 Subject: KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch() Effectively reverting to the pre walk_shadow() version -- but now with the reusable for_each(). Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 128 ++++++++++++++++++++------------------------- 1 file changed, 58 insertions(+), 70 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 9fd78b6e17a..69c7e3311b8 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -283,91 +283,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, /* * Fetch a shadow pte for a specific level in the paging hierarchy. */ -static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw, - struct kvm_vcpu *vcpu, u64 addr, - u64 *sptep, int level) +static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, + struct guest_walker *gw, + int user_fault, int write_fault, int largepage, + int *ptwrite, pfn_t pfn) { - struct shadow_walker *sw = - container_of(_sw, struct shadow_walker, walker); - struct guest_walker *gw = sw->guest_walker; unsigned access = gw->pt_access; struct kvm_mmu_page *shadow_page; - u64 spte; + u64 spte, *sptep; int metaphysical; gfn_t table_gfn; int r; + int level; pt_element_t curr_pte; + struct kvm_shadow_walk_iterator iterator; - if (level == PT_PAGE_TABLE_LEVEL - || (sw->largepage && level == PT_DIRECTORY_LEVEL)) { - mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, - sw->user_fault, sw->write_fault, - gw->ptes[gw->level-1] & PT_DIRTY_MASK, - sw->ptwrite, sw->largepage, - gw->ptes[gw->level-1] & PT_GLOBAL_MASK, - gw->gfn, sw->pfn, false); - sw->sptep = sptep; - return 1; - } - - if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) - return 0; - - if (is_large_pte(*sptep)) { - set_shadow_pte(sptep, shadow_trap_nonpresent_pte); - kvm_flush_remote_tlbs(vcpu->kvm); - rmap_remove(vcpu->kvm, sptep); - } + if (!is_present_pte(gw->ptes[gw->level - 1])) + return NULL; - if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) { - metaphysical = 1; - if (!is_dirty_pte(gw->ptes[level - 1])) - access &= ~ACC_WRITE_MASK; - table_gfn = gpte_to_gfn(gw->ptes[level - 1]); - } else { - metaphysical = 0; - table_gfn = gw->table_gfn[level - 2]; - } - shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1, - metaphysical, access, sptep); - if (!metaphysical) { - r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], - &curr_pte, sizeof(curr_pte)); - if (r || curr_pte != gw->ptes[level - 2]) { - kvm_mmu_put_page(shadow_page, sptep); - kvm_release_pfn_clean(sw->pfn); - sw->sptep = NULL; - return 1; + for_each_shadow_entry(vcpu, addr, iterator) { + level = iterator.level; + sptep = iterator.sptep; + if (level == PT_PAGE_TABLE_LEVEL + || (largepage && level == PT_DIRECTORY_LEVEL)) { + mmu_set_spte(vcpu, sptep, access, + gw->pte_access & access, + user_fault, write_fault, + gw->ptes[gw->level-1] & PT_DIRTY_MASK, + ptwrite, largepage, + gw->ptes[gw->level-1] & PT_GLOBAL_MASK, + gw->gfn, pfn, false); + break; } - } - spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK - | PT_WRITABLE_MASK | PT_USER_MASK; - *sptep = spte; - return 0; -} + if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) + continue; -static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, - struct 
guest_walker *guest_walker, - int user_fault, int write_fault, int largepage, - int *ptwrite, pfn_t pfn) -{ - struct shadow_walker walker = { - .walker = { .entry = FNAME(shadow_walk_entry), }, - .guest_walker = guest_walker, - .user_fault = user_fault, - .write_fault = write_fault, - .largepage = largepage, - .ptwrite = ptwrite, - .pfn = pfn, - }; + if (is_large_pte(*sptep)) { + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); + kvm_flush_remote_tlbs(vcpu->kvm); + rmap_remove(vcpu->kvm, sptep); + } - if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1])) - return NULL; + if (level == PT_DIRECTORY_LEVEL + && gw->level == PT_DIRECTORY_LEVEL) { + metaphysical = 1; + if (!is_dirty_pte(gw->ptes[level - 1])) + access &= ~ACC_WRITE_MASK; + table_gfn = gpte_to_gfn(gw->ptes[level - 1]); + } else { + metaphysical = 0; + table_gfn = gw->table_gfn[level - 2]; + } + shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, + metaphysical, access, sptep); + if (!metaphysical) { + r = kvm_read_guest_atomic(vcpu->kvm, + gw->pte_gpa[level - 2], + &curr_pte, sizeof(curr_pte)); + if (r || curr_pte != gw->ptes[level - 2]) { + kvm_mmu_put_page(shadow_page, sptep); + kvm_release_pfn_clean(pfn); + sptep = NULL; + break; + } + } - walk_shadow(&walker.walker, vcpu, addr); + spte = __pa(shadow_page->spt) + | PT_PRESENT_MASK | PT_ACCESSED_MASK + | PT_WRITABLE_MASK | PT_USER_MASK; + *sptep = spte; + } - return walker.sptep; + return sptep; } /* -- cgit v1.2.3 From a461930bc3cece021f8f89a80dcc1d0691a92b52 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:19:00 +0200 Subject: KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in invlpg() Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 81 ++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 69c7e3311b8..46b68f941f6 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -25,7 +25,6 @@ #if PTTYPE == 64 #define pt_element_t u64 #define guest_walker guest_walker64 - #define shadow_walker shadow_walker64 #define FNAME(name) paging##64_##name #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK @@ -42,7 +41,6 @@ #elif PTTYPE == 32 #define pt_element_t u32 #define guest_walker guest_walker32 - #define shadow_walker shadow_walker32 #define FNAME(name) paging##32_##name #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK @@ -73,18 +71,6 @@ struct guest_walker { u32 error_code; }; -struct shadow_walker { - struct kvm_shadow_walk walker; - struct guest_walker *guest_walker; - int user_fault; - int write_fault; - int largepage; - int *ptwrite; - pfn_t pfn; - u64 *sptep; - gpa_t pte_gpa; -}; - static gfn_t gpte_to_gfn(pt_element_t gpte) { return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; @@ -453,54 +439,52 @@ out_unlock: return 0; } -static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, - struct kvm_vcpu *vcpu, u64 addr, - u64 *sptep, int level) +static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) { - struct shadow_walker *sw = - container_of(_sw, struct shadow_walker, walker); + struct kvm_shadow_walk_iterator iterator; + pt_element_t gpte; + gpa_t pte_gpa = -1; + int level; + u64 *sptep; + + spin_lock(&vcpu->kvm->mmu_lock); - /* FIXME: properly handle invlpg on large guest pages */ - if (level == PT_PAGE_TABLE_LEVEL || - ((level == PT_DIRECTORY_LEVEL) && 
is_large_pte(*sptep))) { - struct kvm_mmu_page *sp = page_header(__pa(sptep)); + for_each_shadow_entry(vcpu, gva, iterator) { + level = iterator.level; + sptep = iterator.sptep; - sw->pte_gpa = (sp->gfn << PAGE_SHIFT); - sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + /* FIXME: properly handle invlpg on large guest pages */ + if (level == PT_PAGE_TABLE_LEVEL || + ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); - if (is_shadow_present_pte(*sptep)) { - rmap_remove(vcpu->kvm, sptep); - if (is_large_pte(*sptep)) - --vcpu->kvm->stat.lpages; + pte_gpa = (sp->gfn << PAGE_SHIFT); + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + if (is_shadow_present_pte(*sptep)) { + rmap_remove(vcpu->kvm, sptep); + if (is_large_pte(*sptep)) + --vcpu->kvm->stat.lpages; + } + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); + break; } - set_shadow_pte(sptep, shadow_trap_nonpresent_pte); - return 1; - } - if (!is_shadow_present_pte(*sptep)) - return 1; - return 0; -} -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) -{ - pt_element_t gpte; - struct shadow_walker walker = { - .walker = { .entry = FNAME(shadow_invlpg_entry), }, - .pte_gpa = -1, - }; + if (!is_shadow_present_pte(*sptep)) + break; + } - spin_lock(&vcpu->kvm->mmu_lock); - walk_shadow(&walker.walker, vcpu, gva); spin_unlock(&vcpu->kvm->mmu_lock); - if (walker.pte_gpa == -1) + + if (pte_gpa == -1) return; - if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte, + if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, sizeof(pt_element_t))) return; if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) { if (mmu_topup_memory_caches(vcpu)) return; - kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte, + kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte, sizeof(pt_element_t), 0); } } @@ -607,7 +591,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) #undef pt_element_t #undef guest_walker -#undef shadow_walker #undef FNAME #undef PT_BASE_ADDR_MASK #undef PT_INDEX -- cgit v1.2.3 From e8c4a4e8a7cc047dfb3c26b2cbc8599ad3460364 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:20:07 +0200 Subject: KVM: MMU: Drop walk_shadow() No longer used. 
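With walk_shadow() gone, callers open-code the traversal using the iterator introduced earlier in this series. A minimal sketch of that pattern, using only helpers already visible in these patches (dump_shadow_walk itself is a hypothetical debugging helper, not part of any patch):

static void dump_shadow_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;

	/* Caller is expected to hold kvm->mmu_lock, as the real users do. */
	/* Visit one spte per level, from the shadow root down to the leaf. */
	for_each_shadow_entry(vcpu, addr, iterator) {
		printk(KERN_DEBUG "level %d spte %llx\n",
		       iterator.level, *iterator.sptep);
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
}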
Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a25e1adb5cf..15850809b55 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -145,11 +145,6 @@ struct kvm_rmap_desc { struct kvm_rmap_desc *more; }; -struct kvm_shadow_walk { - int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu, - u64 addr, u64 *spte, int level); -}; - struct kvm_shadow_walk_iterator { u64 addr; hpa_t shadow_addr; @@ -1299,21 +1294,6 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) --iterator->level; } -static int walk_shadow(struct kvm_shadow_walk *walker, - struct kvm_vcpu *vcpu, u64 addr) -{ - struct kvm_shadow_walk_iterator iterator; - int r; - - for_each_shadow_entry(vcpu, addr, iterator) { - r = walker->entry(walker, vcpu, addr, - iterator.sptep, iterator.level); - if (r) - return r; - } - return 0; -} - static void kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp) { -- cgit v1.2.3 From 53f658b3c33616a4997ee254311b335e59063289 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Thu, 11 Dec 2008 20:45:05 +0100 Subject: KVM: VMX: initialize TSC offset relative to vm creation time VMX initializes the TSC offset for each vcpu at different times, and also reinitializes it for vcpus other than 0 on APIC SIPI message. This bug causes the TSC's to appear unsynchronized in the guest, even if the host is good. Older Linux kernels don't handle the situation very well, so gettimeofday is likely to go backwards in time: http://www.mail-archive.com/kvm@vger.kernel.org/msg02955.html http://sourceforge.net/tracker/index.php?func=detail&aid=2025534&group_id=180599&atid=893831 Fix it by initializating the offset of each vcpu relative to vm creation time, and moving it from vmx_vcpu_reset to vmx_vcpu_setup, out of the APIC MP init path. 
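Restated as a sketch rather than as the patch itself: every vcpu programs its TSC_OFFSET against the same per-VM baseline, captured once at VM creation, so the guest sees synchronized TSCs. The field and helper names match the patch below; the wrapper function is illustrative only:

static void vmx_setup_initial_tsc(struct vcpu_vmx *vmx)
{
	u64 tsc_base, tsc_this;

	/* Baseline read once in kvm_arch_create_vm(), shared by all vcpus. */
	tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
	rdtscll(tsc_this);
	/* Clamp if this CPU's TSC is behind the baseline, so the guest TSC
	 * never starts below zero. */
	if (tsc_this < tsc_base)
		tsc_base = tsc_this;
	/* guest_tsc starts at 0, hence TSC_OFFSET = 0 - tsc_base. */
	guest_write_tsc(0, tsc_base);
}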
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx.c | 19 +++++++++++-------- arch/x86/kvm/x86.c | 2 ++ 3 files changed, 14 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c2a01d0513f..9efc446b5ac 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -398,6 +398,7 @@ struct kvm_arch{ unsigned long irq_sources_bitmap; unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; + u64 vm_init_tsc; }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cee81c9a665..3312047664a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -865,11 +865,8 @@ static u64 guest_read_tsc(void) * writes 'guest_tsc' into guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc */ -static void guest_write_tsc(u64 guest_tsc) +static void guest_write_tsc(u64 guest_tsc, u64 host_tsc) { - u64 host_tsc; - - rdtscll(host_tsc); vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); } @@ -934,6 +931,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_msr_entry *msr; + u64 host_tsc; int ret = 0; switch (msr_index) { @@ -959,7 +957,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_TIME_STAMP_COUNTER: - guest_write_tsc(data); + rdtscll(host_tsc); + guest_write_tsc(data, host_tsc); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: @@ -2109,7 +2108,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { u32 host_sysenter_cs, msr_low, msr_high; u32 junk; - u64 host_pat; + u64 host_pat, tsc_this, tsc_base; unsigned long a; struct descriptor_table dt; int i; @@ -2237,6 +2236,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); + tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; + rdtscll(tsc_this); + if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc) + tsc_base = tsc_this; + + guest_write_tsc(0, tsc_base); return 0; } @@ -2328,8 +2333,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); - guest_write_tsc(0); - /* Special registers */ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 873602b5edf..3b2acfd72d7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4170,6 +4170,8 @@ struct kvm *kvm_arch_create_vm(void) /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); + rdtscll(kvm->arch.vm_init_tsc); + return kvm; } -- cgit v1.2.3 From 77c2002e7c6f019f59a6f3cc5f8b16b41748dbe1 Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Mon, 29 Dec 2008 01:42:19 +0200 Subject: KVM: introduce kvm_read_guest_virt, kvm_write_guest_virt This commit change the name of emulator_read_std into kvm_read_guest_virt, and add new function name kvm_write_guest_virt that allow writing into a guest virtual address. 
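Both helpers walk the guest virtual range one page at a time, translating each chunk through vcpu->arch.mmu.gva_to_gpa() before copying. A hedged caller sketch, using only the signatures added in the patch below (the helper and its purpose are invented for illustration):

/* Fetch one 8-byte descriptor from a guest table at a linear address. */
static int read_guest_desc(struct kvm_vcpu *vcpu, gva_t table_base,
			   unsigned index, u64 *desc)
{
	gva_t addr = table_base + index * 8;

	if (kvm_read_guest_virt(addr, desc, 8, vcpu) != X86EMUL_CONTINUE)
		return -EFAULT;		/* gva unmapped or read failed */
	return 0;
}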
Signed-off-by: Izik Eidus Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 4 --- arch/x86/kvm/x86.c | 56 ++++++++++++++++++++++++++++++----------- 2 files changed, 42 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9efc446b5ac..b74576aec19 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -609,10 +609,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu); void fx_init(struct kvm_vcpu *vcpu); -int emulator_read_std(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); int emulator_write_emulated(unsigned long addr, const void *val, unsigned int bytes, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b2acfd72d7..67f91764e99 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1976,10 +1976,8 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, return dev; } -int emulator_read_std(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) +int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) { void *data = val; int r = X86EMUL_CONTINUE; @@ -1987,27 +1985,57 @@ int emulator_read_std(unsigned long addr, while (bytes) { gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); unsigned offset = addr & (PAGE_SIZE-1); - unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); + unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) { r = X86EMUL_PROPAGATE_FAULT; goto out; } - ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy); + ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_UNHANDLEABLE; goto out; } - bytes -= tocopy; - data += tocopy; - addr += tocopy; + bytes -= toread; + data += toread; + addr += toread; } out: return r; } -EXPORT_SYMBOL_GPL(emulator_read_std); + +int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) +{ + void *data = val; + int r = X86EMUL_CONTINUE; + + while (bytes) { + gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + unsigned offset = addr & (PAGE_SIZE-1); + unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); + int ret; + + if (gpa == UNMAPPED_GVA) { + r = X86EMUL_PROPAGATE_FAULT; + goto out; + } + ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); + if (ret < 0) { + r = X86EMUL_UNHANDLEABLE; + goto out; + } + + bytes -= towrite; + data += towrite; + addr += towrite; + } +out: + return r; +} + static int emulator_read_emulated(unsigned long addr, void *val, @@ -2029,8 +2057,8 @@ static int emulator_read_emulated(unsigned long addr, if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; - if (emulator_read_std(addr, val, bytes, vcpu) - == X86EMUL_CONTINUE) + if (kvm_read_guest_virt(addr, val, bytes, vcpu) + == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -2233,7 +2261,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); - emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); + kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu); printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); @@ -2241,7 +2269,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); static struct x86_emulate_ops 
emulate_ops = { - .read_std = emulator_read_std, + .read_std = kvm_read_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, -- cgit v1.2.3 From 0f346074403bc109f9569f14b45cb09e83729032 Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Mon, 29 Dec 2008 01:42:20 +0200 Subject: KVM: remove the vmap usage vmap() on guest pages hides those pages from the Linux mm for an extended (userspace determined) amount of time. Get rid of it. Signed-off-by: Izik Eidus Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 62 ++++++++++++------------------------------------------ 1 file changed, 13 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 67f91764e99..2a4f3a69734 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2371,40 +2371,19 @@ int emulate_instruction(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(emulate_instruction); -static void free_pio_guest_pages(struct kvm_vcpu *vcpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i) - if (vcpu->arch.pio.guest_pages[i]) { - kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]); - vcpu->arch.pio.guest_pages[i] = NULL; - } -} - static int pio_copy_data(struct kvm_vcpu *vcpu) { void *p = vcpu->arch.pio_data; - void *q; + gva_t q = vcpu->arch.pio.guest_gva; unsigned bytes; - int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1; + int ret; - q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE, - PAGE_KERNEL); - if (!q) { - free_pio_guest_pages(vcpu); - return -ENOMEM; - } - q += vcpu->arch.pio.guest_page_offset; bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count; if (vcpu->arch.pio.in) - memcpy(q, p, bytes); + ret = kvm_write_guest_virt(q, p, bytes, vcpu); else - memcpy(p, q, bytes); - q -= vcpu->arch.pio.guest_page_offset; - vunmap(q); - free_pio_guest_pages(vcpu); - return 0; + ret = kvm_read_guest_virt(q, p, bytes, vcpu); + return ret; } int complete_pio(struct kvm_vcpu *vcpu) @@ -2515,7 +2494,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, vcpu->arch.pio.in = in; vcpu->arch.pio.string = 0; vcpu->arch.pio.down = 0; - vcpu->arch.pio.guest_page_offset = 0; vcpu->arch.pio.rep = 0; if (vcpu->run->io.direction == KVM_EXIT_IO_IN) @@ -2543,9 +2521,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, gva_t address, int rep, unsigned port) { unsigned now, in_page; - int i, ret = 0; - int nr_pages = 1; - struct page *page; + int ret = 0; struct kvm_io_device *pio_dev; vcpu->run->exit_reason = KVM_EXIT_IO; @@ -2557,7 +2533,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, vcpu->arch.pio.in = in; vcpu->arch.pio.string = 1; vcpu->arch.pio.down = down; - vcpu->arch.pio.guest_page_offset = offset_in_page(address); vcpu->arch.pio.rep = rep; if (vcpu->run->io.direction == KVM_EXIT_IO_IN) @@ -2577,15 +2552,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, else in_page = offset_in_page(address) + size; now = min(count, (unsigned long)in_page / size); - if (!now) { - /* - * String I/O straddles page boundary. Pin two guest pages - * so that we satisfy atomicity constraints. Do just one - * transaction to avoid complexity. - */ - nr_pages = 2; + if (!now) now = 1; - } if (down) { /* * String I/O in reverse. Yuck. Kill the guest, fix later. 
@@ -2600,15 +2568,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count) kvm_x86_ops->skip_emulated_instruction(vcpu); - for (i = 0; i < nr_pages; ++i) { - page = gva_to_page(vcpu, address + i * PAGE_SIZE); - vcpu->arch.pio.guest_pages[i] = page; - if (!page) { - kvm_inject_gp(vcpu, 0); - free_pio_guest_pages(vcpu); - return 1; - } - } + vcpu->arch.pio.guest_gva = address; pio_dev = vcpu_find_pio_dev(vcpu, port, vcpu->arch.pio.cur_count, @@ -2616,7 +2576,11 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, if (!vcpu->arch.pio.in) { /* string PIO write */ ret = pio_copy_data(vcpu); - if (ret >= 0 && pio_dev) { + if (ret == X86EMUL_PROPAGATE_FAULT) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if (ret == 0 && pio_dev) { pio_string_write(pio_dev, vcpu); complete_pio(vcpu); if (vcpu->arch.pio.count == 0) -- cgit v1.2.3 From 61a6bd672bda3b9468bf5895c1be085c4e481138 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 29 Dec 2008 17:32:28 +0200 Subject: KVM: Fallback support for MSR_VM_HSAVE_PA Since we advertise MSR_VM_HSAVE_PA, userspace will attempt to read it even on Intel. Implement fake support for this MSR to avoid the warnings. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a4f3a69734..c3fbe8c55c1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -742,6 +742,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: + case MSR_VM_HSAVE_PA: break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); @@ -863,6 +864,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: + case MSR_VM_HSAVE_PA: data = 0; break; case MSR_MTRRcap: -- cgit v1.2.3 From 52d939a0bf44081bc9f69b4fbdc9e7f416df27c7 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 30 Dec 2008 15:55:06 -0200 Subject: KVM: PIT: provide an option to disable interrupt reinjection Certain clocks (such as TSC) in older 2.6 guests overaccount for lost ticks, causing severe time drift. Interrupt reinjection magnifies the problem. Provide an option to disable it. 
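From userspace the option maps to a new VM ioctl carrying struct kvm_reinject_control (both added below). A rough sketch of how a VMM might disable reinjection, assuming vm_fd is an already-created KVM VM file descriptor and that the definitions are exported through linux/kvm.h:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int disable_pit_reinject(int vm_fd)
{
	struct kvm_reinject_control control;

	memset(&control, 0, sizeof(control));	/* reserved[] left zeroed */
	control.pit_reinject = 0;		/* 0 = do not reinject lost ticks */
	return ioctl(vm_fd, KVM_REINJECT_CONTROL, &control);
}

A real VMM would typically probe KVM_CAP_REINJECT_CONTROL with KVM_CHECK_EXTENSION before issuing the call.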
[avi: allow room for expansion in case we want to disable reinjection of other timers] Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm.h | 5 +++++ arch/x86/kvm/i8254.c | 4 ++++ arch/x86/kvm/i8254.h | 1 + arch/x86/kvm/x86.c | 21 +++++++++++++++++++++ 4 files changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 32eb96c7ca2..54bcf228152 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -233,4 +233,9 @@ struct kvm_guest_debug_arch { struct kvm_pit_state { struct kvm_pit_channel_state channels[3]; }; + +struct kvm_reinject_control { + __u8 pit_reinject; + __u8 reserved[31]; +}; #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 72bd275a9b5..528daadeba4 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -201,6 +201,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) if (!atomic_inc_and_test(&pt->pending)) set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests); + if (!pt->reinject) + atomic_set(&pt->pending, 1); + if (vcpu0 && waitqueue_active(&vcpu0->wq)) wake_up_interruptible(&vcpu0->wq); @@ -580,6 +583,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); + pit_state->pit_timer.reinject = true; mutex_unlock(&pit->pit_state.lock); kvm_pit_reset(pit); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 4178022b97a..76959c4b500 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -9,6 +9,7 @@ struct kvm_kpit_timer { s64 period; /* unit: ns */ s64 scheduled; atomic_t pending; + bool reinject; }; struct kvm_kpit_channel_state { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c3fbe8c55c1..a1f14611f4b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -993,6 +993,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: + case KVM_CAP_REINJECT_CONTROL: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -1728,6 +1729,15 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) return r; } +static int kvm_vm_ioctl_reinject(struct kvm *kvm, + struct kvm_reinject_control *control) +{ + if (!kvm->arch.vpit) + return -ENXIO; + kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; + return 0; +} + /* * Get (and clear) the dirty memory log for a memory slot. */ @@ -1925,6 +1935,17 @@ long kvm_arch_vm_ioctl(struct file *filp, r = 0; break; } + case KVM_REINJECT_CONTROL: { + struct kvm_reinject_control control; + r = -EFAULT; + if (copy_from_user(&control, argp, sizeof(control))) + goto out; + r = kvm_vm_ioctl_reinject(kvm, &control); + if (r) + goto out; + r = 0; + break; + } default: ; } -- cgit v1.2.3 From 1c08364c3565242f1e1bd585bc2ce458967941af Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 12:39:07 +0200 Subject: KVM: Move struct kvm_pio_request into x86 kvm_host.h This is an x86 specific stucture and has no business living in common code. 
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b74576aec19..863ea73431a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -227,6 +227,18 @@ struct kvm_pv_mmu_op_buffer { char buf[512] __aligned(sizeof(long)); }; +struct kvm_pio_request { + unsigned long count; + int cur_count; + gva_t guest_gva; + int in; + int port; + int size; + int string; + int down; + int rep; +}; + /* * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level * 32-bit). The kvm_mmu structure abstracts the details of the current mmu -- cgit v1.2.3 From c46fb0211f8b93de45400bff814c0f29335af5fc Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:22:58 -0600 Subject: KVM: ppc: move struct kvmppc_44x_tlbe into 44x-specific header Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_44x.h | 7 +++++++ arch/powerpc/include/asm/kvm_host.h | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h index f49031b632c..d22d39942a9 100644 --- a/arch/powerpc/include/asm/kvm_44x.h +++ b/arch/powerpc/include/asm/kvm_44x.h @@ -28,6 +28,13 @@ * need to find some way of advertising it. */ #define KVM44x_GUEST_TLB_SIZE 64 +struct kvmppc_44x_tlbe { + u32 tid; /* Only the low 8 bits are used. */ + u32 word0; + u32 word1; + u32 word2; +}; + struct kvmppc_44x_shadow_ref { struct page *page; u16 gtlb_index; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index a22600537a8..65c4d492d72 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -64,13 +64,6 @@ struct kvm_vcpu_stat { u32 halt_wakeup; }; -struct kvmppc_44x_tlbe { - u32 tid; /* Only the low 8 bits are used. 
*/ - u32 word0; - u32 word1; - u32 word2; -}; - enum kvm_exit_types { MMIO_EXITS, DCR_EXITS, -- cgit v1.2.3 From ecc0981ff07cbe7cdf95de20be5b24fee8e49cb5 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:22:59 -0600 Subject: KVM: ppc: cosmetic changes to mmu hook names Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 5 +++-- arch/powerpc/kvm/44x_tlb.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 36d2a50a848..7ba95d28b83 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -52,13 +52,14 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run, extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); +/* Core-specific hooks */ + extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, u32 flags, u32 max_bytes, unsigned int gtlb_idx); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); - -/* Core-specific hooks */ +extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id); diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 9a34b8edb9e..8f9c09cbb83 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -248,7 +248,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler); } -void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); int i; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7c2ad4017d6..f30be9ef231 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -216,7 +216,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { - kvmppc_core_destroy_mmu(vcpu); + kvmppc_mmu_destroy(vcpu); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -- cgit v1.2.3 From 475e7cdd69101939006659a63c2e4a32d5b71389 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:00 -0600 Subject: KVM: ppc: small cosmetic changes to Book E DTLB miss handler Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/booke.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 35485dd6927..d196ae61930 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -290,6 +290,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, struct kvmppc_44x_tlbe *gtlbe; unsigned long eaddr = vcpu->arch.fault_dear; int gtlb_index; + gpa_t gpaddr; gfn_t gfn; /* Check the guest TLB. 
*/ @@ -305,8 +306,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; - vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); - gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; + gpaddr = tlb_xlate(gtlbe, eaddr); + gfn = gpaddr >> PAGE_SHIFT; if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { /* The guest TLB had a mapping, but the shadow TLB @@ -315,13 +316,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without * invoking the guest. */ - kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, + kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); r = RESUME_GUEST; } else { /* Guest has mapped and accessed a page which is not * actually RAM. */ + vcpu->arch.paddr_accessed = gpaddr; r = kvmppc_emulate_mmio(run, vcpu); kvmppc_account_exit(vcpu, MMIO_EXITS); } -- cgit v1.2.3 From 58a96214a306fc7fc66105097eea9c4f3bfa35bc Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:01 -0600 Subject: KVM: ppc: change kvmppc_mmu_map() parameters Passing just the TLB index will ease an e500 implementation. Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 1 - arch/powerpc/kvm/44x_tlb.c | 15 +++++++-------- arch/powerpc/kvm/booke.c | 6 ++---- 3 files changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 7ba95d28b83..f661f8ba3ab 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -55,7 +55,6 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); /* Core-specific hooks */ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, - u64 asid, u32 flags, u32 max_bytes, unsigned int gtlb_idx); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 8f9c09cbb83..e8ed22f28ea 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -269,15 +269,19 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) * Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. */ -void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, - u32 flags, u32 max_bytes, unsigned int gtlb_index) +void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, + unsigned int gtlb_index) { struct kvmppc_44x_tlbe stlbe; struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); + struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; struct kvmppc_44x_shadow_ref *ref; struct page *new_page; hpa_t hpaddr; gfn_t gfn; + u32 asid = gtlbe->tid; + u32 flags = gtlbe->word2; + u32 max_bytes = get_tlb_bytes(gtlbe); unsigned int victim; /* Select TLB entry to clobber. 
Indirectly guard against races with the TLB @@ -448,10 +452,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) } if (tlbe_is_host_safe(vcpu, tlbe)) { - u64 asid; gva_t eaddr; gpa_t gpaddr; - u32 flags; u32 bytes; eaddr = get_tlb_eaddr(tlbe); @@ -462,10 +464,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) eaddr &= ~(bytes - 1); gpaddr &= ~(bytes - 1); - asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; - flags = tlbe->word2 & 0xffff; - - kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index); + kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); } KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index d196ae61930..85b9e2fc6c6 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -316,8 +316,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without * invoking the guest. */ - kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, - gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); + kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); r = RESUME_GUEST; } else { @@ -364,8 +363,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without * invoking the guest. */ - kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, - gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); + kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); } else { /* Guest mapped and leaped at non-RAM! */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); -- cgit v1.2.3 From be8d1cae07d5acf4a61046d7def5eda40f0981e1 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:02 -0600 Subject: KVM: ppc: turn tlb_xlate() into a per-core hook (and give it a better name) Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 2 ++ arch/powerpc/kvm/44x.c | 6 +----- arch/powerpc/kvm/44x_tlb.c | 10 ++++++++++ arch/powerpc/kvm/44x_tlb.h | 7 ------- arch/powerpc/kvm/booke.c | 10 ++-------- 5 files changed, 15 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index f661f8ba3ab..9fd70aa916d 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -59,6 +59,8 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); +extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, + gva_t eaddr); extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id); diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index a66bec57265..8383603dd8a 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -149,8 +149,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); - struct kvmppc_44x_tlbe *gtlbe; int index; gva_t eaddr; u8 pid; @@ -166,9 +164,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, return 0; } - gtlbe = 
&vcpu_44x->guest_tlb[index]; - - tr->physical_address = tlb_xlate(gtlbe, eaddr); + tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); /* XXX what does "writeable" and "usermode" even mean? */ tr->valid = 1; diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index e8ed22f28ea..2f14671e8a1 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -208,6 +208,16 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, return -1; } +gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, + gva_t eaddr) +{ + struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); + struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; + unsigned int pgmask = get_tlb_bytes(gtlbe) - 1; + + return get_tlb_raddr(gtlbe) | (eaddr & pgmask); +} + int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.msr & MSR_IS); diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h index 772191f29e6..05b6f7eef5b 100644 --- a/arch/powerpc/kvm/44x_tlb.h +++ b/arch/powerpc/kvm/44x_tlb.h @@ -85,11 +85,4 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) return (vcpu->arch.mmucr >> 16) & 0x1; } -static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr) -{ - unsigned int pgmask = get_tlb_bytes(tlbe) - 1; - - return get_tlb_raddr(tlbe) | (eaddr & pgmask); -} - #endif /* __KVM_POWERPC_TLB_H__ */ diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 85b9e2fc6c6..56d6ed69ed6 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -286,8 +286,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, /* XXX move to a 440-specific file. */ case BOOKE_INTERRUPT_DTLB_MISS: { - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); - struct kvmppc_44x_tlbe *gtlbe; unsigned long eaddr = vcpu->arch.fault_dear; int gtlb_index; gpa_t gpaddr; @@ -305,8 +303,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } - gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; - gpaddr = tlb_xlate(gtlbe, eaddr); + gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { @@ -332,8 +329,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, /* XXX move to a 440-specific file. */ case BOOKE_INTERRUPT_ITLB_MISS: { - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); - struct kvmppc_44x_tlbe *gtlbe; unsigned long eaddr = vcpu->arch.pc; gpa_t gpaddr; gfn_t gfn; @@ -352,8 +347,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); - gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; - gpaddr = tlb_xlate(gtlbe, eaddr); + gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { -- cgit v1.2.3 From fa86b8dda2e0faccefbeda61edc02a50bd588f4f Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:03 -0600 Subject: KVM: ppc: rename 44x MMU functions used in booke.c e500 will provide its own implementation of these. 
Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 2 ++ arch/powerpc/kvm/44x_tlb.c | 4 ++-- arch/powerpc/kvm/44x_tlb.h | 2 -- arch/powerpc/kvm/booke.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9fd70aa916d..5e80a20f32b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -59,6 +59,8 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); +extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); +extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, gva_t eaddr); diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 2f14671e8a1..e67b7313ffc 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -218,14 +218,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } -int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.msr & MSR_IS); return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); } -int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.msr & MSR_DS); diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h index 05b6f7eef5b..a9ff80e5152 100644 --- a/arch/powerpc/kvm/44x_tlb.h +++ b/arch/powerpc/kvm/44x_tlb.h @@ -25,8 +25,6 @@ extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, unsigned int as); -extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); -extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 56d6ed69ed6..1e692ac16e9 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -292,7 +292,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, gfn_t gfn; /* Check the guest TLB. */ - gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr); + gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); @@ -337,7 +337,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; /* Check the guest TLB. */ - gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr); + gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. 
*/ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); -- cgit v1.2.3 From f44353610b584fcbc31e363f35594796c6446d63 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:04 -0600 Subject: KVM: ppc: remove last 44x-specific bits from booke.c Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/booke.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 1e692ac16e9..a73b3959e41 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -30,10 +30,8 @@ #include #include "timing.h" #include -#include #include "booke.h" -#include "44x_tlb.h" unsigned long kvmppc_booke_handlers; @@ -284,7 +282,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; - /* XXX move to a 440-specific file. */ case BOOKE_INTERRUPT_DTLB_MISS: { unsigned long eaddr = vcpu->arch.fault_dear; int gtlb_index; @@ -327,7 +324,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } - /* XXX move to a 440-specific file. */ case BOOKE_INTERRUPT_ITLB_MISS: { unsigned long eaddr = vcpu->arch.pc; gpa_t gpaddr; -- cgit v1.2.3 From cea5d8c9de669e30ed6d60930318376d5cc42e9e Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:05 -0600 Subject: KVM: ppc: use macros instead of hardcoded literals for instruction decoding Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/emulate.c | 93 +++++++++++++++++++++++++++++++--------------- 1 file changed, 63 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index d1d38daa93f..a561d6e8da1 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -30,6 +30,39 @@ #include #include "timing.h" +#define OP_TRAP 3 + +#define OP_31_XOP_LWZX 23 +#define OP_31_XOP_LBZX 87 +#define OP_31_XOP_STWX 151 +#define OP_31_XOP_STBX 215 +#define OP_31_XOP_STBUX 247 +#define OP_31_XOP_LHZX 279 +#define OP_31_XOP_LHZUX 311 +#define OP_31_XOP_MFSPR 339 +#define OP_31_XOP_STHX 407 +#define OP_31_XOP_STHUX 439 +#define OP_31_XOP_MTSPR 467 +#define OP_31_XOP_DCBI 470 +#define OP_31_XOP_LWBRX 534 +#define OP_31_XOP_TLBSYNC 566 +#define OP_31_XOP_STWBRX 662 +#define OP_31_XOP_LHBRX 790 +#define OP_31_XOP_STHBRX 918 + +#define OP_LWZ 32 +#define OP_LWZU 33 +#define OP_LBZ 34 +#define OP_LBZU 35 +#define OP_STW 36 +#define OP_STWU 37 +#define OP_STB 38 +#define OP_STBU 39 +#define OP_LHZ 40 +#define OP_LHZU 41 +#define OP_STH 44 +#define OP_STHU 45 + void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) { if (vcpu->arch.tcr & TCR_DIE) { @@ -78,7 +111,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); switch (get_op(inst)) { - case 3: /* trap */ + case OP_TRAP: vcpu->arch.esr |= ESR_PTR; kvmppc_core_queue_program(vcpu); advance = 0; @@ -87,31 +120,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case 31: switch (get_xop(inst)) { - case 23: /* lwzx */ + case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; - case 87: /* lbzx */ + case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; - case 151: /* stwx */ + case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; - case 215: /* stbx */ + case OP_31_XOP_STBX: rs = get_rs(inst); 
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; - case 247: /* stbux */ + case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -126,12 +159,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.gpr[rs] = ea; break; - case 279: /* lhzx */ + case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; - case 311: /* lhzux */ + case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -144,7 +177,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.gpr[ra] = ea; break; - case 339: /* mfspr */ + case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); @@ -185,7 +218,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } break; - case 407: /* sthx */ + case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -195,7 +228,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 2, 1); break; - case 439: /* sthux */ + case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -210,7 +243,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.gpr[ra] = ea; break; - case 467: /* mtspr */ + case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { @@ -246,7 +279,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } break; - case 470: /* dcbi */ + case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as @@ -254,15 +287,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) * coherence. 
*/ break; - case 534: /* lwbrx */ + case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; - case 566: /* tlbsync */ + case OP_31_XOP_TLBSYNC: break; - case 662: /* stwbrx */ + case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -272,12 +305,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 4, 0); break; - case 790: /* lhbrx */ + case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; - case 918: /* sthbrx */ + case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); @@ -293,37 +326,37 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } break; - case 32: /* lwz */ + case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; - case 33: /* lwzu */ + case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; - case 34: /* lbz */ + case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; - case 35: /* lbzu */ + case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; - case 36: /* stw */ + case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; - case 37: /* stwu */ + case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], @@ -331,13 +364,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; - case 38: /* stb */ + case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; - case 39: /* stbu */ + case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], @@ -345,25 +378,25 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; - case 40: /* lhz */ + case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; - case 41: /* lhzu */ + case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; - case 44: /* sth */ + case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; - case 45: /* sthu */ + case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], -- cgit v1.2.3 From d0c7dc03442f7cbd8740d9a4e7a4e7f84bfa676d Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:06 -0600 Subject: KVM: ppc: split out common Book E instruction emulation The Book E code will be shared with e500. I've left PID in kvmppc_core_emulate_op() just so that we don't need to move kvmppc_set_pid() right now. Once we have the e500 implementation, we can probably share that too. 
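The split gives each core a thin dispatcher that handles its own opcodes and punts everything else to the shared Book E code, exactly as 44x does in the patch below. A hypothetical e500 version (its core-specific cases omitted) would follow the same shape:

int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	int emulated = EMULATE_FAIL;

	/* e500-specific opcodes (TLB management etc.) would be decoded here. */

	/* Whatever the core did not recognize goes to the common code. */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}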
Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/44x_emulate.c | 217 +++----------------------------- arch/powerpc/kvm/Makefile | 1 + arch/powerpc/kvm/booke.h | 6 + arch/powerpc/kvm/booke_emulate.c | 262 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 284 insertions(+), 202 deletions(-) create mode 100644 arch/powerpc/kvm/booke_emulate.c (limited to 'arch') diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 82489a743a6..61af58fcece 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -27,25 +27,12 @@ #include "booke.h" #include "44x_tlb.h" -#define OP_RFI 19 - -#define XOP_RFI 50 -#define XOP_MFMSR 83 -#define XOP_WRTEE 131 -#define XOP_MTMSR 146 -#define XOP_WRTEEI 163 #define XOP_MFDCR 323 #define XOP_MTDCR 451 #define XOP_TLBSX 914 #define XOP_ICCCI 966 #define XOP_TLBWE 978 -static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) -{ - vcpu->arch.pc = vcpu->arch.srr0; - kvmppc_set_msr(vcpu, vcpu->arch.srr1); -} - int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -59,48 +46,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int ws; switch (get_op(inst)) { - case OP_RFI: - switch (get_xop(inst)) { - case XOP_RFI: - kvmppc_emul_rfi(vcpu); - kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); - *advance = 0; - break; - - default: - emulated = EMULATE_FAIL; - break; - } - break; - case 31: switch (get_xop(inst)) { - case XOP_MFMSR: - rt = get_rt(inst); - vcpu->arch.gpr[rt] = vcpu->arch.msr; - kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); - break; - - case XOP_MTMSR: - rs = get_rs(inst); - kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); - kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); - break; - - case XOP_WRTEE: - rs = get_rs(inst); - vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) - | (vcpu->arch.gpr[rs] & MSR_EE); - kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); - break; - - case XOP_WRTEEI: - vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) - | (inst & MSR_EE); - kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); - break; - case XOP_MFDCR: dcrn = get_dcrn(inst); rt = get_rt(inst); @@ -186,186 +134,51 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, emulated = EMULATE_FAIL; } + if (emulated == EMULATE_FAIL) + emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); + return emulated; } int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { + int emulated = EMULATE_DONE; + switch (sprn) { - case SPRN_MMUCR: - vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; case SPRN_PID: kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; + case SPRN_MMUCR: + vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; case SPRN_CCR0: vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; case SPRN_CCR1: vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; - case SPRN_DEAR: - vcpu->arch.dear = vcpu->arch.gpr[rs]; break; - case SPRN_ESR: - vcpu->arch.esr = vcpu->arch.gpr[rs]; break; - case SPRN_DBCR0: - vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; - case SPRN_DBCR1: - vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; - case SPRN_TSR: - vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; - case SPRN_TCR: - vcpu->arch.tcr = vcpu->arch.gpr[rs]; - kvmppc_emulate_dec(vcpu); - break; - - /* Note: SPRG4-7 are user-readable. These values are - * loaded into the real SPRGs when resuming the - * guest. 
*/ - case SPRN_SPRG4: - vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; - case SPRN_SPRG5: - vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; - case SPRN_SPRG6: - vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; - case SPRN_SPRG7: - vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; - - case SPRN_IVPR: - vcpu->arch.ivpr = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR0: - vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR1: - vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR2: - vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR3: - vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR4: - vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR5: - vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR6: - vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR7: - vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR8: - vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR9: - vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR10: - vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR11: - vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR12: - vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR13: - vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR14: - vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; - break; - case SPRN_IVOR15: - vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; - break; - default: - return EMULATE_FAIL; + emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); } kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); - return EMULATE_DONE; + return emulated; } int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { + int emulated = EMULATE_DONE; + switch (sprn) { - /* 440 */ + case SPRN_PID: + vcpu->arch.gpr[rt] = vcpu->arch.pid; break; case SPRN_MMUCR: vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; case SPRN_CCR0: vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; case SPRN_CCR1: vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; - - /* Book E */ - case SPRN_PID: - vcpu->arch.gpr[rt] = vcpu->arch.pid; break; - case SPRN_IVPR: - vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; - case SPRN_DEAR: - vcpu->arch.gpr[rt] = vcpu->arch.dear; break; - case SPRN_ESR: - vcpu->arch.gpr[rt] = vcpu->arch.esr; break; - case SPRN_DBCR0: - vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; - case SPRN_DBCR1: - vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; - - case SPRN_IVOR0: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; - break; - case SPRN_IVOR1: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; - break; - case SPRN_IVOR2: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; - break; - case SPRN_IVOR3: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; - break; - case SPRN_IVOR4: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; - break; - case SPRN_IVOR5: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; - break; - case SPRN_IVOR6: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; - break; - case SPRN_IVOR7: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; - 
break; - case SPRN_IVOR8: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; - break; - case SPRN_IVOR9: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; - break; - case SPRN_IVOR10: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; - break; - case SPRN_IVOR11: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; - break; - case SPRN_IVOR12: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; - break; - case SPRN_IVOR13: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; - break; - case SPRN_IVOR14: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; - break; - case SPRN_IVOR15: - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; - break; - default: - return EMULATE_FAIL; + emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); } kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); - return EMULATE_DONE; + return emulated; } diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index df7ba59e6d5..3ef5261828e 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -16,6 +16,7 @@ AFLAGS_booke_interrupts.o := -I$(obj) kvm-440-objs := \ booke.o \ + booke_emulate.o \ booke_interrupts.o \ 44x.o \ 44x_tlb.o \ diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index cf7c94ca24b..311fdbc23fb 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -22,6 +22,7 @@ #include #include +#include #include "timing.h" /* interrupt priortity ordering */ @@ -57,4 +58,9 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) }; } +int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); +int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); +int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); + #endif /* __KVM_BOOKE_H__ */ diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c new file mode 100644 index 00000000000..8aa78f1a1f8 --- /dev/null +++ b/arch/powerpc/kvm/booke_emulate.c @@ -0,0 +1,262 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 
2008 + * + * Authors: Hollis Blanchard + */ + +#include +#include + +#include "booke.h" + +#define OP_19_XOP_RFI 50 + +#define OP_31_XOP_MFMSR 83 +#define OP_31_XOP_WRTEE 131 +#define OP_31_XOP_MTMSR 146 +#define OP_31_XOP_WRTEEI 163 + +static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pc = vcpu->arch.srr0; + kvmppc_set_msr(vcpu, vcpu->arch.srr1); +} + +int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) +{ + int emulated = EMULATE_DONE; + int rs; + int rt; + + switch (get_op(inst)) { + case 19: + switch (get_xop(inst)) { + case OP_19_XOP_RFI: + kvmppc_emul_rfi(vcpu); + kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); + *advance = 0; + break; + + default: + emulated = EMULATE_FAIL; + break; + } + break; + + case 31: + switch (get_xop(inst)) { + + case OP_31_XOP_MFMSR: + rt = get_rt(inst); + vcpu->arch.gpr[rt] = vcpu->arch.msr; + kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); + break; + + case OP_31_XOP_MTMSR: + rs = get_rs(inst); + kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); + kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); + break; + + case OP_31_XOP_WRTEE: + rs = get_rs(inst); + vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) + | (vcpu->arch.gpr[rs] & MSR_EE); + kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); + break; + + case OP_31_XOP_WRTEEI: + vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) + | (inst & MSR_EE); + kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); + break; + + default: + emulated = EMULATE_FAIL; + } + + break; + + default: + emulated = EMULATE_FAIL; + } + + return emulated; +} + +int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) +{ + int emulated = EMULATE_DONE; + + switch (sprn) { + case SPRN_DEAR: + vcpu->arch.dear = vcpu->arch.gpr[rs]; break; + case SPRN_ESR: + vcpu->arch.esr = vcpu->arch.gpr[rs]; break; + case SPRN_DBCR0: + vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; + case SPRN_DBCR1: + vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; + case SPRN_TSR: + vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; + case SPRN_TCR: + vcpu->arch.tcr = vcpu->arch.gpr[rs]; + kvmppc_emulate_dec(vcpu); + break; + + /* Note: SPRG4-7 are user-readable. These values are + * loaded into the real SPRGs when resuming the + * guest. 
*/ + case SPRN_SPRG4: + vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; + case SPRN_SPRG5: + vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; + case SPRN_SPRG6: + vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; + case SPRN_SPRG7: + vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; + + case SPRN_IVPR: + vcpu->arch.ivpr = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR0: + vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR1: + vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR2: + vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR3: + vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR4: + vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR5: + vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR6: + vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR7: + vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR8: + vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR9: + vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR10: + vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR11: + vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR12: + vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR13: + vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR14: + vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR15: + vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; + break; + + default: + emulated = EMULATE_FAIL; + } + + return emulated; +} + +int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) +{ + int emulated = EMULATE_DONE; + + switch (sprn) { + case SPRN_IVPR: + vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; + case SPRN_DEAR: + vcpu->arch.gpr[rt] = vcpu->arch.dear; break; + case SPRN_ESR: + vcpu->arch.gpr[rt] = vcpu->arch.esr; break; + case SPRN_DBCR0: + vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; + case SPRN_DBCR1: + vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; + + case SPRN_IVOR0: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; + break; + case SPRN_IVOR1: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; + break; + case SPRN_IVOR2: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; + break; + case SPRN_IVOR3: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; + break; + case SPRN_IVOR4: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; + break; + case SPRN_IVOR5: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; + break; + case SPRN_IVOR6: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; + break; + case SPRN_IVOR7: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; + break; + case SPRN_IVOR8: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; + break; + case SPRN_IVOR9: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; + break; + case SPRN_IVOR10: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; + break; + case SPRN_IVOR11: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; + break; + case SPRN_IVOR12: + vcpu->arch.gpr[rt] = 
vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; + break; + case SPRN_IVOR13: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; + break; + case SPRN_IVOR14: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; + break; + case SPRN_IVOR15: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; + break; + + default: + emulated = EMULATE_FAIL; + } + + return emulated; +} -- cgit v1.2.3 From f7b200af8f1da748cc67993caeff3923d6014070 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:07 -0600 Subject: KVM: ppc: Add dbsr in kvm_vcpu_arch Kernel for E500 need clear dbsr when startup. So add dbsr register in kvm_vcpu_arch for BOOKE. Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/booke_emulate.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 65c4d492d72..50e5ce1b400 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -163,6 +163,7 @@ struct kvm_vcpu_arch { u32 ccr1; u32 dbcr0; u32 dbcr1; + u32 dbsr; #ifdef CONFIG_KVM_EXIT_TIMING struct kvmppc_exit_timing timing_exit; diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 8aa78f1a1f8..aebc65e93f4 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -111,6 +111,8 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; case SPRN_DBCR1: vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; + case SPRN_DBSR: + vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break; case SPRN_TSR: vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; case SPRN_TCR: @@ -204,6 +206,8 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; case SPRN_DBCR1: vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; + case SPRN_DBSR: + vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break; case SPRN_IVOR0: vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; -- cgit v1.2.3 From 366d4b9b9fdda282006b97316f8038cd36f8ecb7 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:08 -0600 Subject: KVM: ppc: No need to include core-header for KVM in asm-offsets.c currently Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kernel/asm-offsets.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 19ee491e9e2..42fe4da4e8a 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -49,7 +49,7 @@ #include #endif #ifdef CONFIG_KVM -#include +#include #endif #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) @@ -361,8 +361,6 @@ int main(void) DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM - DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe)); - DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); -- cgit v1.2.3 From 17c885eb5c38f39a2bb3143a67687bd905dcd0fa Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:09 -0600 Subject: KVM: ppc: ifdef iccci with CONFIG_44x E500 deosn't support this instruction. 
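One detail worth spelling out about the SPRN_DBSR handling added by the DBSR patch above (and the existing SPRN_TSR case): on Book E these are write-one-to-clear status registers, so the mtspr emulation clears exactly the bits the guest writes rather than assigning the written value. A small stand-alone sketch of that behaviour, with made-up bit values:

/*
 * Stand-alone sketch of the write-one-to-clear (W1C) semantics that the
 * SPRN_DBSR (and SPRN_TSR) mtspr emulation relies on: writing a 1 to a
 * bit clears that status bit, so the emulation uses "reg &= ~val"
 * instead of a plain assignment.  Bit values here are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t dbsr;			/* emulated guest DBSR */

static void emulate_mtspr_dbsr(uint32_t gpr_value)
{
	dbsr &= ~gpr_value;		/* clear only the bits the guest acknowledged */
}

int main(void)
{
	dbsr = 0x30000000;		/* pretend two debug events are pending */
	emulate_mtspr_dbsr(0x10000000);	/* guest acknowledges one of them */
	printf("DBSR after ack: 0x%08x\n", dbsr);	/* prints 0x20000000 */
	return 0;
}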
Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/booke_interrupts.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 084ebcd7dd8..4679ec287e6 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -347,7 +347,9 @@ lightweight_exit: lwz r3, VCPU_SHADOW_PID(r4) mtspr SPRN_PID, r3 +#ifdef CONFIG_44x iccci 0, 0 /* XXX hack */ +#endif /* Load some guest volatiles. */ lwz r0, VCPU_GPR(r0)(r4) -- cgit v1.2.3 From bc8080cbcc8870178f0910edd537d0cb5706d703 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:10 -0600 Subject: KVM: ppc: E500 core-specific code Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_e500.h | 67 ++++ arch/powerpc/kvm/Kconfig | 13 + arch/powerpc/kvm/Makefile | 9 + arch/powerpc/kvm/e500.c | 151 ++++++++ arch/powerpc/kvm/e500_emulate.c | 169 +++++++++ arch/powerpc/kvm/e500_tlb.c | 737 ++++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/e500_tlb.h | 184 +++++++++ 7 files changed, 1330 insertions(+) create mode 100644 arch/powerpc/include/asm/kvm_e500.h create mode 100644 arch/powerpc/kvm/e500.c create mode 100644 arch/powerpc/kvm/e500_emulate.c create mode 100644 arch/powerpc/kvm/e500_tlb.c create mode 100644 arch/powerpc/kvm/e500_tlb.h (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h new file mode 100644 index 00000000000..9d497ce4972 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_e500.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, + * + * Description: + * This file is derived from arch/powerpc/include/asm/kvm_44x.h, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_KVM_E500_H__ +#define __ASM_KVM_E500_H__ + +#include + +#define BOOKE_INTERRUPT_SIZE 36 + +#define E500_PID_NUM 3 +#define E500_TLB_NUM 2 + +struct tlbe{ + u32 mas1; + u32 mas2; + u32 mas3; + u32 mas7; +}; + +struct kvmppc_vcpu_e500 { + /* Unmodified copy of the guest's TLB. */ + struct tlbe *guest_tlb[E500_TLB_NUM]; + /* TLB that's actually used when the guest is running. */ + struct tlbe *shadow_tlb[E500_TLB_NUM]; + /* Pages which are referenced in the shadow TLB. */ + struct page **shadow_pages[E500_TLB_NUM]; + + unsigned int guest_tlb_size[E500_TLB_NUM]; + unsigned int shadow_tlb_size[E500_TLB_NUM]; + unsigned int guest_tlb_nv[E500_TLB_NUM]; + + u32 host_pid[E500_PID_NUM]; + u32 pid[E500_PID_NUM]; + + u32 mas0; + u32 mas1; + u32 mas2; + u32 mas3; + u32 mas4; + u32 mas5; + u32 mas6; + u32 mas7; + u32 l1csr1; + u32 hid0; + u32 hid1; + + struct kvm_vcpu vcpu; +}; + +static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) +{ + return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); +} + +#endif /* __ASM_KVM_E500_H__ */ diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 6dbdc4817d8..14657055014 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -43,6 +43,19 @@ config KVM_EXIT_TIMING If unsure, say N. 
+config KVM_E500 + bool "KVM support for PowerPC E500 processors" + depends on EXPERIMENTAL && E500 + select KVM + ---help--- + Support running unmodified E500 guest kernels in virtual machines on + E500 host processors. + + This module provides access to the hardware capabilities through + a character device node named /dev/kvm. + + If unsure, say N. + config KVM_TRACE bool "KVM trace support" depends on KVM && MARKERS && SYSFS diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 3ef5261828e..4b2df66c79d 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -22,3 +22,12 @@ kvm-440-objs := \ 44x_tlb.o \ 44x_emulate.o obj-$(CONFIG_KVM_440) += kvm-440.o + +kvm-e500-objs := \ + booke.o \ + booke_emulate.o \ + booke_interrupts.o \ + e500.o \ + e500_tlb.o \ + e500_emulate.o +obj-$(CONFIG_KVM_E500) += kvm-e500.o diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c new file mode 100644 index 00000000000..7992da497cd --- /dev/null +++ b/arch/powerpc/kvm/e500.c @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, + * + * Description: + * This file is derived from arch/powerpc/kvm/44x.c, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "e500_tlb.h" + +void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) +{ +} + +void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) +{ +} + +void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + kvmppc_e500_tlb_load(vcpu, cpu); +} + +void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +{ + kvmppc_e500_tlb_put(vcpu); +} + +int kvmppc_core_check_processor_compat(void) +{ + int r; + + if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0) + r = 0; + else + r = -ENOTSUPP; + + return r; +} + +int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + + kvmppc_e500_tlb_setup(vcpu_e500); + + /* Use the same core vertion as host's */ + vcpu->arch.pvr = mfspr(SPRN_PVR); + + return 0; +} + +/* 'linear_address' is actually an encoding of AS|PID|EADDR . */ +int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + int index; + gva_t eaddr; + u8 pid; + u8 as; + + eaddr = tr->linear_address; + pid = (tr->linear_address >> 32) & 0xff; + as = (tr->linear_address >> 40) & 0x1; + + index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); + if (index < 0) { + tr->valid = 0; + return 0; + } + + tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); + /* XXX what does "writeable" and "usermode" even mean? 
*/ + tr->valid = 1; + + return 0; +} + +struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +{ + struct kvmppc_vcpu_e500 *vcpu_e500; + struct kvm_vcpu *vcpu; + int err; + + vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + if (!vcpu_e500) { + err = -ENOMEM; + goto out; + } + + vcpu = &vcpu_e500->vcpu; + err = kvm_vcpu_init(vcpu, kvm, id); + if (err) + goto free_vcpu; + + err = kvmppc_e500_tlb_init(vcpu_e500); + if (err) + goto uninit_vcpu; + + return vcpu; + +uninit_vcpu: + kvm_vcpu_uninit(vcpu); +free_vcpu: + kmem_cache_free(kvm_vcpu_cache, vcpu_e500); +out: + return ERR_PTR(err); +} + +void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + + kvmppc_e500_tlb_uninit(vcpu_e500); + kvm_vcpu_uninit(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu_e500); +} + +static int kvmppc_e500_init(void) +{ + int r; + + r = kvmppc_booke_init(); + if (r) + return r; + + return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); +} + +static void kvmppc_e500_exit(void) +{ + kvmppc_booke_exit(); +} + +module_init(kvmppc_e500_init); +module_exit(kvmppc_e500_exit); diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c new file mode 100644 index 00000000000..a47f44cd2cf --- /dev/null +++ b/arch/powerpc/kvm/e500_emulate.c @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, + * + * Description: + * This file is derived from arch/powerpc/kvm/44x_emulate.c, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "booke.h" +#include "e500_tlb.h" + +#define XOP_TLBIVAX 786 +#define XOP_TLBSX 914 +#define XOP_TLBRE 946 +#define XOP_TLBWE 978 + +int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) +{ + int emulated = EMULATE_DONE; + int ra; + int rb; + int rs; + int rt; + + switch (get_op(inst)) { + case 31: + switch (get_xop(inst)) { + + case XOP_TLBRE: + emulated = kvmppc_e500_emul_tlbre(vcpu); + break; + + case XOP_TLBWE: + emulated = kvmppc_e500_emul_tlbwe(vcpu); + break; + + case XOP_TLBSX: + rb = get_rb(inst); + emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); + break; + + case XOP_TLBIVAX: + ra = get_ra(inst); + rb = get_rb(inst); + emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); + break; + + default: + emulated = EMULATE_FAIL; + } + + break; + + default: + emulated = EMULATE_FAIL; + } + + if (emulated == EMULATE_FAIL) + emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); + + return emulated; +} + +int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int emulated = EMULATE_DONE; + + switch (sprn) { + case SPRN_PID: + vcpu_e500->pid[0] = vcpu->arch.shadow_pid = + vcpu->arch.pid = vcpu->arch.gpr[rs]; + break; + case SPRN_PID1: + vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break; + case SPRN_PID2: + vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break; + case SPRN_MAS0: + vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS1: + vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS2: + vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS3: + vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS4: + vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS6: + 
vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break; + case SPRN_MAS7: + vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break; + case SPRN_L1CSR1: + vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break; + case SPRN_HID0: + vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break; + case SPRN_HID1: + vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; + + default: + emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); + } + + return emulated; +} + +int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int emulated = EMULATE_DONE; + + switch (sprn) { + case SPRN_PID: + vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break; + case SPRN_PID1: + vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break; + case SPRN_PID2: + vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break; + case SPRN_MAS0: + vcpu->arch.gpr[rt] = vcpu_e500->mas0; break; + case SPRN_MAS1: + vcpu->arch.gpr[rt] = vcpu_e500->mas1; break; + case SPRN_MAS2: + vcpu->arch.gpr[rt] = vcpu_e500->mas2; break; + case SPRN_MAS3: + vcpu->arch.gpr[rt] = vcpu_e500->mas3; break; + case SPRN_MAS4: + vcpu->arch.gpr[rt] = vcpu_e500->mas4; break; + case SPRN_MAS6: + vcpu->arch.gpr[rt] = vcpu_e500->mas6; break; + case SPRN_MAS7: + vcpu->arch.gpr[rt] = vcpu_e500->mas7; break; + + case SPRN_TLB0CFG: + vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG); + vcpu->arch.gpr[rt] &= ~0xfffUL; + vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0]; + break; + + case SPRN_TLB1CFG: + vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG); + vcpu->arch.gpr[rt] &= ~0xfffUL; + vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1]; + break; + + case SPRN_L1CSR1: + vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break; + case SPRN_HID0: + vcpu->arch.gpr[rt] = vcpu_e500->hid0; break; + case SPRN_HID1: + vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; + + default: + emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); + } + + return emulated; +} + diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c new file mode 100644 index 00000000000..6a50340f575 --- /dev/null +++ b/arch/powerpc/kvm/e500_tlb.c @@ -0,0 +1,737 @@ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, yu.liu@freescale.com + * + * Description: + * This file is based on arch/powerpc/kvm/44x_tlb.c, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "e500_tlb.h" + +#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) + +static unsigned int tlb1_entry_num; + +void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct tlbe *tlbe; + int i, tlbsel; + + printk("| %8s | %8s | %8s | %8s | %8s |\n", + "nr", "mas1", "mas2", "mas3", "mas7"); + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + printk("Guest TLB%d:\n", tlbsel); + for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { + tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; + if (tlbe->mas1 & MAS1_VALID) + printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n", + tlbsel, i, tlbe->mas1, tlbe->mas2, + tlbe->mas3, tlbe->mas7); + } + } + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + printk("Shadow TLB%d:\n", tlbsel); + for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) { + tlbe = &vcpu_e500->shadow_tlb[tlbsel][i]; + if (tlbe->mas1 & MAS1_VALID) + printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n", + tlbsel, i, tlbe->mas1, tlbe->mas2, + tlbe->mas3, tlbe->mas7); + } + } +} + +static inline unsigned int tlb0_get_next_victim( + struct kvmppc_vcpu_e500 *vcpu_e500) +{ + unsigned int victim; + + victim = vcpu_e500->guest_tlb_nv[0]++; + if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM)) + vcpu_e500->guest_tlb_nv[0] = 0; + + return victim; +} + +static inline unsigned int tlb1_max_shadow_size(void) +{ + return tlb1_entry_num - tlbcam_index; +} + +static inline int tlbe_is_writable(struct tlbe *tlbe) +{ + return tlbe->mas3 & (MAS3_SW|MAS3_UW); +} + +static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) +{ + /* Mask off reserved bits. */ + mas3 &= MAS3_ATTRIB_MASK; + + if (!usermode) { + /* Guest is in supervisor mode, + * so we need to translate guest + * supervisor permissions into user permissions. 
*/ + mas3 &= ~E500_TLB_USER_PERM_MASK; + mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; + } + + return mas3 | E500_TLB_SUPER_PERM_MASK; +} + +static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) +{ + return mas2 & MAS2_ATTRIB_MASK; +} + +/* + * writing shadow tlb entry to host TLB + */ +static inline void __write_host_tlbe(struct tlbe *stlbe) +{ + mtspr(SPRN_MAS1, stlbe->mas1); + mtspr(SPRN_MAS2, stlbe->mas2); + mtspr(SPRN_MAS3, stlbe->mas3); + mtspr(SPRN_MAS7, stlbe->mas7); + __asm__ __volatile__ ("tlbwe\n" : : ); +} + +static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; + + local_irq_disable(); + if (tlbsel == 0) { + __write_host_tlbe(stlbe); + } else { + unsigned register mas0; + + mas0 = mfspr(SPRN_MAS0); + + mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel))); + __write_host_tlbe(stlbe); + + mtspr(SPRN_MAS0, mas0); + } + local_irq_enable(); +} + +void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int i; + unsigned register mas0; + + /* Load all valid TLB1 entries to reduce guest tlb miss fault */ + local_irq_disable(); + mas0 = mfspr(SPRN_MAS0); + for (i = 0; i < tlb1_max_shadow_size(); i++) { + struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i]; + + if (get_tlb_v(stlbe)) { + mtspr(SPRN_MAS0, MAS0_TLBSEL(1) + | MAS0_ESEL(to_htlb1_esel(i))); + __write_host_tlbe(stlbe); + } + } + mtspr(SPRN_MAS0, mas0); + local_irq_enable(); +} + +void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) +{ + _tlbia(); +} + +/* Search the guest TLB for a matching entry. */ +static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, + gva_t eaddr, int tlbsel, unsigned int pid, int as) +{ + int i; + + /* XXX Replace loop with fancy data structures. */ + for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { + struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; + unsigned int tid; + + if (eaddr < get_tlb_eaddr(tlbe)) + continue; + + if (eaddr > get_tlb_end(tlbe)) + continue; + + tid = get_tlb_tid(tlbe); + if (tid && (tid != pid)) + continue; + + if (!get_tlb_v(tlbe)) + continue; + + if (get_tlb_ts(tlbe) != as && as != -1) + continue; + + return i; + } + + return -1; +} + +static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; + struct page *page = vcpu_e500->shadow_pages[tlbsel][esel]; + + if (page) { + vcpu_e500->shadow_pages[tlbsel][esel] = NULL; + + if (get_tlb_v(stlbe)) { + if (tlbe_is_writable(stlbe)) + kvm_release_page_dirty(page); + else + kvm_release_page_clean(page); + } + } +} + +static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; + + kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); + stlbe->mas1 = 0; + KVMTRACE_5D(STLB_INVAL, &vcpu_e500->vcpu, index_of(tlbsel, esel), + stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7, + handler); +} + +static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, + gva_t eaddr, gva_t eend, u32 tid) +{ + unsigned int pid = tid & 0xff; + unsigned int i; + + /* XXX Replace loop with fancy data structures. 
*/ + for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) { + struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i]; + unsigned int tid; + + if (!get_tlb_v(stlbe)) + continue; + + if (eend < get_tlb_eaddr(stlbe)) + continue; + + if (eaddr > get_tlb_end(stlbe)) + continue; + + tid = get_tlb_tid(stlbe); + if (tid && (tid != pid)) + continue; + + kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i); + write_host_tlbe(vcpu_e500, 1, i); + } +} + +static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, + unsigned int eaddr, int as) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + unsigned int victim, pidsel, tsized; + int tlbsel; + + /* since we only have tow TLBs, only lower bit is used. */ + tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; + victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; + pidsel = (vcpu_e500->mas4 >> 16) & 0xf; + tsized = (vcpu_e500->mas4 >> 8) & 0xf; + + vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) + | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); + vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) + | MAS1_TID(vcpu_e500->pid[pidsel]) + | MAS1_TSIZE(tsized); + vcpu_e500->mas2 = (eaddr & MAS2_EPN) + | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK); + vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; + vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1) + | (get_cur_pid(vcpu) << 16) + | (as ? MAS6_SAS : 0); + vcpu_e500->mas7 = 0; +} + +static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel) +{ + struct page *new_page; + struct tlbe *stlbe; + hpa_t hpaddr; + + stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; + + /* Get reference to new page. */ + new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); + if (is_error_page(new_page)) { + printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); + kvm_release_page_clean(new_page); + return; + } + hpaddr = page_to_phys(new_page); + + /* Drop reference to old page. */ + kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); + + vcpu_e500->shadow_pages[tlbsel][esel] = new_page; + + /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ + stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K) + | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; + stlbe->mas2 = (gvaddr & MAS2_EPN) + | e500_shadow_mas2_attrib(gtlbe->mas2, + vcpu_e500->vcpu.arch.msr & MSR_PR); + stlbe->mas3 = (hpaddr & MAS3_RPN) + | e500_shadow_mas3_attrib(gtlbe->mas3, + vcpu_e500->vcpu.arch.msr & MSR_PR); + stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; + + KVMTRACE_5D(STLB_WRITE, &vcpu_e500->vcpu, index_of(tlbsel, esel), + stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7, + handler); +} + +/* XXX only map the one-one case, for now use TLB0 */ +static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct tlbe *gtlbe; + + gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; + + kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), + get_tlb_raddr(gtlbe) >> PAGE_SHIFT, + gtlbe, tlbsel, esel); + + return esel; +} + +/* Caller must ensure that the specified guest TLB entry is safe to insert into + * the shadow TLB. 
*/ +/* XXX for both one-one and one-to-many , for now use TLB1 */ +static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe) +{ + unsigned int victim; + + victim = vcpu_e500->guest_tlb_nv[1]++; + + if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size())) + vcpu_e500->guest_tlb_nv[1] = 0; + + kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim); + + return victim; +} + +/* Invalidate all guest kernel mappings when enter usermode, + * so that when they fault back in they will get the + * proper permission bits. */ +void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) +{ + if (usermode) { + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int i; + + /* XXX Replace loop with fancy data structures. */ + /* needn't set modified since tlbia will make TLB1 coherent */ + for (i = 0; i < tlb1_max_shadow_size(); i++) + kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i); + + _tlbia(); + } +} + +static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; + + if (unlikely(get_tlb_iprot(gtlbe))) + return -1; + + if (tlbsel == 1) { + kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe), + get_tlb_end(gtlbe), + get_tlb_tid(gtlbe)); + } else { + kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel); + } + + gtlbe->mas1 = 0; + + return 0; +} + +int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + unsigned int ia; + int esel, tlbsel; + gva_t ea; + + ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb]; + + ia = (ea >> 2) & 0x1; + + /* since we only have tow TLBs, only lower bit is used. */ + tlbsel = (ea >> 3) & 0x1; + + if (ia) { + /* invalidate all entries */ + for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + } else { + ea &= 0xfffff000; + esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, + get_cur_pid(vcpu), -1); + if (esel >= 0) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + } + + _tlbia(); + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int tlbsel, esel; + struct tlbe *gtlbe; + + tlbsel = get_tlb_tlbsel(vcpu_e500); + esel = get_tlb_esel(vcpu_e500, tlbsel); + + gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; + vcpu_e500->mas0 &= MAS0_NV(0); + vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); + vcpu_e500->mas1 = gtlbe->mas1; + vcpu_e500->mas2 = gtlbe->mas2; + vcpu_e500->mas3 = gtlbe->mas3; + vcpu_e500->mas7 = gtlbe->mas7; + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int as = !!get_cur_sas(vcpu_e500); + unsigned int pid = get_cur_spid(vcpu_e500); + int esel, tlbsel; + struct tlbe *gtlbe = NULL; + gva_t ea; + + ea = vcpu->arch.gpr[rb]; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); + if (esel >= 0) { + gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; + break; + } + } + + if (gtlbe) { + vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) + | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); + vcpu_e500->mas1 = gtlbe->mas1; + vcpu_e500->mas2 = gtlbe->mas2; + vcpu_e500->mas3 = gtlbe->mas3; + vcpu_e500->mas7 = gtlbe->mas7; + } else { + int victim; + + /* since we only have tow TLBs, only lower bit 
is used. */ + tlbsel = vcpu_e500->mas4 >> 28 & 0x1; + victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; + + vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) + | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); + vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0) + | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0)) + | (vcpu_e500->mas4 & MAS4_TSIZED(~0)); + vcpu_e500->mas2 &= MAS2_EPN; + vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK; + vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; + vcpu_e500->mas7 = 0; + } + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + u64 eaddr; + u64 raddr; + u32 tid; + struct tlbe *gtlbe; + int tlbsel, esel, stlbsel, sesel; + + tlbsel = get_tlb_tlbsel(vcpu_e500); + esel = get_tlb_esel(vcpu_e500, tlbsel); + + gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; + + if (get_tlb_v(gtlbe) && tlbsel == 1) { + eaddr = get_tlb_eaddr(gtlbe); + tid = get_tlb_tid(gtlbe); + kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr, + get_tlb_end(gtlbe), tid); + } + + gtlbe->mas1 = vcpu_e500->mas1; + gtlbe->mas2 = vcpu_e500->mas2; + gtlbe->mas3 = vcpu_e500->mas3; + gtlbe->mas7 = vcpu_e500->mas7; + + KVMTRACE_5D(GTLB_WRITE, vcpu, vcpu_e500->mas0, + gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7, + handler); + + /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ + if (tlbe_is_host_safe(vcpu, gtlbe)) { + switch (tlbsel) { + case 0: + /* TLB0 */ + gtlbe->mas1 &= ~MAS1_TSIZE(~0); + gtlbe->mas1 |= MAS1_TSIZE(BOOKE_PAGESZ_4K); + + stlbsel = 0; + sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel); + + break; + + case 1: + /* TLB1 */ + eaddr = get_tlb_eaddr(gtlbe); + raddr = get_tlb_raddr(gtlbe); + + /* Create a 4KB mapping on the host. + * If the guest wanted a large page, + * only the first 4KB is mapped here and the rest + * are mapped on the fly. 
*/ + stlbsel = 1; + sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, + raddr >> PAGE_SHIFT, gtlbe); + break; + + default: + BUG(); + } + write_host_tlbe(vcpu_e500, stlbsel, sesel); + } + + return EMULATE_DONE; +} + +int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +{ + unsigned int as = !!(vcpu->arch.msr & MSR_IS); + + return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); +} + +int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +{ + unsigned int as = !!(vcpu->arch.msr & MSR_DS); + + return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); +} + +void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) +{ + unsigned int as = !!(vcpu->arch.msr & MSR_IS); + + kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); +} + +void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) +{ + unsigned int as = !!(vcpu->arch.msr & MSR_DS); + + kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); +} + +gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, + gva_t eaddr) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct tlbe *gtlbe = + &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)]; + u64 pgmask = get_tlb_bytes(gtlbe) - 1; + + return get_tlb_raddr(gtlbe) | (eaddr & pgmask); +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int tlbsel, i; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) + for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) + kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i); + + /* discard all guest mapping */ + _tlbia(); +} + +void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, + unsigned int index) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int tlbsel = tlbsel_of(index); + int esel = esel_of(index); + int stlbsel, sesel; + + switch (tlbsel) { + case 0: + stlbsel = 0; + sesel = esel; + break; + + case 1: { + gfn_t gfn = gpaddr >> PAGE_SHIFT; + struct tlbe *gtlbe + = &vcpu_e500->guest_tlb[tlbsel][esel]; + + stlbsel = 1; + sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe); + break; + } + + default: + BUG(); + break; + } + write_host_tlbe(vcpu_e500, stlbsel, sesel); +} + +int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, + gva_t eaddr, unsigned int pid, int as) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int esel, tlbsel; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); + if (esel >= 0) + return index_of(tlbsel, esel); + } + + return -1; +} + +void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + struct tlbe *tlbe; + + /* Insert large initial mapping for guest. */ + tlbe = &vcpu_e500->guest_tlb[1][0]; + tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_256M); + tlbe->mas2 = 0; + tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; + tlbe->mas7 = 0; + + /* 4K map for serial output. Used by kernel wrapper. 
*/ + tlbe = &vcpu_e500->guest_tlb[1][1]; + tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_4K); + tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; + tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; + tlbe->mas7 = 0; +} + +int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF; + + vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE; + vcpu_e500->guest_tlb[0] = + kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL); + if (vcpu_e500->guest_tlb[0] == NULL) + goto err_out; + + vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE; + vcpu_e500->shadow_tlb[0] = + kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL); + if (vcpu_e500->shadow_tlb[0] == NULL) + goto err_out_guest0; + + vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE; + vcpu_e500->guest_tlb[1] = + kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL); + if (vcpu_e500->guest_tlb[1] == NULL) + goto err_out_shadow0; + + vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num; + vcpu_e500->shadow_tlb[1] = + kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL); + if (vcpu_e500->shadow_tlb[1] == NULL) + goto err_out_guest1; + + vcpu_e500->shadow_pages[0] = (struct page **) + kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL); + if (vcpu_e500->shadow_pages[0] == NULL) + goto err_out_shadow1; + + vcpu_e500->shadow_pages[1] = (struct page **) + kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL); + if (vcpu_e500->shadow_pages[1] == NULL) + goto err_out_page0; + + return 0; + +err_out_page0: + kfree(vcpu_e500->shadow_pages[0]); +err_out_shadow1: + kfree(vcpu_e500->shadow_tlb[1]); +err_out_guest1: + kfree(vcpu_e500->guest_tlb[1]); +err_out_shadow0: + kfree(vcpu_e500->shadow_tlb[0]); +err_out_guest0: + kfree(vcpu_e500->guest_tlb[0]); +err_out: + return -1; +} + +void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + kfree(vcpu_e500->shadow_pages[1]); + kfree(vcpu_e500->shadow_pages[0]); + kfree(vcpu_e500->shadow_tlb[1]); + kfree(vcpu_e500->guest_tlb[1]); + kfree(vcpu_e500->shadow_tlb[0]); + kfree(vcpu_e500->guest_tlb[0]); +} diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h new file mode 100644 index 00000000000..d8833f95516 --- /dev/null +++ b/arch/powerpc/kvm/e500_tlb.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, yu.liu@freescale.com + * + * Description: + * This file is based on arch/powerpc/kvm/44x_tlb.h, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ */ + +#ifndef __KVM_E500_TLB_H__ +#define __KVM_E500_TLB_H__ + +#include +#include +#include +#include + +#define KVM_E500_TLB0_WAY_SIZE_BIT 7 /* Fixed */ +#define KVM_E500_TLB0_WAY_SIZE (1UL << KVM_E500_TLB0_WAY_SIZE_BIT) +#define KVM_E500_TLB0_WAY_SIZE_MASK (KVM_E500_TLB0_WAY_SIZE - 1) + +#define KVM_E500_TLB0_WAY_NUM_BIT 1 /* No greater than 7 */ +#define KVM_E500_TLB0_WAY_NUM (1UL << KVM_E500_TLB0_WAY_NUM_BIT) +#define KVM_E500_TLB0_WAY_NUM_MASK (KVM_E500_TLB0_WAY_NUM - 1) + +#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) +#define KVM_E500_TLB1_SIZE 16 + +#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF)) +#define tlbsel_of(index) ((index) >> 16) +#define esel_of(index) ((index) & 0xFFFF) + +#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) +#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) +#define MAS2_ATTRIB_MASK \ + (MAS2_X0 | MAS2_X1 | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E) +#define MAS3_ATTRIB_MASK \ + (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ + | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) + +extern void kvmppc_dump_tlbs(struct kvm_vcpu *); +extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *); +extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *); +extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int); +extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int); +extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int); +extern void kvmppc_e500_tlb_put(struct kvm_vcpu *); +extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int); +extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *); +extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *); +extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); + +/* TLB helper functions */ +static inline unsigned int get_tlb_size(const struct tlbe *tlbe) +{ + return (tlbe->mas1 >> 8) & 0xf; +} + +static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) +{ + return tlbe->mas2 & 0xfffff000; +} + +static inline u64 get_tlb_bytes(const struct tlbe *tlbe) +{ + unsigned int pgsize = get_tlb_size(tlbe); + return 1ULL << 10 << (pgsize << 1); +} + +static inline gva_t get_tlb_end(const struct tlbe *tlbe) +{ + u64 bytes = get_tlb_bytes(tlbe); + return get_tlb_eaddr(tlbe) + bytes - 1; +} + +static inline u64 get_tlb_raddr(const struct tlbe *tlbe) +{ + u64 rpn = tlbe->mas7; + return (rpn << 32) | (tlbe->mas3 & 0xfffff000); +} + +static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) +{ + return (tlbe->mas1 >> 16) & 0xff; +} + +static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) +{ + return (tlbe->mas1 >> 12) & 0x1; +} + +static inline unsigned int get_tlb_v(const struct tlbe *tlbe) +{ + return (tlbe->mas1 >> 31) & 0x1; +} + +static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe) +{ + return (tlbe->mas1 >> 30) & 0x1; +} + +static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.pid & 0xff; +} + +static inline unsigned int get_cur_spid( + const struct kvmppc_vcpu_e500 *vcpu_e500) +{ + return (vcpu_e500->mas6 >> 16) & 0xff; +} + +static inline unsigned int get_cur_sas( + const struct kvmppc_vcpu_e500 *vcpu_e500) +{ + return vcpu_e500->mas6 & 0x1; +} + +static inline unsigned int get_tlb_tlbsel( + const struct kvmppc_vcpu_e500 *vcpu_e500) +{ + /* + * Manual says that tlbsel has 2 bits wide. + * Since we only have tow TLBs, only lower bit is used. 
+ */ + return (vcpu_e500->mas0 >> 28) & 0x1; +} + +static inline unsigned int get_tlb_nv_bit( + const struct kvmppc_vcpu_e500 *vcpu_e500) +{ + return vcpu_e500->mas0 & 0xfff; +} + +static inline unsigned int get_tlb_esel_bit( + const struct kvmppc_vcpu_e500 *vcpu_e500) +{ + return (vcpu_e500->mas0 >> 16) & 0xfff; +} + +static inline unsigned int get_tlb_esel( + const struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel) +{ + unsigned int esel = get_tlb_esel_bit(vcpu_e500); + + if (tlbsel == 0) { + esel &= KVM_E500_TLB0_WAY_NUM_MASK; + esel |= ((vcpu_e500->mas2 >> 12) & KVM_E500_TLB0_WAY_SIZE_MASK) + << KVM_E500_TLB0_WAY_NUM_BIT; + } else { + esel &= KVM_E500_TLB1_SIZE - 1; + } + + return esel; +} + +static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, + const struct tlbe *tlbe) +{ + gpa_t gpa; + + if (!get_tlb_v(tlbe)) + return 0; + + /* Does it match current guest AS? */ + /* XXX what about IS != DS? */ + if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) + return 0; + + gpa = get_tlb_raddr(tlbe); + if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) + /* Mapping is not for RAM. */ + return 0; + + return 1; +} + +#endif /* __KVM_E500_TLB_H__ */ -- cgit v1.2.3 From b52a638c391c5c7b013180f5374274698b8535c8 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:11 -0600 Subject: KVM: ppc: Add kvmppc_mmu_dtlb/itlb_miss for booke When itlb or dtlb miss happens, E500 needs to update some mmu registers. So that the auto-load mechanism can work on E500 when write a tlb entry. Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_ppc.h | 2 ++ arch/powerpc/kvm/44x_tlb.c | 8 ++++++++ arch/powerpc/kvm/booke.c | 2 ++ 3 files changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 5e80a20f32b..6052779dbb5 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -63,6 +63,8 @@ extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, gva_t eaddr); +extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu); +extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu); extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id); diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index e67b7313ffc..4a16f472cc1 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -232,6 +232,14 @@ int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); } +void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) +{ +} + +void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) +{ +} + static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, unsigned int stlb_index) { diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index a73b3959e41..933c406c791 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -295,6 +295,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); vcpu->arch.dear = vcpu->arch.fault_dear; vcpu->arch.esr = vcpu->arch.fault_esr; + kvmppc_mmu_dtlb_miss(vcpu); kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); r = RESUME_GUEST; break; @@ -337,6 +338,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, if 
(gtlb_index < 0) { /* The guest didn't have a mapping for it. */ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); + kvmppc_mmu_itlb_miss(vcpu); kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); break; } -- cgit v1.2.3 From bdc89f13ec955c14777d5caf02dfca3f51d639bd Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:12 -0600 Subject: KVM: ppc: distinguish between interrupts and priorities Although BOOKE_MAX_INTERRUPT has the right value, the meaning is not match. Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/booke.c | 2 +- arch/powerpc/kvm/booke.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 933c406c791..f192fbee2f2 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -163,7 +163,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) unsigned int priority; priority = __ffs(*pending); - while (priority <= BOOKE_MAX_INTERRUPT) { + while (priority <= BOOKE_IRQPRIO_MAX) { if (kvmppc_booke_irqprio_deliver(vcpu, priority)) break; diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 311fdbc23fb..7ceeb3e739b 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -42,6 +42,7 @@ #define BOOKE_IRQPRIO_EXTERNAL 13 #define BOOKE_IRQPRIO_FIT 14 #define BOOKE_IRQPRIO_DECREMENTER 15 +#define BOOKE_IRQPRIO_MAX 15 /* Helper function for "full" MSR writes. No need to call this if only EE is * changing. */ -- cgit v1.2.3 From bb3a8a178dec1e46df3138a30f76acf67fe12397 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sat, 3 Jan 2009 16:23:13 -0600 Subject: KVM: ppc: Add extra E500 exceptions e500 has additional interrupt vectors (and corresponding IVORs) for SPE and performance monitoring interrupts. Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_asm.h | 7 ++++++- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/kvm/booke.c | 18 ++++++++++++++++++ arch/powerpc/kvm/booke.h | 30 ++++++++++++++++++------------ arch/powerpc/kvm/booke_interrupts.S | 3 +++ arch/powerpc/kvm/e500.c | 20 +++++++++++++++++++- arch/powerpc/kvm/e500_emulate.c | 27 +++++++++++++++++++++++++++ 7 files changed, 92 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 2197764796d..56bfae59837 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -42,7 +42,12 @@ #define BOOKE_INTERRUPT_DTLB_MISS 13 #define BOOKE_INTERRUPT_ITLB_MISS 14 #define BOOKE_INTERRUPT_DEBUG 15 -#define BOOKE_MAX_INTERRUPT 15 + +/* E500 */ +#define BOOKE_INTERRUPT_SPE_UNAVAIL 32 +#define BOOKE_INTERRUPT_SPE_FP_DATA 33 +#define BOOKE_INTERRUPT_SPE_FP_ROUND 34 +#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 50e5ce1b400..1c618769752 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -150,7 +150,7 @@ struct kvm_vcpu_arch { u32 tbu; u32 tcr; u32 tsr; - u32 ivor[16]; + u32 ivor[64]; ulong ivpr; u32 pir; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index f192fbee2f2..642e4204cf2 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -118,6 +118,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, case BOOKE_IRQPRIO_DATA_STORAGE: case BOOKE_IRQPRIO_INST_STORAGE: case BOOKE_IRQPRIO_FP_UNAVAIL: + case BOOKE_IRQPRIO_SPE_UNAVAIL: + case BOOKE_IRQPRIO_SPE_FP_DATA: + case BOOKE_IRQPRIO_SPE_FP_ROUND: case BOOKE_IRQPRIO_AP_UNAVAIL: case BOOKE_IRQPRIO_ALIGNMENT: allowed = 1; @@ -261,6 +264,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; + case BOOKE_INTERRUPT_SPE_UNAVAIL: + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL); + r = RESUME_GUEST; + break; + + case BOOKE_INTERRUPT_SPE_FP_DATA: + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); + r = RESUME_GUEST; + break; + + case BOOKE_INTERRUPT_SPE_FP_ROUND: + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); + r = RESUME_GUEST; + break; + case BOOKE_INTERRUPT_DATA_STORAGE: vcpu->arch.dear = vcpu->arch.fault_dear; vcpu->arch.esr = vcpu->arch.fault_esr; diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 7ceeb3e739b..d59bcca1f9d 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -31,18 +31,24 @@ #define BOOKE_IRQPRIO_ALIGNMENT 2 #define BOOKE_IRQPRIO_PROGRAM 3 #define BOOKE_IRQPRIO_FP_UNAVAIL 4 -#define BOOKE_IRQPRIO_SYSCALL 5 -#define BOOKE_IRQPRIO_AP_UNAVAIL 6 -#define BOOKE_IRQPRIO_DTLB_MISS 7 -#define BOOKE_IRQPRIO_ITLB_MISS 8 -#define BOOKE_IRQPRIO_MACHINE_CHECK 9 -#define BOOKE_IRQPRIO_DEBUG 10 -#define BOOKE_IRQPRIO_CRITICAL 11 -#define BOOKE_IRQPRIO_WATCHDOG 12 -#define BOOKE_IRQPRIO_EXTERNAL 13 -#define BOOKE_IRQPRIO_FIT 14 -#define BOOKE_IRQPRIO_DECREMENTER 15 -#define BOOKE_IRQPRIO_MAX 15 +#define BOOKE_IRQPRIO_SPE_UNAVAIL 5 +#define BOOKE_IRQPRIO_SPE_FP_DATA 6 +#define BOOKE_IRQPRIO_SPE_FP_ROUND 7 +#define BOOKE_IRQPRIO_SYSCALL 8 +#define BOOKE_IRQPRIO_AP_UNAVAIL 9 +#define BOOKE_IRQPRIO_DTLB_MISS 10 +#define BOOKE_IRQPRIO_ITLB_MISS 11 +#define BOOKE_IRQPRIO_MACHINE_CHECK 12 +#define BOOKE_IRQPRIO_DEBUG 13 +#define BOOKE_IRQPRIO_CRITICAL 14 +#define BOOKE_IRQPRIO_WATCHDOG 15 +#define BOOKE_IRQPRIO_EXTERNAL 16 +#define BOOKE_IRQPRIO_FIT 17 +#define BOOKE_IRQPRIO_DECREMENTER 18 +#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 +#define BOOKE_IRQPRIO_MAX 19 + +extern unsigned long kvmppc_booke_handlers; /* Helper function for "full" MSR writes. No need to call this if only EE is * changing. 
*/ diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 4679ec287e6..d0c6f841bbd 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -86,6 +86,9 @@ KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS KVM_HANDLER BOOKE_INTERRUPT_DEBUG +KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL +KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA +KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND _GLOBAL(kvmppc_handler_len) .long kvmppc_handler_1 - kvmppc_handler_0 diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 7992da497cd..d8067fd81cd 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -21,6 +21,7 @@ #include #include +#include "booke.h" #include "e500_tlb.h" void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) @@ -133,12 +134,29 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) static int kvmppc_e500_init(void) { - int r; + int r, i; + unsigned long ivor[3]; + unsigned long max_ivor = 0; r = kvmppc_booke_init(); if (r) return r; + /* copy extra E500 exception handlers */ + ivor[0] = mfspr(SPRN_IVOR32); + ivor[1] = mfspr(SPRN_IVOR33); + ivor[2] = mfspr(SPRN_IVOR34); + for (i = 0; i < 3; i++) { + if (ivor[i] > max_ivor) + max_ivor = ivor[i]; + + memcpy((void *)kvmppc_booke_handlers + ivor[i], + kvmppc_handlers_start + (i + 16) * kvmppc_handler_len, + kvmppc_handler_len); + } + flush_icache_range(kvmppc_booke_handlers, + kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); + return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); } diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index a47f44cd2cf..d3c0c7c7f20 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -107,6 +107,20 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_HID1: vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; + /* extra exceptions */ + case SPRN_IVOR32: + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR33: + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR34: + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs]; + break; + case SPRN_IVOR35: + vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs]; + break; + default: emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); } @@ -160,6 +174,19 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) case SPRN_HID1: vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; + /* extra exceptions */ + case SPRN_IVOR32: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; + break; + case SPRN_IVOR33: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; + break; + case SPRN_IVOR34: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; + break; + case SPRN_IVOR35: + vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; + break; default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); } -- cgit v1.2.3 From 1872a3f411ffe95c8e92300e0986a3532db55ce9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 23:26:52 +0200 Subject: KVM: VMX: Fix guest state validity checks The vmx guest state validity checks are full of bugs. Make them conform to the manual. 
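One of the bugs the hunks below fix is a classic boolean slip in stack_segment_valid() and tr_valid(): a condition of the form (x != a) || (x != b) is true for every x, so the old code rejected every SS type (which must be 3 or 7) and every TR type (which must be 3 or 11). A small standalone C illustration of why the || had to become &&:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int type = 0; type < 16; type++) {
			int reject_old = (type != 3) || (type != 7); /* fires for every type */
			int reject_new = (type != 3) && (type != 7); /* fires unless type is 3 or 7 */
			printf("type %2u: old reject=%d new reject=%d\n",
			       type, reject_old, reject_new);
		}
		return 0;
	}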
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3312047664a..be9441056ff 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1784,14 +1784,16 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SELECTOR_RPL_MASK; + if (cs.unusable) + return false; if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; - if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) { + if (cs.type & AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; - } else if (cs.type & AR_TYPE_CODE_MASK) { + } else { if (cs.dpl != cs_rpl) return false; } @@ -1810,7 +1812,9 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SELECTOR_RPL_MASK; - if ((ss.type != 3) || (ss.type != 7)) + if (ss.unusable) + return true; + if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; @@ -1830,6 +1834,8 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SELECTOR_RPL_MASK; + if (var.unusable) + return true; if (!var.s) return false; if (!var.present) @@ -1851,9 +1857,11 @@ static bool tr_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); + if (tr.unusable) + return false; if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; - if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ + if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; @@ -1867,6 +1875,8 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); + if (ldtr.unusable) + return true; if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) -- cgit v1.2.3 From 9fd4a3b7a412f983696b23121413a79d2132fed6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 23:43:42 +0200 Subject: KVM: VMX: don't clobber segment AR if emulating invalid state The ususable bit is important for determining state validity; don't clobber it. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index be9441056ff..a6598cbaa00 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1649,7 +1649,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, var->limit = vmcs_read32(sf->limit); var->selector = vmcs_read16(sf->selector); ar = vmcs_read32(sf->ar_bytes); - if (ar & AR_UNUSABLE_MASK) + if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state) ar = 0; var->type = ar & 15; var->s = (ar >> 4) & 1; -- cgit v1.2.3 From 10f32d84c750ccf8c0afb3a4ea9d4059aa3e9ffc Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 00:53:19 +0200 Subject: KVM: VMX: Prevent exit handler from running if emulating due to invalid state If we've just emulated an instruction, we won't have any valid exit reason and associated information. Fix by moving the clearing of the emulation_required flag to the exit handler. This way the exit handler can notice that we've been emulating and abort early. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a6598cbaa00..a309be6788e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3130,7 +3130,6 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - struct vcpu_vmx *vmx = to_vmx(vcpu); int err; preempt_enable(); @@ -3155,11 +3154,6 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, local_irq_disable(); preempt_disable(); - - /* Guest state should be valid now except if we need to - * emulate an MMIO */ - if (guest_state_valid(vcpu)) - vmx->emulation_required = 0; } /* @@ -3208,8 +3202,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* If we need to emulate an MMIO from handle_invalid_guest_state * we just return 0 */ - if (vmx->emulation_required && emulate_invalid_guest_state) + if (vmx->emulation_required && emulate_invalid_guest_state) { + if (guest_state_valid(vcpu)) + vmx->emulation_required = 0; return 0; + } /* Access CR3 don't cause VMExit in paging mode, so we need * to sync with guest real CR3. */ -- cgit v1.2.3 From 350f69dcd169d536307aa4a8c38c480e3a51c0db Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 11:12:40 +0200 Subject: KVM: x86 emulator: Make emulate_pop() a little more generic Allow emulate_pop() to read into arbitrary memory rather than just the source operand. Needed for complicated instructions like far returns. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 54fb09889a8..94459f313f1 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1136,18 +1136,19 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt) } static int emulate_pop(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) + struct x86_emulate_ops *ops, + void *dest, int len) { struct decode_cache *c = &ctxt->decode; int rc; rc = ops->read_emulated(register_address(c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]), - &c->src.val, c->src.bytes, ctxt->vcpu); + dest, len, ctxt->vcpu); if (rc != 0) return rc; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes); + register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); return rc; } @@ -1157,11 +1158,9 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, struct decode_cache *c = &ctxt->decode; int rc; - c->src.bytes = c->dst.bytes; - rc = emulate_pop(ctxt, ops); + rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes); if (rc != 0) return rc; - c->dst.val = c->src.val; return 0; } @@ -1467,11 +1466,9 @@ special_insn: break; case 0x58 ... 0x5f: /* pop reg */ pop_instruction: - c->src.bytes = c->op_bytes; - rc = emulate_pop(ctxt, ops); + rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); if (rc != 0) goto done; - c->dst.val = c->src.val; break; case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) -- cgit v1.2.3 From 8b3079a5c0c031de07c8390aa160a4229088274f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 12:10:54 +0200 Subject: KVM: VMX: When emulating on invalid vmx state, don't return to userspace unnecessarily If we aren't doing mmio there's no need to exit to userspace (which will just be confused). 
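The "confusion" comes from the exit-handler return convention: a non-zero return keeps the vcpu running in the kernel, while zero makes KVM_RUN return so userspace can inspect kvm_run->exit_reason, which only carries something meaningful here when the in-kernel emulation stopped on MMIO. A standalone model of the decision the hunk below adds (not the kernel code itself):

	#include <stdio.h>

	enum emulation_result { EMULATE_DONE, EMULATE_DO_MMIO, EMULATE_FAIL };

	/* returns 1 to keep running in the kernel, 0 to drop to userspace */
	static int invalid_state_exit_decision(enum emulation_result last)
	{
		return last != EMULATE_DO_MMIO;
	}

	int main(void)
	{
		printf("plain emulation: %d\n", invalid_state_exit_decision(EMULATE_DONE));
		printf("MMIO emulation:  %d\n", invalid_state_exit_decision(EMULATE_DO_MMIO));
		return 0;
	}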
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a309be6788e..df454de8acf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -91,6 +91,7 @@ struct vcpu_vmx { } rmode; int vpid; bool emulation_required; + enum emulation_result invalid_state_emulation_result; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; @@ -3130,7 +3131,8 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - int err; + struct vcpu_vmx *vmx = to_vmx(vcpu); + enum emulation_result err = EMULATE_DONE; preempt_enable(); local_irq_enable(); @@ -3154,6 +3156,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, local_irq_disable(); preempt_disable(); + + vmx->invalid_state_emulation_result = err; } /* @@ -3205,7 +3209,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (vmx->emulation_required && emulate_invalid_guest_state) { if (guest_state_valid(vcpu)) vmx->emulation_required = 0; - return 0; + return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO; } /* Access CR3 don't cause VMExit in paging mode, so we need -- cgit v1.2.3 From a77ab5ead5c1fef2c6c5a9b3cf3765e52643a2aa Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 13:27:34 +0200 Subject: KVM: x86 emulator: implement 'ret far' instruction (opcode 0xcb) Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 94459f313f1..ca91749d208 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -178,7 +178,7 @@ static u32 opcode_table[256] = { 0, ImplicitOps | Stack, 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, /* 0xC8 - 0xCF */ - 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, ImplicitOps | Stack, 0, 0, 0, 0, /* 0xD0 - 0xD7 */ ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, @@ -1278,6 +1278,25 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, return 0; } +static int emulate_ret_far(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops) +{ + struct decode_cache *c = &ctxt->decode; + int rc; + unsigned long cs; + + rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes); + if (rc) + return rc; + if (c->op_bytes == 4) + c->eip = (u32)c->eip; + rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); + if (rc) + return rc; + rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS); + return rc; +} + static inline int writeback(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { @@ -1735,6 +1754,11 @@ special_insn: mov: c->dst.val = c->src.val; break; + case 0xcb: /* ret far */ + rc = emulate_ret_far(ctxt, ops); + if (rc) + goto done; + break; case 0xd0 ... 0xd1: /* Grp2 */ c->src.val = 1; emulate_grp2(ctxt); -- cgit v1.2.3 From 269e05e48502f1cc06802e9fba90f5100dd6bb0d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 15:21:42 +0200 Subject: KVM: Properly lock PIT creation Otherwise, two threads can create a PIT in parallel and cause a memory leak. 
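Two things make the fix below work: the existence check and the creation are now one critical section under kvm->lock, so the loser of a KVM_CREATE_PIT race gets -EEXIST instead of leaking a second PIT, and the inner mutex_lock that kvm_create_pit() used to take around kvm_request_irq_source_id() has to go, since the mutex is not recursive and the caller now already holds it. Condensed shape of the fixed ioctl path, mirroring the hunk below:

	case KVM_CREATE_PIT:
		mutex_lock(&kvm->lock);
		r = -EEXIST;
		if (kvm->arch.vpit)             /* somebody else won the race */
			goto create_pit_unlock;
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);   /* called with kvm->lock held */
		if (kvm->arch.vpit)
			r = 0;
	create_pit_unlock:
		mutex_unlock(&kvm->lock);
		break;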
Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 2 -- arch/x86/kvm/x86.c | 6 ++++++ 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 528daadeba4..69d1bbff3fd 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -548,9 +548,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) if (!pit) return NULL; - mutex_lock(&kvm->lock); pit->irq_source_id = kvm_request_irq_source_id(kvm); - mutex_unlock(&kvm->lock); if (pit->irq_source_id < 0) { kfree(pit); return NULL; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a1f14611f4b..6fbc3460337 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1837,10 +1837,16 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; break; case KVM_CREATE_PIT: + mutex_lock(&kvm->lock); + r = -EEXIST; + if (kvm->arch.vpit) + goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm); if (kvm->arch.vpit) r = 0; + create_pit_unlock: + mutex_unlock(&kvm->lock); break; case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; -- cgit v1.2.3 From f5d0906b5bafd7faea553ed1cc92bd06755b66b9 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Sun, 4 Jan 2009 13:51:09 -0600 Subject: KVM: ppc: remove debug support broken by KVM debug rewrite After the rewrite of KVM's debug support, this code doesn't even build any more. Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 5 --- arch/powerpc/include/asm/kvm_ppc.h | 3 -- arch/powerpc/kvm/44x.c | 66 ------------------------------------- arch/powerpc/kvm/powerpc.c | 27 ++------------- 4 files changed, 2 insertions(+), 99 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1c618769752..dfdf13c9fef 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -111,11 +111,6 @@ struct kvm_arch { struct kvm_vcpu_arch { u32 host_stack; u32 host_pid; - u32 host_dbcr0; - u32 host_dbcr1; - u32 host_dbcr2; - u32 host_iac[4]; - u32 host_msr; u64 fpr[32]; ulong gpr[32]; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 6052779dbb5..2c6ee349df5 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -77,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); -extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu); -extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu); - extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 8383603dd8a..0cef809cec2 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -28,72 +28,6 @@ #include "44x_tlb.h" -/* Note: clearing MSR[DE] just means that the debug interrupt will not be - * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. - * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt - * will be delivered as an "imprecise debug event" (which is indicated by - * DBSR[IDE]. 
- */ -static void kvm44x_disable_debug_interrupts(void) -{ - mtmsr(mfmsr() & ~MSR_DE); -} - -void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) -{ - kvm44x_disable_debug_interrupts(); - - mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); - mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); - mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); - mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); - mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); - mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); - mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); - mtmsr(vcpu->arch.host_msr); -} - -void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) -{ - struct kvm_guest_debug *dbg = &vcpu->guest_debug; - u32 dbcr0 = 0; - - vcpu->arch.host_msr = mfmsr(); - kvm44x_disable_debug_interrupts(); - - /* Save host debug register state. */ - vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); - vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); - vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); - vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); - vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); - vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); - vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); - - /* set registers up for guest */ - - if (dbg->bp[0]) { - mtspr(SPRN_IAC1, dbg->bp[0]); - dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; - } - if (dbg->bp[1]) { - mtspr(SPRN_IAC2, dbg->bp[1]); - dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; - } - if (dbg->bp[2]) { - mtspr(SPRN_IAC3, dbg->bp[2]); - dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; - } - if (dbg->bp[3]) { - mtspr(SPRN_IAC4, dbg->bp[3]); - dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; - } - - mtspr(SPRN_DBCR0, dbcr0); - mtspr(SPRN_DBCR1, 0); - mtspr(SPRN_DBCR2, 0); -} - void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { kvmppc_44x_tlb_load(vcpu); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f30be9ef231..9057335fdc6 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -221,41 +221,18 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - if (vcpu->guest_debug.enabled) - kvmppc_core_load_guest_debugstate(vcpu); - kvmppc_core_vcpu_load(vcpu, cpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - if (vcpu->guest_debug.enabled) - kvmppc_core_load_host_debugstate(vcpu); - - /* Don't leave guest TLB entries resident when being de-scheduled. */ - /* XXX It would be nice to differentiate between heavyweight exit and - * sched_out here, since we could avoid the TLB flush for heavyweight - * exits. */ - _tlbil_all(); kvmppc_core_vcpu_put(vcpu); } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) + struct kvm_guest_debug *dbg) { - int i; - - vcpu->guest_debug.enabled = dbg->enabled; - if (vcpu->guest_debug.enabled) { - for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) { - if (dbg->breakpoints[i].enabled) - vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; - else - vcpu->guest_debug.bp[i] = 0; - } - } - - return 0; + return -EINVAL; } static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From c8a73f186bf62235b6fb5dd52601d641917dd50b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 5 Jan 2009 16:02:47 +0100 Subject: KVM: SVM: Add microcode patch level dummy VMware ESX checks if the microcode level is correct when using a barcelona CPU, in order to see if it actually can use SVM. Let's tell it we're on the safe side... 
Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3a7a314d55..3a60c3f04e6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1928,6 +1928,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_VM_CR: *data = 0; break; + case MSR_IA32_UCODE_REV: + *data = 0x01000065; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } -- cgit v1.2.3 From 4677a3b693e035f186e2875259b9a0bb94c42fbe Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 6 Jan 2009 13:00:27 +0200 Subject: KVM: MMU: Optimize page unshadowing Using kvm_mmu_lookup_page() will result in multiple scans of the hash chains; use hlist_for_each_entry_safe() to achieve a single scan instead. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 15850809b55..aac0499947d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1471,11 +1471,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) { + unsigned index; + struct hlist_head *bucket; struct kvm_mmu_page *sp; + struct hlist_node *node, *nn; - while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) { - pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word); - kvm_mmu_zap_page(kvm, sp); + index = kvm_page_table_hashfn(gfn); + bucket = &kvm->arch.mmu_page_hash[index]; + hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { + if (sp->gfn == gfn && !sp->role.metaphysical + && !sp->role.invalid) { + pgprintk("%s: zap %lx %x\n", + __func__, gfn, sp->role.word); + kvm_mmu_zap_page(kvm, sp); + } } } -- cgit v1.2.3 From 5d9b8e30f543a9f21a968a4cda71e8f6d1c66a61 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 18:04:18 +0200 Subject: KVM: Add CONFIG_HAVE_KVM_IRQCHIP Two KVM archs support irqchips and two don't. Add a Kconfig item to make selecting between the two models easier. 
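A hypothetical illustration of what the new symbol buys (no such hunk is part of this patch): generic KVM code can guard irqchip-only paths with a single Kconfig test instead of enumerating architectures, for example:

	#ifdef CONFIG_HAVE_KVM_IRQCHIP
		/* ia64 and x86: an in-kernel irqchip exists, wire up GSI routing */
		r = kvm_setup_default_irq_routing(kvm);
		if (r)
			goto out;
	#else
		/* powerpc and s390: no emulated irqchip, nothing to set up */
		r = 0;
	#endif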
Signed-off-by: Avi Kivity --- arch/ia64/kvm/Kconfig | 4 ++++ arch/powerpc/kvm/Kconfig | 3 +++ arch/s390/kvm/Kconfig | 3 +++ arch/x86/kvm/Kconfig | 4 ++++ 4 files changed, 14 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index f833a0b4188..0a2d6b86075 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -4,6 +4,10 @@ config HAVE_KVM bool +config HAVE_KVM_IRQCHIP + bool + default y + menuconfig VIRTUALIZATION bool "Virtualization" depends on HAVE_KVM || IA64 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 14657055014..5a152a52796 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -2,6 +2,9 @@ # KVM configuration # +config HAVE_KVM_IRQCHIP + bool + menuconfig VIRTUALIZATION bool "Virtualization" ---help--- diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index e051cad1f1e..3e260b7e37b 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -4,6 +4,9 @@ config HAVE_KVM bool +config HAVE_KVM_IRQCHIP + bool + menuconfig VIRTUALIZATION bool "Virtualization" default y diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index b81125f0bde..0a303c3ed11 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -4,6 +4,10 @@ config HAVE_KVM bool +config HAVE_KVM_IRQCHIP + bool + default y + menuconfig VIRTUALIZATION bool "Virtualization" depends on HAVE_KVM || X86 -- cgit v1.2.3 From 4780c65904f0fc4e312ee2da9383eacbe04e61ea Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 18:06:06 +0200 Subject: KVM: Reset PIT irq injection logic when the PIT IRQ is unmasked While the PIT is masked the guest cannot ack the irq, so the reinject logic will never allow the interrupt to be injected. Fix by resetting the reinjection counters on unmask. Unbreaks Xen. 
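For context, the notifier registered in the hunks below is driven from the irqchip side: when the guest unmasks the PIT pin, the generic code walks the notifiers registered for that GSI and calls each one with mask=false, which is what lets pit_mask_notifer() clear the pending count and re-arm irq_ack. A sketch of that generic half, assuming the helper looks roughly like the kvm_fire_mask_notifiers() provided by the mask-notifier API (not shown in this hunk):

	void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
	{
		struct kvm_irq_mask_notifier *kimn;
		struct hlist_node *n;

		/* called by the PIC/IOAPIC emulation whenever a pin's mask bit flips */
		hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
			if (kimn->irq == irq)
				kimn->func(kimn, mask);
	}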
Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 15 +++++++++++++++ arch/x86/kvm/i8254.h | 1 + 2 files changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 69d1bbff3fd..c13bb92d315 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -539,6 +539,16 @@ void kvm_pit_reset(struct kvm_pit *pit) pit->pit_state.irq_ack = 1; } +static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) +{ + struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); + + if (!mask) { + atomic_set(&pit->pit_state.pit_timer.pending, 0); + pit->pit_state.irq_ack = 1; + } +} + struct kvm_pit *kvm_create_pit(struct kvm *kvm) { struct kvm_pit *pit; @@ -586,6 +596,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) kvm_pit_reset(pit); + pit->mask_notifier.func = pit_mask_notifer; + kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + return pit; } @@ -594,6 +607,8 @@ void kvm_free_pit(struct kvm *kvm) struct hrtimer *timer; if (kvm->arch.vpit) { + kvm_unregister_irq_mask_notifier(kvm, 0, + &kvm->arch.vpit->mask_notifier); mutex_lock(&kvm->arch.vpit->pit_state.lock); timer = &kvm->arch.vpit->pit_state.pit_timer.timer; hrtimer_cancel(timer); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 76959c4b500..6acbe4b505d 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -46,6 +46,7 @@ struct kvm_pit { struct kvm *kvm; struct kvm_kpit_state pit_state; int irq_source_id; + struct kvm_irq_mask_notifier mask_notifier; }; #define KVM_PIT_BASE_ADDRESS 0x40 -- cgit v1.2.3 From a26b73ad5e763db1b01cca6965ac3b65fe36e82a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 8 Jan 2009 13:58:48 +0100 Subject: KVM: ia64: expose registers in struct kvm_regs Provide register layout for struct kvm_regs exposed to userland. 
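The union added below overlays architecturally named control registers on the old raw vcr[] array, following the ia64 CR numbering (dcr is CR0, itm is CR1, pta is CR8, ipsr is CR16, lid is CR64, with the rsvN pads keeping the offsets aligned), so existing index-based users keep working unchanged. A small hypothetical accessor showing the aliasing, assuming the <asm/kvm.h> from this patch is included:

	static int itm_fields_alias(const struct saved_vpd *vpd)
	{
		/* itm is CR1, so the named field reads the same slot as vcr[1] */
		return vpd->itm == vpd->vcr[1];   /* always true */
	}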
Signed-off-by: Jes Sorensen Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index b5145784233..0ee5bd7a988 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -166,7 +166,40 @@ struct saved_vpd { unsigned long vcpuid[5]; unsigned long vpsr; unsigned long vpr; - unsigned long vcr[128]; + union { + unsigned long vcr[128]; + struct { + unsigned long dcr; + unsigned long itm; + unsigned long iva; + unsigned long rsv1[5]; + unsigned long pta; + unsigned long rsv2[7]; + unsigned long ipsr; + unsigned long isr; + unsigned long rsv3; + unsigned long iip; + unsigned long ifa; + unsigned long itir; + unsigned long iipa; + unsigned long ifs; + unsigned long iim; + unsigned long iha; + unsigned long rsv4[38]; + unsigned long lid; + unsigned long ivr; + unsigned long tpr; + unsigned long eoi; + unsigned long irr[4]; + unsigned long itv; + unsigned long pmv; + unsigned long cmcv; + unsigned long rsv5[5]; + unsigned long lrr0; + unsigned long lrr1; + unsigned long rsv6[46]; + }; + }; }; struct kvm_regs { -- cgit v1.2.3 From ff81ff10b4417952919dcc0bd67effa9ceb411c0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 8 Jan 2009 11:05:17 -0800 Subject: KVM: SVM: Fix typo in has_svm() Signed-off-by: Joe Perches Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3a60c3f04e6..db5021b2b5a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -250,7 +250,7 @@ static int has_svm(void) const char *msg; if (!cpu_has_svm(&msg)) { - printk(KERN_INFO "has_svn: %s\n", msg); + printk(KERN_INFO "has_svm: %s\n", msg); return 0; } -- cgit v1.2.3 From 9903a927a4aea4b1ea42356a8453fca664df0b18 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Thu, 8 Jan 2009 17:44:19 -0200 Subject: KVM: MMU: drop zeroing on mmu_memory_cache_alloc Zeroing on mmu_memory_cache_alloc is unnecessary since: - Smaller areas are pre-allocated with kmem_cache_zalloc. - Page pointed by ->spt is overwritten with prefetch_page and entries in page pointed by ->gfns are initialized before reading. [avi: zeroing pages is unnecessary] Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index aac0499947d..de9a9fbc16e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -352,7 +352,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, BUG_ON(!mc->nobjs); p = mc->objects[--mc->nobjs]; - memset(p, 0, size); return p; } -- cgit v1.2.3 From f6e2c02b6d28ddabe99377c5640a833407a62632 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 11 Jan 2009 13:02:10 +0200 Subject: KVM: MMU: Rename "metaphysical" attribute to "direct" This actually describes what is going on, rather than alerting the reader that something strange is going on. 
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 5 +++-- arch/x86/kvm/mmu.c | 32 ++++++++++++++++---------------- arch/x86/kvm/paging_tmpl.h | 12 ++++++------ 3 files changed, 25 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 863ea73431a..55fd4c5fd38 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -170,7 +170,8 @@ struct kvm_pte_chain { * bits 0:3 - total guest paging levels (2-4, or zero for real mode) * bits 4:7 - page table level for this shadow (1-4) * bits 8:9 - page table quadrant for 2-level guests - * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) + * bit 16 - direct mapping of virtual to physical mapping at gfn + * used for real mode and two-dimensional paging * bits 17:19 - common access permissions for all ptes in this shadow page */ union kvm_mmu_page_role { @@ -180,7 +181,7 @@ union kvm_mmu_page_role { unsigned level:4; unsigned quadrant:2; unsigned pad_for_nice_hex_output:6; - unsigned metaphysical:1; + unsigned direct:1; unsigned access:3; unsigned invalid:1; unsigned cr4_pge:1; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index de9a9fbc16e..ef060ec444a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1066,7 +1066,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry(sp, node, bucket, hash_link) - if (sp->gfn == gfn && !sp->role.metaphysical + if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid) { pgprintk("%s: found role %x\n", __func__, sp->role.word); @@ -1200,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, - int metaphysical, + int direct, unsigned access, u64 *parent_pte) { @@ -1213,7 +1213,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role = vcpu->arch.mmu.base_role; role.level = level; - role.metaphysical = metaphysical; + role.direct = direct; role.access = access; if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); @@ -1250,7 +1250,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, sp->role = role; sp->global = role.cr4_pge; hlist_add_head(&sp->hash_link, bucket); - if (!metaphysical) { + if (!direct) { if (rmap_write_protect(vcpu->kvm, gfn)) kvm_flush_remote_tlbs(vcpu->kvm); account_shadowed(vcpu->kvm, gfn); @@ -1395,7 +1395,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_page_unlink_children(kvm, sp); kvm_mmu_unlink_parents(kvm, sp); kvm_flush_remote_tlbs(kvm); - if (!sp->role.invalid && !sp->role.metaphysical) + if (!sp->role.invalid && !sp->role.direct) unaccount_shadowed(kvm, sp->gfn); if (sp->unsync) kvm_unlink_unsync_page(kvm, sp); @@ -1458,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) - if (sp->gfn == gfn && !sp->role.metaphysical) { + if (sp->gfn == gfn && !sp->role.direct) { pgprintk("%s: gfn %lx role %x\n", __func__, gfn, sp->role.word); r = 1; @@ -1478,7 +1478,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { - if (sp->gfn 
== gfn && !sp->role.metaphysical + if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid) { pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word); @@ -1638,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) bucket = &vcpu->kvm->arch.mmu_page_hash[index]; /* don't unsync if pagetable is shadowed with multiple roles */ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { - if (s->gfn != sp->gfn || s->role.metaphysical) + if (s->gfn != sp->gfn || s->role.direct) continue; if (s->role.word != sp->role.word) return 1; @@ -1951,7 +1951,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) int i; gfn_t root_gfn; struct kvm_mmu_page *sp; - int metaphysical = 0; + int direct = 0; root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; @@ -1960,18 +1960,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) ASSERT(!VALID_PAGE(root)); if (tdp_enabled) - metaphysical = 1; + direct = 1; sp = kvm_mmu_get_page(vcpu, root_gfn, 0, - PT64_ROOT_LEVEL, metaphysical, + PT64_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; vcpu->arch.mmu.root_hpa = root; return; } - metaphysical = !is_paging(vcpu); + direct = !is_paging(vcpu); if (tdp_enabled) - metaphysical = 1; + direct = 1; for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; @@ -1985,7 +1985,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) } else if (vcpu->arch.mmu.root_level == 0) root_gfn = 0; sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, - PT32_ROOT_LEVEL, metaphysical, + PT32_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; @@ -2487,7 +2487,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, index = kvm_page_table_hashfn(gfn); bucket = &vcpu->kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { - if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid) + if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) continue; pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 
4 : 8; misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); @@ -3125,7 +3125,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) gfn_t gfn; list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { - if (sp->role.metaphysical) + if (sp->role.direct) continue; gfn = unalias_gfn(vcpu->kvm, sp->gfn); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 46b68f941f6..7314c0944c5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -277,7 +277,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, unsigned access = gw->pt_access; struct kvm_mmu_page *shadow_page; u64 spte, *sptep; - int metaphysical; + int direct; gfn_t table_gfn; int r; int level; @@ -313,17 +313,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) { - metaphysical = 1; + direct = 1; if (!is_dirty_pte(gw->ptes[level - 1])) access &= ~ACC_WRITE_MASK; table_gfn = gpte_to_gfn(gw->ptes[level - 1]); } else { - metaphysical = 0; + direct = 0; table_gfn = gw->table_gfn[level - 2]; } shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, - metaphysical, access, sptep); - if (!metaphysical) { + direct, access, sptep); + if (!direct) { r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], &curr_pte, sizeof(curr_pte)); @@ -512,7 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, pt_element_t pt[256 / sizeof(pt_element_t)]; gpa_t pte_gpa; - if (sp->role.metaphysical + if (sp->role.direct || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { nonpaging_prefetch_page(vcpu, sp); return; -- cgit v1.2.3 From 5a41accd3f69c6ef1d58f0c148980bae70515937 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 11 Jan 2009 17:19:35 +0200 Subject: KVM: MMU: Only enable cr4_pge role in shadow mode Two dimensional paging is only confused by it. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6fbc3460337..19ccde621cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -364,7 +364,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } kvm_x86_ops->set_cr4(vcpu, cr4); vcpu->arch.cr4 = cr4; - vcpu->arch.mmu.base_role.cr4_pge = !!(cr4 & X86_CR4_PGE); + vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled; kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); } -- cgit v1.2.3 From 87c656b4147cd08c6efba95d8d70c12cba181255 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Wed, 14 Jan 2009 10:47:36 -0600 Subject: powerpc/fsl-booke: declare tlbcam_index for use in c So, KVM needs to read tlbcam_index to know exactly which TLB1 entry is unused by host. 
Signed-off-by: Liu Yu Acked-by: Kumar Gala Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/mmu-fsl-booke.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h index 3f941c0f7e8..4285b64a65e 100644 --- a/arch/powerpc/include/asm/mmu-fsl-booke.h +++ b/arch/powerpc/include/asm/mmu-fsl-booke.h @@ -75,6 +75,8 @@ #ifndef __ASSEMBLY__ +extern unsigned int tlbcam_index; + typedef struct { unsigned int id; unsigned int active; -- cgit v1.2.3 From fb2838d446f6ee7e13e4149e08ec138753c5c450 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Wed, 14 Jan 2009 10:47:37 -0600 Subject: KVM: ppc: Fix e500 warnings and some spelling problems Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_emulate.c | 2 -- arch/powerpc/kvm/e500_tlb.c | 6 +++--- arch/powerpc/kvm/e500_tlb.h | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index d3c0c7c7f20..7a98d4a4c1e 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -30,8 +30,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int emulated = EMULATE_DONE; int ra; int rb; - int rs; - int rt; switch (get_op(inst)) { case 31: diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 6a50340f575..e3daf57b5f5 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -260,7 +260,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, unsigned int victim, pidsel, tsized; int tlbsel; - /* since we only have tow TLBs, only lower bit is used. */ + /* since we only have two TLBs, only lower bit is used. */ tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; pidsel = (vcpu_e500->mas4 >> 16) & 0xf; @@ -402,7 +402,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) ia = (ea >> 2) & 0x1; - /* since we only have tow TLBs, only lower bit is used. */ + /* since we only have two TLBs, only lower bit is used. */ tlbsel = (ea >> 3) & 0x1; if (ia) { @@ -471,7 +471,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) } else { int victim; - /* since we only have tow TLBs, only lower bit is used. */ + /* since we only have two TLBs, only lower bit is used. */ tlbsel = vcpu_e500->mas4 >> 28 & 0x1; victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index d8833f95516..ab49e935518 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h @@ -126,7 +126,7 @@ static inline unsigned int get_tlb_tlbsel( { /* * Manual says that tlbsel has 2 bits wide. - * Since we only have tow TLBs, only lower bit is used. + * Since we only have two TLBs, only lower bit is used. */ return (vcpu_e500->mas0 >> 28) & 0x1; } -- cgit v1.2.3 From 9aa4dd5e5f47a5feb96fc394fccf0265ab7d85a4 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Wed, 14 Jan 2009 10:47:38 -0600 Subject: KVM: ppc: Move to new TLB invalidate interface Commit 2a4aca1144394653269720ffbb5a325a77abd5fa removed old method _tlbia(). 
Signed-off-by: Liu Yu Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_tlb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index e3daf57b5f5..d437160d388 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -20,6 +20,7 @@ #include #include +#include "../mm/mmu_decl.h" #include "e500_tlb.h" #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) @@ -158,7 +159,7 @@ void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) { - _tlbia(); + _tlbil_all(); } /* Search the guest TLB for a matching entry. */ @@ -362,11 +363,10 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) int i; /* XXX Replace loop with fancy data structures. */ - /* needn't set modified since tlbia will make TLB1 coherent */ for (i = 0; i < tlb1_max_shadow_size(); i++) kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i); - _tlbia(); + _tlbil_all(); } } @@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } - _tlbia(); + _tlbil_all(); return EMULATE_DONE; } @@ -604,7 +604,7 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i); /* discard all guest mapping */ - _tlbia(); + _tlbil_all(); } void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, -- cgit v1.2.3 From 193554750441d91e127dd5066b8aebe0f769101c Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Wed, 14 Jan 2009 16:56:00 +0000 Subject: KVM: x86: Fix typos and whitespace errors Some typos, comments, whitespace errors corrected in the cpuid code Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 19ccde621cd..141a0166e51 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1067,7 +1067,7 @@ long kvm_arch_dev_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; @@ -1165,8 +1165,8 @@ out: } static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) { int r; @@ -1185,8 +1185,8 @@ out: } static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) { int r; @@ -1195,7 +1195,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, goto out; r = -EFAULT; if (copy_to_user(entries, &vcpu->arch.cpuid_entries, - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) goto out; return 0; @@ -1205,12 +1205,12 @@ out: } static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, - u32 index) + u32 index) { entry->function = function; entry->index = index; cpuid_count(entry->function, entry->index, - &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); + &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); entry->flags = 0; } @@ -1249,7 +1249,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 
bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) | bit(X86_FEATURE_SVM); - /* all func 2 cpuid_count() should be called on the same cpu */ + /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); do_cpuid_1_ent(entry, function, index); ++*nent; @@ -1323,7 +1323,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *cpuid_entries; int limit, nent = 0, r = -E2BIG; @@ -1340,7 +1340,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, limit = cpuid_entries[0].eax; for (func = 1; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, - &nent, cpuid->nent); + &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; @@ -1349,10 +1349,10 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, limit = cpuid_entries[nent - 1].eax; for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, - &nent, cpuid->nent); + &nent, cpuid->nent); r = -EFAULT; if (copy_to_user(entries, cpuid_entries, - nent * sizeof(struct kvm_cpuid_entry2))) + nent * sizeof(struct kvm_cpuid_entry2))) goto out_free; cpuid->nent = nent; r = 0; @@ -1496,7 +1496,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; break; @@ -1509,7 +1509,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; r = -EFAULT; @@ -2864,7 +2864,7 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) return 0; if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && - !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) + !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) return 0; return 1; } @@ -2892,7 +2892,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, if (!best || e->function > best->function) best = e; } - return best; } -- cgit v1.2.3 From 399ec807ddc38ecccf8c06dbde04531cbdc63e11 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 19 Nov 2008 13:58:46 +0200 Subject: KVM: Userspace controlled irq routing Currently KVM has a static routing from GSI numbers to interrupts (namely, 0-15 are mapped 1:1 to both PIC and IOAPIC, and 16:23 are mapped 1:1 to the IOAPIC). This is insufficient for several reasons: - HPET requires non 1:1 mapping for the timer interrupt - MSIs need a new method to assign interrupt numbers and dispatch them - ACPI APIC mode needs to be able to reassign the PCI LINK interrupts to the ioapics This patch implements an interrupt routing table (as a linked list, but this can be easily changed) and a userspace interface to replace the table. The routing table is initialized according to the current hardwired mapping. 
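A minimal userspace sketch of using the interface this introduces to replace the table (struct kvm_irq_routing, KVM_IRQ_ROUTING_IRQCHIP, KVM_SET_GSI_ROUTING and KVM_IRQCHIP_IOAPIC are assumed to match what <linux/kvm.h> exposes once this series lands; vm_fd is a VM file descriptor obtained elsewhere). Note that the ioctl replaces the entire routing table, so a real caller would rebuild the default 1:1 entries and then apply its changes on top:

	#include <linux/kvm.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	/* route one GSI to an IOAPIC pin; a real caller would first rebuild the
	 * default 1:1 entries, since the ioctl replaces the whole table */
	static int route_gsi_to_ioapic_pin(int vm_fd, unsigned int gsi, unsigned int pin)
	{
		struct kvm_irq_routing *table;
		int r;

		table = calloc(1, sizeof(*table) + sizeof(table->entries[0]));
		if (!table)
			return -1;

		table->nr = 1;
		table->entries[0].gsi = gsi;
		table->entries[0].type = KVM_IRQ_ROUTING_IRQCHIP;
		table->entries[0].u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC;
		table->entries[0].u.irqchip.pin = pin;

		r = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
		free(table);
		return r;
	}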
Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 5 +++++ arch/x86/kvm/x86.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 1477f91617a..dbf527a5734 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -919,6 +919,11 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_ioapic_init(kvm); if (r) goto out; + r = kvm_setup_default_irq_routing(kvm); + if (r) { + kfree(kvm->arch.vioapic); + goto out; + } break; case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 141a0166e51..32e3a7ec6ad 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1835,6 +1835,12 @@ long kvm_arch_vm_ioctl(struct file *filp, } } else goto out; + r = kvm_setup_default_irq_routing(kvm); + if (r) { + kfree(kvm->arch.vpic); + kfree(kvm->arch.vioapic); + goto out; + } break; case KVM_CREATE_PIT: mutex_lock(&kvm->lock); -- cgit v1.2.3 From 91b2ae773d3b168b763237fac33f75b13d891f20 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 19 Jan 2009 14:57:52 +0200 Subject: KVM: Avoid using CONFIG_ in userspace visible headers Kconfig symbols are not available in userspace, and are not stripped by headers-install. Avoid their use by adding #defines in to suit each architecture. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 54bcf228152..dc3f6cf1170 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -15,6 +15,7 @@ #define __KVM_HAVE_DEVICE_ASSIGNMENT #define __KVM_HAVE_MSI #define __KVM_HAVE_USER_NMI +#define __KVM_HAVE_GUEST_DEBUG /* Architectural interrupt line count. */ #define KVM_NR_INTERRUPTS 256 -- cgit v1.2.3 From 27d146449ccaad16480888129bb32a000ee33717 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Thu, 15 Jan 2009 17:58:19 +0800 Subject: KVM: ia64: vTLB change for enabling windows 2008 boot Simply the logic of hash vTLB, and export kvm_gpa_to_mpa. 
Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/vcpu.h | 4 ++-- arch/ia64/kvm/vtlb.c | 39 +++++++++++++++++---------------------- 2 files changed, 19 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index b2f12a562bd..042af92ced8 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h @@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte); extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); -extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, +extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 ifa, int type); extern void thash_purge_all(struct kvm_vcpu *v); extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, @@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v); void thash_init(struct thash_cb *hcb, u64 sz); void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); - +u64 kvm_gpa_to_mpa(u64 gpa); extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index ac94867f826..38232b37668 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) unsigned long ps, gpaddr; ps = itir_ps(itir); + rr.val = ia64_get_rr(ifa); - gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | - (ifa & ((1UL << ps) - 1)); + gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | + (ifa & ((1UL << ps) - 1)); - rr.val = ia64_get_rr(ifa); head = (struct thash_data *)ia64_thash(ifa); head->etag = INVALID_TI_TAG; ia64_mf(); @@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) /* * Purge overlap TCs and then insert the new entry to emulate itc ops. - * Notes: Only TC entry can purge and insert. - * 1 indicates this is MMIO + * Notes: Only TC entry can purge and insert. 
*/ -int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, +void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 ifa, int type) { u64 ps; u64 phy_pte, io_mask, index; union ia64_rr vrr, mrr; - int ret = 0; ps = itir_ps(itir); vrr.val = vcpu_get_rr(v, ifa); @@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, phy_pte &= ~_PAGE_MA_MASK; } - if (pte & VTLB_PTE_IO) - ret = 1; - vtlb_purge(v, ifa, ps); vhpt_purge(v, ifa, ps); - if (ps == mrr.ps) { - if (!(pte&VTLB_PTE_IO)) { - vhpt_insert(phy_pte, itir, ifa, pte); - } else { - vtlb_insert(v, pte, itir, ifa); - vcpu_quick_region_set(VMX(v, tc_regions), ifa); - } - } else if (ps > mrr.ps) { + if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) { vtlb_insert(v, pte, itir, ifa); vcpu_quick_region_set(VMX(v, tc_regions), ifa); - if (!(pte&VTLB_PTE_IO)) - vhpt_insert(phy_pte, itir, ifa, pte); - } else { + } + if (pte & VTLB_PTE_IO) + return; + + if (ps >= mrr.ps) + vhpt_insert(phy_pte, itir, ifa, pte); + else { u64 psr; phy_pte &= ~PAGE_FLAGS_RV_MASK; psr = ia64_clear_ic(); @@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, if (!(pte&VTLB_PTE_IO)) mark_pages_dirty(v, pte, ps); - return ret; } /* @@ -570,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz) u64 kvm_get_mpt_entry(u64 gpfn) { u64 *base = (u64 *) KVM_P2M_BASE; + + if (gpfn >= (KVM_P2M_SIZE >> 3)) + panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn); + return *(base + gpfn); } -- cgit v1.2.3 From 4b7bb626e3133eab131328a0770864b322c1bfe6 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Thu, 15 Jan 2009 18:08:36 +0800 Subject: KVM: ia64: Add the support for translating PAL Call's pointer args Add the support to translate PAL Call's pointer args. Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/process.c | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index f9c9504144f..0e727ce0d55 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c @@ -550,18 +550,60 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim, inject_guest_interruption(vcpu, vector); } +static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, + unsigned long arg) +{ + struct thash_data *data; + unsigned long gpa, poff; + + if (!is_physical_mode(vcpu)) { + /* Depends on caller to provide the DTR or DTC mapping.*/ + data = vtlb_lookup(vcpu, arg, D_TLB); + if (data) + gpa = data->page_flags & _PAGE_PPN_MASK; + else { + data = vhpt_lookup(arg); + if (!data) + return 0; + gpa = data->gpaddr & _PAGE_PPN_MASK; + } + + poff = arg & (PSIZE(data->ps) - 1); + arg = PAGEALIGN(gpa, data->ps) | poff; + } + arg = kvm_gpa_to_mpa(arg << 1 >> 1); + + return (unsigned long)__va(arg); +} + static void set_pal_call_data(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; + unsigned long gr28 = vcpu_get_gr(vcpu, 28); + unsigned long gr29 = vcpu_get_gr(vcpu, 29); + unsigned long gr30 = vcpu_get_gr(vcpu, 30); /*FIXME:For static and stacked convention, firmware * has put the parameters in gr28-gr31 before * break to vmm !!*/ - p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28); - p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29); - p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); + switch (gr28) { + case PAL_PERF_MON_INFO: + case PAL_HALT_INFO: + p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); + p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); + break; + case 
PAL_BRAND_INFO: + p->u.pal_data.gr29 = gr29;; + p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); + break; + default: + p->u.pal_data.gr29 = gr29;; + p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); + } + p->u.pal_data.gr28 = gr28; p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); + p->exit_reason = EXIT_REASON_PAL_CALL; } -- cgit v1.2.3 From 7d656bd996b68737d5d07643bc57311059038d67 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Wed, 21 Jan 2009 11:21:27 +0800 Subject: KVM: ia64: Implement some pal calls needed for windows 2008 For windows 2008, it needs more pal calls to implement for booting. In addition, also changes the name of set_{sal, pal}_call_result to get_{sal,pal}_call_result for readability. Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm_fw.c | 151 +++++++++++++++++++++++++++++++++++++++++++++++- arch/ia64/kvm/process.c | 8 +-- 2 files changed, 152 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c index cb7600bdff9..a8ae52ed563 100644 --- a/arch/ia64/kvm/kvm_fw.c +++ b/arch/ia64/kvm/kvm_fw.c @@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) return result; } +static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) +{ + + struct ia64_pal_retval result = {0, 0, 0, 0}; + long in0, in1, in2, in3; + + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); + + return result; +} + static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) { @@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; + unsigned long in0, in1, in2, in3; - INIT_PAL_STATUS_UNIMPLEMENTED(result); + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + + result.status = ia64_pal_vm_info(in1, in2, + (pal_tc_info_u_t *)&result.v1, &result.v2); return result; } @@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu) vcpu->arch.timer_fired = 0; } +static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) +{ + long status; + unsigned long in0, in1, in2, in3, r9; + unsigned long pm_buffer[16]; + + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + status = ia64_pal_perf_mon_info(pm_buffer, + (pal_perf_mon_info_u_t *) &r9); + if (status != 0) { + printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status); + } else { + if (in1) + memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); + else { + status = PAL_STATUS_EINVAL; + printk(KERN_WARNING"Invalid parameters " + "for PAL call:0x%lx!\n", in0); + } + } + return (struct ia64_pal_retval){status, r9, 0, 0}; +} + +static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) +{ + unsigned long in0, in1, in2, in3; + long status; + unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) + | (1UL << 61) | (1UL << 60); + + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + if (in1) { + memcpy((void *)in1, &res, sizeof(res)); + status = 0; + } else{ + status = PAL_STATUS_EINVAL; + printk(KERN_WARNING"Invalid parameters " + "for PAL call:0x%lx!\n", in0); + } + + return (struct ia64_pal_retval){status, 0, 0, 0}; +} + +static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) +{ + unsigned long r9; + long status; + + status = ia64_pal_mem_attrib(&r9); + + return (struct ia64_pal_retval){status, r9, 0, 0}; +} + +static void remote_pal_prefetch_visibility(void *v) +{ + 
s64 trans_type = (s64)v; + ia64_pal_prefetch_visibility(trans_type); +} + +static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) +{ + struct ia64_pal_retval result = {0, 0, 0, 0}; + unsigned long in0, in1, in2, in3; + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + result.status = ia64_pal_prefetch_visibility(in1); + if (result.status == 0) { + /* Must be performed on all remote processors + in the coherence domain. */ + smp_call_function(remote_pal_prefetch_visibility, + (void *)in1, 1); + /* Unnecessary on remote processor for other vcpus!*/ + result.status = 1; + } + return result; +} + +static void remote_pal_mc_drain(void *v) +{ + ia64_pal_mc_drain(); +} + +static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) +{ + struct ia64_pal_retval result = {0, 0, 0, 0}; + unsigned long in0, in1, in2, in3; + + kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); + + if (in1 == 0 && in2) { + char brand_info[128]; + result.status = ia64_pal_get_brand_info(brand_info); + if (result.status == PAL_STATUS_SUCCESS) + memcpy((void *)in2, brand_info, 128); + } else { + result.status = PAL_STATUS_REQUIRES_MEMORY; + printk(KERN_WARNING"Invalid parameters for " + "PAL call:0x%lx!\n", in0); + } + + return result; +} + int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) { @@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret = 1; gr28 = kvm_get_pal_call_index(vcpu); - /*printk("pal_call index:%lx\n",gr28);*/ switch (gr28) { case PAL_CACHE_FLUSH: result = pal_cache_flush(vcpu); break; + case PAL_MEM_ATTRIB: + result = pal_mem_attrib(vcpu); + break; case PAL_CACHE_SUMMARY: result = pal_cache_summary(vcpu); break; + case PAL_PERF_MON_INFO: + result = pal_perf_mon_info(vcpu); + break; + case PAL_HALT_INFO: + result = pal_halt_info(vcpu); + break; case PAL_HALT_LIGHT: { INIT_PAL_STATUS_SUCCESS(result); @@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) } break; + case PAL_PREFETCH_VISIBILITY: + result = pal_prefetch_visibility(vcpu); + break; + case PAL_MC_DRAIN: + result.status = ia64_pal_mc_drain(); + /* FIXME: All vcpus likely call PAL_MC_DRAIN. + That causes the congestion. 
*/ + smp_call_function(remote_pal_mc_drain, NULL, 1); + break; + case PAL_FREQ_RATIOS: result = pal_freq_ratios(vcpu); break; @@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) INIT_PAL_STATUS_SUCCESS(result); result.v1 = (1L << 32) | 1L; break; + case PAL_REGISTER_INFO: + result = pal_register_info(vcpu); + break; case PAL_VM_PAGE_SIZE: result.status = ia64_pal_vm_page_size(&result.v0, &result.v1); @@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) result.status = ia64_pal_version( (pal_version_u_t *)&result.v0, (pal_version_u_t *)&result.v1); - break; case PAL_FIXED_ADDR: result.status = PAL_STATUS_SUCCESS; result.v0 = vcpu->vcpu_id; break; + case PAL_BRAND_INFO: + result = pal_get_brand_info(vcpu); + break; + case PAL_GET_PSTATE: + case PAL_CACHE_SHARED_INFO: + INIT_PAL_STATUS_UNIMPLEMENTED(result); + break; default: INIT_PAL_STATUS_UNIMPLEMENTED(result); printk(KERN_WARNING"kvm: Unsupported pal call," diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index 0e727ce0d55..b1dc80952d9 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c @@ -607,7 +607,7 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu) p->exit_reason = EXIT_REASON_PAL_CALL; } -static void set_pal_call_result(struct kvm_vcpu *vcpu) +static void get_pal_call_result(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; @@ -635,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu) p->exit_reason = EXIT_REASON_SAL_CALL; } -static void set_sal_call_result(struct kvm_vcpu *vcpu) +static void get_sal_call_result(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; @@ -658,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, if (iim == DOMN_PAL_REQUEST) { set_pal_call_data(v); vmm_transition(v); - set_pal_call_result(v); + get_pal_call_result(v); vcpu_increment_iip(v); return; } else if (iim == DOMN_SAL_REQUEST) { set_sal_call_data(v); vmm_transition(v); - set_sal_call_result(v); + get_sal_call_result(v); vcpu_increment_iip(v); return; } -- cgit v1.2.3 From 2c411b48af3e3c534b9cfb6a79be1df384d1ca1a Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 22 Jan 2009 14:20:27 +0100 Subject: KVM: s390: Fix printk on SIGP set arch KVM on s390 does not support the ESA/390 architecture. We refuse to change the architecture mode and print a warning. This patch removes the printk for several reasons: o A malicious guest can flood host dmesg o The old message had no newline o there is no connection between the message and the failing guest This patch simply removes the printk. We already set the condition code to 3 - the guest knows that something went wrong. 
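The condition code already tells the guest everything it needs to know, so dropping the message is the cleanest fix. As a side note, any diagnostic that were ever kept on a path a guest can trigger at will would at least need rate limiting and a trailing newline; a minimal sketch (not part of this patch) would be:

    if (printk_ratelimit())
            printk(KERN_WARNING
                   "kvm: request to switch to ESA/390 mode not supported\n");
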
Reported-by: Heiko Carstens Signed-off-by: Christian Borntraeger Signed-off-by: Avi Kivity --- arch/s390/kvm/sigp.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 2a01b9e0280..f27dbedf086 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -153,8 +153,6 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) switch (parameter & 0xff) { case 0: - printk(KERN_WARNING "kvm: request to switch to ESA/390 mode" - " not supported"); rc = 3; /* not operational */ break; case 1: -- cgit v1.2.3 From 70455a36a073cbb83ca17f92d135a6128c73cb3c Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 22 Jan 2009 10:28:29 +0100 Subject: KVM: s390: Fix problem state check for b2 intercepts The kernel handles some priviledged instruction exits. While I was unable to trigger such an exit from guest userspace, the code should check for supervisor state before emulating a priviledged instruction. I also renamed kvm_s390_handle_priv to kvm_s390_handle_b2. After all there are non priviledged b2 instructions like stck (store clock). Signed-off-by: Christian Borntraeger Signed-off-by: Avi Kivity --- arch/s390/kvm/intercept.c | 2 +- arch/s390/kvm/kvm-s390.h | 2 +- arch/s390/kvm/priv.c | 18 +++++++++++++++--- 3 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 61236102203..9d19803111b 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -103,7 +103,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) static intercept_handler_t instruction_handlers[256] = { [0x83] = kvm_s390_handle_diag, [0xae] = kvm_s390_handle_sigp, - [0xb2] = kvm_s390_handle_priv, + [0xb2] = kvm_s390_handle_b2, [0xb7] = handle_lctl, [0xeb] = handle_lctlg, }; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 3893cf12eac..00bbe69b78d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -50,7 +50,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); /* implemented in priv.c */ -int kvm_s390_handle_priv(struct kvm_vcpu *vcpu); +int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); /* implemented in sigp.c */ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 3605df45dd4..4b88834b8dd 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -304,12 +304,24 @@ static intercept_handler_t priv_handlers[256] = { [0xb1] = handle_stfl, }; -int kvm_s390_handle_priv(struct kvm_vcpu *vcpu) +int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) { intercept_handler_t handler; + /* + * a lot of B2 instructions are priviledged. We first check for + * the priviledges ones, that we can handle in the kernel. If the + * kernel can handle this instruction, we check for the problem + * state bit and (a) handle the instruction or (b) send a code 2 + * program check. 
+ * Anything else goes to userspace.*/ handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; - if (handler) - return handler(vcpu); + if (handler) { + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, + PGM_PRIVILEGED_OPERATION); + else + return handler(vcpu); + } return -ENOTSUPP; } -- cgit v1.2.3 From b7e6e4d3602c738b8f61225d9f4514945df52f07 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 22 Jan 2009 10:29:08 +0100 Subject: KVM: s390: Fix SIGP set prefix ioctl This patch fixes the SET PREFIX interrupt if triggered by userspace. Until now, it was not necessary, but life migration will need it. In addition, it helped me creating SMP support for my kvm_crashme tool (lets kvm execute random guest memory content). Signed-off-by: Christian Borntraeger Signed-off-by: Avi Kivity --- arch/s390/kvm/interrupt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f4fe28a2521..0189356fe20 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -555,9 +555,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", s390int->parm); break; + case KVM_S390_SIGP_SET_PREFIX: + inti->prefix.address = s390int->parm; + inti->type = s390int->type; + VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", + s390int->parm); + break; case KVM_S390_SIGP_STOP: case KVM_S390_RESTART: - case KVM_S390_SIGP_SET_PREFIX: case KVM_S390_INT_EMERGENCY: VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); inti->type = s390int->type; -- cgit v1.2.3 From 934d534f8a5a39e20d5682b3a3a45ff351706b59 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 21 Jan 2009 15:16:43 +0100 Subject: KVM: ia64: dynamic nr online cpus Account for number of online cpus and use that in loops iterating over the list of vpus instead of scanning the full array unconditionally. This patch is a building block to facilitate allowing to bump up the size of MAX_VCPUS significantly. 
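The scan that this series converges on looks roughly like the sketch below (illustrative only; the NULL check stays because vcpu slots below online_vcpus can still be empty if ids are created out of order):

    int i;
    struct kvm_vcpu *v;

    for (i = 0; i < kvm->arch.online_vcpus; i++) {
            v = kvm->vcpus[i];
            if (!v)
                    continue;
            /* per-vcpu work, previously done for all KVM_MAX_VCPUS slots */
    }

With, say, four online vcpus this bounds the loops to four iterations no matter how large KVM_MAX_VCPUS is later made.
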
Signed-off-by: Jes Sorensen Acked-by : Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm_host.h | 10 ++++++---- arch/ia64/kvm/kvm-ia64.c | 12 ++++++++---- arch/ia64/kvm/vcpu.c | 5 ++++- 3 files changed, 18 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 46f8b0eb568..4542651e6ac 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -157,10 +157,10 @@ struct kvm_vm_data { struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; }; -#define VCPU_BASE(n) KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, vcpu_data[n]) -#define VM_BASE KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, kvm_vm_struct) +#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \ + offsetof(struct kvm_vm_data, vcpu_data[n])) +#define KVM_VM_BASE (KVM_VM_DATA_BASE + \ + offsetof(struct kvm_vm_data, kvm_vm_struct)) #define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ offsetof(struct kvm_vm_data, kvm_mem_dirty_log) @@ -464,6 +464,8 @@ struct kvm_arch { unsigned long metaphysical_rr4; unsigned long vmm_init_rr; + int online_vcpus; + struct kvm_ioapic *vioapic; struct kvm_vm_stat stat; struct kvm_sal_data rdv_sal_data; diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index dbf527a5734..9c77e3939e9 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, union ia64_lid lid; int i; - for (i = 0; i < KVM_MAX_VCPUS; i++) { + for (i = 0; i < kvm->arch.online_vcpus; i++) { if (kvm->vcpus[i]) { lid.val = VCPU_LID(kvm->vcpus[i]); if (lid.id == id && lid.eid == eid) @@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) call_data.ptc_g_data = p->u.ptc_g_data; - for (i = 0; i < KVM_MAX_VCPUS; i++) { + for (i = 0; i < kvm->arch.online_vcpus; i++) { if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || vcpu == kvm->vcpus[i]) @@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void) return ERR_PTR(-ENOMEM); kvm_init_vm(kvm); + kvm->arch.online_vcpus = 0; + return kvm; } @@ -1154,7 +1156,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) /*Initialize itc offset for vcpus*/ itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); - for (i = 0; i < KVM_MAX_VCPUS; i++) { + for (i = 0; i < kvm->arch.online_vcpus; i++) { v = (struct kvm_vcpu *)((char *)vcpu + sizeof(struct kvm_vcpu_data) * i); v->arch.itc_offset = itc_offset; @@ -1288,6 +1290,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, goto fail; } + kvm->arch.online_vcpus++; + return vcpu; fail: return ERR_PTR(r); @@ -1828,7 +1832,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, struct kvm_vcpu *lvcpu = kvm->vcpus[0]; int i; - for (i = 1; i < KVM_MAX_VCPUS; i++) { + for (i = 1; i < kvm->arch.online_vcpus; i++) { if (!kvm->vcpus[i]) continue; if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index 4d8be4c252f..d4d28050587 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c @@ -807,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) { struct kvm_vcpu *v; + struct kvm *kvm; int i; long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC); unsigned long vitv = VCPU(vcpu, itv); + kvm = (struct kvm *)KVM_VM_BASE; + if (vcpu->vcpu_id == 0) { - for (i = 0; i < KVM_MAX_VCPUS; i++) { + for (i = 0; i < 
kvm->arch.online_vcpus; i++) { v = (struct kvm_vcpu *)((char *)vcpu + sizeof(struct kvm_vcpu_data) * i); VMX(v, itc_offset) = itc_offset; -- cgit v1.2.3 From 44882eed2ebe7f75f8cdae5671ab1d6e0fa40dbc Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 27 Jan 2009 15:12:38 -0200 Subject: KVM: make irq ack notifications aware of routing table IRQ ack notifications assume an identity mapping between pin->gsi, which might not be the case with, for example, HPET. Translate before acking. Signed-off-by: Marcelo Tosatti Acked-by: Gleb Natapov --- arch/x86/kvm/i8259.c | 5 +++-- arch/x86/kvm/irq.h | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 179dcb0103f..93160375c84 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -49,7 +49,8 @@ static void pic_unlock(struct kvm_pic *s) spin_unlock(&s->lock); while (acks) { - kvm_notify_acked_irq(kvm, __ffs(acks)); + kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)), + __ffs(acks)); acks &= acks - 1; } @@ -232,7 +233,7 @@ int kvm_pic_read_irq(struct kvm *kvm) } pic_update_irq(s); pic_unlock(s); - kvm_notify_acked_irq(kvm, irq); + kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq); return intno; } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 82579ee538d..9f593188129 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -32,6 +32,8 @@ #include "lapic.h" #define PIC_NUM_PINS 16 +#define SELECT_PIC(irq) \ + ((irq) < 8 ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE) struct kvm; struct kvm_vcpu; -- cgit v1.2.3 From d20626936dd6aa783760e780dae5abb127564316 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 2 Feb 2009 16:23:50 +0100 Subject: x86: Add EFER descriptions for FFXSR AMD k10 includes support for the FFXSR feature, which leaves out XMM registers on FXSAVE/FXSAVE when the EFER_FFXSR bit is set in EFER. The CPUID feature bit exists already, but the EFER bit is missing currently, so this patch adds it to the list of known EFER bits. Signed-off-by: Alexander Graf CC: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/include/asm/msr-index.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 46e9646e7a6..f4e505f286b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -19,12 +19,14 @@ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ #define _EFER_SVME 12 /* Enable virtualization */ +#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) #define EFER_SVME (1<<_EFER_SVME) +#define EFER_FFXSR (1<<_EFER_FFXSR) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_PERFCTR0 0x000000c1 -- cgit v1.2.3 From 1b2fd70c4eddef53f32639296818c0253e7ca48d Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 2 Feb 2009 16:23:51 +0100 Subject: KVM: Add FFXSR support AMD K10 CPUs implement the FFXSR feature that gets enabled using EFER. Let's check if the virtual CPU description includes that CPUID feature bit and allow enabling it then. This is required for Windows Server 2008 in Hyper-V mode. 
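For context, the guest-side handshake that the new set_efer() check guards looks roughly like this (illustrative sketch of a well-behaved guest, not KVM code):

    /* CPUID 0x80000001 EDX bit 25 is FXSR_OPT (FFXSR support) */
    if (cpuid_edx(0x80000001) & (1 << 25)) {
            u64 efer;

            rdmsrl(MSR_EFER, efer);
            wrmsrl(MSR_EFER, efer | EFER_FFXSR);    /* bit 14 */
            /* FXSAVE/FXRSTOR in 64-bit mode now skip the XMM area */
    }

A guest that sets EFER_FFXSR without the CPUID bit exposed still gets a #GP from set_efer(), while guests whose CPUID advertises FXSR_OPT can now enable it.
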
v2 adds CPUID capability exposure Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ arch/x86/kvm/x86.c | 12 ++++++++++++ 2 files changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index db5021b2b5a..1c4a018bf4b 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -417,6 +417,9 @@ static __init int svm_hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); + if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) + kvm_enable_efer_bits(EFER_FFXSR); + if (nested) { printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); kvm_enable_efer_bits(EFER_SVME); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 32e3a7ec6ad..8f83590b47d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -490,6 +490,17 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) return; } + if (efer & EFER_FFXSR) { + struct kvm_cpuid_entry2 *feat; + + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { + printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); + kvm_inject_gp(vcpu, 0); + return; + } + } + if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; @@ -1240,6 +1251,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, #ifdef CONFIG_X86_64 bit(X86_FEATURE_LM) | #endif + bit(X86_FEATURE_FXSR_OPT) | bit(X86_FEATURE_MMXEXT) | bit(X86_FEATURE_3DNOWEXT) | bit(X86_FEATURE_3DNOW); -- cgit v1.2.3 From 34c33d163fe509da8414a736c6328855f8c164e5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Sun, 8 Feb 2009 13:28:15 +0100 Subject: KVM: Drop unused evaluations from string pio handlers Looks like neither the direction nor the rep prefix are used anymore. Drop related evaluations from SVM's and VMX's I/O exit handlers. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 4 +--- arch/x86/kvm/vmx.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1c4a018bf4b..22e88a4a51c 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1194,7 +1194,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? 
*/ - int size, down, in, string, rep; + int size, in, string; unsigned port; ++svm->vcpu.stat.io_exits; @@ -1213,8 +1213,6 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) in = (io_info & SVM_IOIO_TYPE_MASK) != 0; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; - rep = (io_info & SVM_IOIO_REP_MASK) != 0; - down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; skip_emulated_instruction(&svm->vcpu); return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index df454de8acf..509b3530540 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2698,7 +2698,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { unsigned long exit_qualification; - int size, down, in, string, rep; + int size, in, string; unsigned port; ++vcpu->stat.io_exits; @@ -2714,8 +2714,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) size = (exit_qualification & 7) + 1; in = (exit_qualification & 8) != 0; - down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; - rep = (exit_qualification & 32) != 0; port = exit_qualification >> 16; skip_emulated_instruction(vcpu); -- cgit v1.2.3 From 49cd7d2238e44f7ee4269481cd8a1261cc8f93a5 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 11 Feb 2009 13:50:40 +0800 Subject: KVM: VMX: Use kvm_mmu_page_fault() handle EPT violation mmio Removed duplicated code. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 509b3530540..cb27ffccf46 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3058,11 +3058,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification; - enum emulation_result er; gpa_t gpa; - unsigned long hva; int gla_validity; - int r; exit_qualification = vmcs_read64(EXIT_QUALIFICATION); @@ -3085,32 +3082,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT); - if (!kvm_is_error_hva(hva)) { - r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); - if (r < 0) { - printk(KERN_ERR "EPT: Not enough memory!\n"); - return -ENOMEM; - } - return 1; - } else { - /* must be MMIO */ - er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); - - if (er == EMULATE_FAIL) { - printk(KERN_ERR - "EPT: Fail to handle EPT violation vmexit!er is %d\n", - er); - printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", - (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), - (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS)); - printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", - (long unsigned int)exit_qualification); - return -ENOTSUPP; - } else if (er == EMULATE_DO_MMIO) - return 0; - } - return 1; + return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); } static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -- cgit v1.2.3 From c807660407a695f390034e402edfe544a1d2e40c Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Wed, 4 Feb 2009 17:52:04 +0100 Subject: KVM: Fix kvmclock on !constant_tsc boxes kvmclock currently falls apart on machines without constant tsc. This patch fixes it. 
Changes: * keep tsc frequency in a per-cpu variable. * handle kvmclock update using a new request flag, thus checking whenever we need an update each time we enter guest context. * use a cpufreq notifier to track frequency changes and force kvmclock updates. * send ipis to kick cpu out of guest context if needed to make sure the guest doesn't see stale values. Signed-off-by: Gerd Hoffmann Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 94 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8f83590b47d..05d7be89b5e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -617,6 +618,8 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info * hv_clock->tsc_to_system_mul); } +static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); + static void kvm_write_guest_time(struct kvm_vcpu *v) { struct timespec ts; @@ -627,9 +630,9 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) if ((!vcpu->time_page)) return; - if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) { - kvm_set_time_scale(tsc_khz, &vcpu->hv_clock); - vcpu->hv_clock_tsc_khz = tsc_khz; + if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) { + kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock); + vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz); } /* Keep irq disabled to prevent changes to the clock */ @@ -660,6 +663,16 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); } +static int kvm_request_guest_time_update(struct kvm_vcpu *v) +{ + struct kvm_vcpu_arch *vcpu = &v->arch; + + if (!vcpu->time_page) + return 0; + set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests); + return 1; +} + static bool msr_mtrr_valid(unsigned msr) { switch (msr) { @@ -790,7 +803,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) vcpu->arch.time_page = NULL; } - kvm_write_guest_time(vcpu); + kvm_request_guest_time_update(vcpu); break; } default: @@ -1000,6 +1013,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: + case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: @@ -1025,9 +1039,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_IOMMU: r = iommu_found(); break; - case KVM_CAP_CLOCKSOURCE: - r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC); - break; default: r = 0; break; @@ -1098,7 +1109,7 @@ out: void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { kvm_x86_ops->vcpu_load(vcpu, cpu); - kvm_write_guest_time(vcpu); + kvm_request_guest_time_update(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -2642,9 +2653,72 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, } EXPORT_SYMBOL_GPL(kvm_emulate_pio_string); +static void bounce_off(void *info) +{ + /* nothing */ +} + +static unsigned int ref_freq; +static unsigned long tsc_khz_ref; + +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct kvm *kvm; + struct kvm_vcpu *vcpu; + int i, send_ipi = 0; + + if (!ref_freq) + ref_freq = freq->old; + + if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) + return 0; + if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) + return 0; + per_cpu(cpu_tsc_khz, freq->cpu) = 
cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); + + spin_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + vcpu = kvm->vcpus[i]; + if (!vcpu) + continue; + if (vcpu->cpu != freq->cpu) + continue; + if (!kvm_request_guest_time_update(vcpu)) + continue; + if (vcpu->cpu != smp_processor_id()) + send_ipi++; + } + } + spin_unlock(&kvm_lock); + + if (freq->old < freq->new && send_ipi) { + /* + * We upscale the frequency. Must make the guest + * doesn't see old kvmclock values while running with + * the new frequency, otherwise we risk the guest sees + * time go backwards. + * + * In case we update the frequency for another cpu + * (which might be in guest context) send an interrupt + * to kick the cpu out of guest context. Next time + * guest context is entered kvmclock will be updated, + * so the guest will not see stale values. + */ + smp_call_function_single(freq->cpu, bounce_off, NULL, 1); + } + return 0; +} + +static struct notifier_block kvmclock_cpufreq_notifier_block = { + .notifier_call = kvmclock_cpufreq_notifier +}; + int kvm_arch_init(void *opaque) { - int r; + int r, cpu; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { @@ -2675,6 +2749,15 @@ int kvm_arch_init(void *opaque) kvm_mmu_set_base_ptes(PT_PRESENT_MASK); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0, 0); + + for_each_possible_cpu(cpu) + per_cpu(cpu_tsc_khz, cpu) = tsc_khz; + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + tsc_khz_ref = tsc_khz; + cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + return 0; out: @@ -3010,6 +3093,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vcpu->requests) { if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) __kvm_migrate_timers(vcpu); + if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests)) + kvm_write_guest_time(vcpu); if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests)) kvm_mmu_sync_roots(vcpu); if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) -- cgit v1.2.3 From 6bed6b9e84bf5e6b468847a50c0751389fdd01d4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 18 Feb 2009 14:08:59 +0100 Subject: KVM: MMU: remove redundant check in mmu_set_spte The following code flow is unnecessary: if (largepage) was_rmapped = is_large_pte(*shadow_pte); else was_rmapped = 1; The is_large_pte() function will always evaluate to one here because the (largepage && !is_large_pte) case is already handled in the first if-clause. So we can remove this check and set was_rmapped to one always here. 
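Spelled out, the surrounding structure is roughly the following (paraphrased, not the literal source):

    if (is_rmap_pte(*shadow_pte)) {
            if (largepage && !is_large_pte(*shadow_pte)) {
                    /* first if-clause: a small spte is being replaced
                     * by a large one - handled here */
            } else if (pfn != spte_to_pfn(*shadow_pte)) {
                    rmap_remove(vcpu->kvm, shadow_pte);
            } else {
                    /* reachable only when !largepage, or when largepage
                     * and is_large_pte(*shadow_pte) both hold, so the
                     * removed is_large_pte() test could only return 1 */
                    was_rmapped = 1;
            }
    }
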
Signed-off-by: Joerg Roedel Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ef060ec444a..c90b4b2f2ab 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1791,12 +1791,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, pgprintk("hfn old %lx new %lx\n", spte_to_pfn(*shadow_pte), pfn); rmap_remove(vcpu->kvm, shadow_pte); - } else { - if (largepage) - was_rmapped = is_large_pte(*shadow_pte); - else - was_rmapped = 1; - } + } else + was_rmapped = 1; } if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, dirty, largepage, global, gfn, pfn, speculative, true)) { -- cgit v1.2.3 From 452425dbaa1974e9fc489e64a8de46a47b4c2754 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 18 Feb 2009 14:54:37 +0100 Subject: KVM: MMU: remove assertion in kvm_mmu_alloc_page The assertion no longer makes sense since we don't clear page tables on allocation; instead we clear them during prefetch. Signed-off-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c90b4b2f2ab..4a21b0f8491 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -802,7 +802,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&sp->oos_link); - ASSERT(is_empty_shadow_page(sp->spt)); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; sp->parent_pte = parent_pte; -- cgit v1.2.3 From 4925663a079c77d95d8685228ad6675fc5639c8e Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 4 Feb 2009 17:28:14 +0200 Subject: KVM: Report IRQ injection status to userspace. IRQ injection status is either -1 (if there was no CPU found that should except the interrupt because IRQ was masked or ioapic was misconfigured or ...) or >= 0 in that case the number indicates to how many CPUs interrupt was injected. If the value is 0 it means that the interrupt was coalesced and probably should be reinjected. 
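From userspace the new contract can be used roughly as follows (sketch; queue_for_reinjection() is a hypothetical helper, vm_fd and gsi stand for the VM file descriptor and the GSI being injected, and the layout assumes the status field this series adds to struct kvm_irq_level):

    struct kvm_irq_level irq_event = { .irq = gsi, .level = 1 };

    if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq_event) == 0 &&
        irq_event.status == 0)
            queue_for_reinjection(gsi);     /* coalesced, retry later */

A negative status means no CPU could accept the interrupt (masked or misrouted). The plain KVM_IRQ_LINE ioctl keeps its old behaviour; only KVM_IRQ_LINE_STATUS copies the injection status back.
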
Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 12 ++++++++++-- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/i8259.c | 18 +++++++++++++----- arch/x86/kvm/x86.c | 13 +++++++++++-- 4 files changed, 35 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 9c77e3939e9..076b00d1dbf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext) switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_MP_STATE: - + case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -927,6 +927,7 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; } break; + case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; @@ -934,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp, if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; if (irqchip_in_kernel(kvm)) { + __s32 status; mutex_lock(&kvm->lock); - kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, + status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); mutex_unlock(&kvm->lock); + if (ioctl == KVM_IRQ_LINE_STATUS) { + irq_event.status = status; + if (copy_to_user(argp, &irq_event, + sizeof irq_event)) + goto out; + } r = 0; } break; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 55fd4c5fd38..f0faf58044f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -616,7 +616,7 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, u32 error_code); -void kvm_pic_set_irq(void *opaque, int irq, int level); +int kvm_pic_set_irq(void *opaque, int irq, int level); void kvm_inject_nmi(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 93160375c84..b4e662e94dd 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -77,12 +77,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm) /* * set irq level. If an edge is detected, then the IRR is set to 1 */ -static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) +static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) { - int mask; + int mask, ret = 1; mask = 1 << irq; if (s->elcr & mask) /* level triggered */ if (level) { + ret = !(s->irr & mask); s->irr |= mask; s->last_irr |= mask; } else { @@ -91,11 +92,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) } else /* edge triggered */ if (level) { - if ((s->last_irr & mask) == 0) + if ((s->last_irr & mask) == 0) { + ret = !(s->irr & mask); s->irr |= mask; + } s->last_irr |= mask; } else s->last_irr &= ~mask; + + return (s->imr & mask) ? 
-1 : ret; } /* @@ -172,16 +177,19 @@ void kvm_pic_update_irq(struct kvm_pic *s) pic_unlock(s); } -void kvm_pic_set_irq(void *opaque, int irq, int level) +int kvm_pic_set_irq(void *opaque, int irq, int level) { struct kvm_pic *s = opaque; + int ret = -1; pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { - pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); + ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); } pic_unlock(s); + + return ret; } /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 05d7be89b5e..e4db5be7c95 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1019,6 +1019,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_REINJECT_CONTROL: + case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -1877,6 +1878,7 @@ long kvm_arch_vm_ioctl(struct file *filp, create_pit_unlock: mutex_unlock(&kvm->lock); break; + case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; @@ -1884,10 +1886,17 @@ long kvm_arch_vm_ioctl(struct file *filp, if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; if (irqchip_in_kernel(kvm)) { + __s32 status; mutex_lock(&kvm->lock); - kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, - irq_event.irq, irq_event.level); + status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, + irq_event.irq, irq_event.level); mutex_unlock(&kvm->lock); + if (ioctl == KVM_IRQ_LINE_STATUS) { + irq_event.status = status; + if (copy_to_user(argp, &irq_event, + sizeof irq_event)) + goto out; + } r = 0; } break; -- cgit v1.2.3 From 1fbdc7a58512a6283e10fd27108197679db95ffa Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Sun, 11 Jan 2009 22:39:44 +0100 Subject: KVM: SVM: set accessed bit for VMCB segment selectors In the segment descriptor _cache_ the accessed bit is always set (although it can be cleared in the descriptor itself). Since Intel checks for this condition on a VMENTRY, set this bit in the AMD path to enable cross vendor migration. Cc: stable@kernel.org Signed-off-by: Andre Przywara Acked-By: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 22e88a4a51c..1821c207819 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -796,20 +796,37 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; - /* - * SVM always stores 0 for the 'G' bit in the CS selector in - * the VMCB on a VMEXIT. This hurts cross-vendor migration: - * Intel's VMENTRY has a check on the 'G' bit. - */ - if (seg == VCPU_SREG_CS) + switch (seg) { + case VCPU_SREG_CS: + /* + * SVM always stores 0 for the 'G' bit in the CS selector in + * the VMCB on a VMEXIT. This hurts cross-vendor migration: + * Intel's VMENTRY has a check on the 'G' bit. 
+ */ var->g = s->limit > 0xfffff; - - /* - * Work around a bug where the busy flag in the tr selector - * isn't exposed - */ - if (seg == VCPU_SREG_TR) + break; + case VCPU_SREG_TR: + /* + * Work around a bug where the busy flag in the tr selector + * isn't exposed + */ var->type |= 0x2; + break; + case VCPU_SREG_DS: + case VCPU_SREG_ES: + case VCPU_SREG_FS: + case VCPU_SREG_GS: + /* + * The accessed bit must always be set in the segment + * descriptor cache, although it can be cleared in the + * descriptor, the cached bit always remains at 1. Since + * Intel has a check on this, set it here to support + * cross-vendor migration. + */ + if (!var->unusable) + var->type |= 0x1; + break; + } var->unusable = !var->present; } -- cgit v1.2.3 From c5bc22424021cabda862727fb3f5098b866f074d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 19 Feb 2009 12:18:56 +0100 Subject: KVM: MMU: Fix another largepage memory leak In the paging_fetch function rmap_remove is called after setting a large pte to non-present. This causes rmap_remove to not drop the reference to the large page. The result is a memory leak of that page. Cc: stable@kernel.org Signed-off-by: Joerg Roedel Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 7314c0944c5..0f11792fafa 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -306,9 +306,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, continue; if (is_large_pte(*sptep)) { + rmap_remove(vcpu->kvm, sptep); set_shadow_pte(sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); - rmap_remove(vcpu->kvm, sptep); } if (level == PT_DIRECTORY_LEVEL -- cgit v1.2.3 From b0a1835d53c57bc38b36867c04436b60454cb610 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 17 Feb 2009 16:52:08 +0800 Subject: KVM: ppc: Add emulation of E500 register mmucsr0 Latest kernel flushes TLB via mmucsr0. 
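Concretely, the guest idiom being trapped is the booke flash-invalidate sequence; in C it reads roughly as follows (a rough rendering of what the guest's _tlbil_all does in assembly):

    mtspr(SPRN_MMUCSR0, MMUCSR0_TLB0FI | MMUCSR0_TLB1FI);
    while (mfspr(SPRN_MMUCSR0) & (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI))
            ;       /* hardware clears the bits once the invalidate
                       completes; the mfspr emulation below returns 0,
                       so a guest falls out of this loop immediately */
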
Signed-off-by: Liu Yu Acked-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_emulate.c | 8 ++++++++ arch/powerpc/kvm/e500_tlb.c | 16 ++++++++++++++++ arch/powerpc/kvm/e500_tlb.h | 1 + 3 files changed, 25 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 7a98d4a4c1e..3f760414b9f 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -105,6 +105,11 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_HID1: vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; + case SPRN_MMUCSR0: + emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, + vcpu->arch.gpr[rs]); + break; + /* extra exceptions */ case SPRN_IVOR32: vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; @@ -172,6 +177,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) case SPRN_HID1: vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; + case SPRN_MMUCSR0: + vcpu->arch.gpr[rt] = 0; break; + /* extra exceptions */ case SPRN_IVOR32: vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index d437160d388..72386ddbd9d 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -391,6 +391,22 @@ static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, return 0; } +int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) +{ + int esel; + + if (value & MMUCSR0_TLB0FI) + for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); + if (value & MMUCSR0_TLB1FI) + for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); + + _tlbil_all(); + + return EMULATE_DONE; +} + int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index ab49e935518..4d5cc0f7d79 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h @@ -44,6 +44,7 @@ | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) extern void kvmppc_dump_tlbs(struct kvm_vcpu *); +extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong); extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *); extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *); extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int); -- cgit v1.2.3 From 2fa8937f3af8873e006ce324e7b2d3f9b2077b87 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Mon, 16 Feb 2009 15:14:48 +0800 Subject: ia64: Move the macro definitions related to MSI to one header file. For kvm's MSI support, it needs these macros defined in ia64_msi.c, and to avoid duplicate them, move them to one header file and share with kvm. 
Signed-off-by: Xiantao Zhang Acked-by: Tony Luck Signed-off-by: Avi Kivity --- arch/ia64/include/asm/msidef.h | 42 ++++++++++++++++++++++++++++++++ arch/ia64/kernel/msi_ia64.c | 55 +++++++----------------------------------- 2 files changed, 51 insertions(+), 46 deletions(-) create mode 100644 arch/ia64/include/asm/msidef.h (limited to 'arch') diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h new file mode 100644 index 00000000000..592c1047a0c --- /dev/null +++ b/arch/ia64/include/asm/msidef.h @@ -0,0 +1,42 @@ +#ifndef _IA64_MSI_DEF_H +#define _IA64_MSI_DEF_H + +/* + * Shifts for APIC-based data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) +#define MSI_DATA_VECTOR_MASK 0xffffff00 + +#define MSI_DATA_DELIVERY_MODE_SHIFT 8 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) +#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) + +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) +#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) + +#define MSI_DATA_TRIGGER_SHIFT 15 +#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) + +/* + * Shift/mask fields for APIC-based bus address + */ + +#define MSI_ADDR_DEST_ID_SHIFT 4 +#define MSI_ADDR_HEADER 0xfee00000 + +#define MSI_ADDR_DEST_ID_MASK 0xfff0000f +#define MSI_ADDR_DEST_ID_CPU(cpu) ((cpu) << MSI_ADDR_DEST_ID_SHIFT) + +#define MSI_ADDR_DEST_MODE_SHIFT 2 +#define MSI_ADDR_DEST_MODE_PHYS (0 << MSI_ADDR_DEST_MODE_SHIFT) +#define MSI_ADDR_DEST_MODE_LOGIC (1 << MSI_ADDR_DEST_MODE_SHIFT) + +#define MSI_ADDR_REDIRECTION_SHIFT 3 +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + +#endif/* _IA64_MSI_DEF_H */ diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 89033933903..368ee4e5266 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -7,44 +7,7 @@ #include #include #include - -/* - * Shifts for APIC-based data - */ - -#define MSI_DATA_VECTOR_SHIFT 0 -#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) -#define MSI_DATA_VECTOR_MASK 0xffffff00 - -#define MSI_DATA_DELIVERY_SHIFT 8 -#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT) -#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT) - -#define MSI_DATA_LEVEL_SHIFT 14 -#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) -#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) - -#define MSI_DATA_TRIGGER_SHIFT 15 -#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) -#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) - -/* - * Shift/mask fields for APIC-based bus address - */ - -#define MSI_TARGET_CPU_SHIFT 4 -#define MSI_ADDR_HEADER 0xfee00000 - -#define MSI_ADDR_DESTID_MASK 0xfff0000f -#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT) - -#define MSI_ADDR_DESTMODE_SHIFT 2 -#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) -#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) - -#define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) +#include static struct irq_chip ia64_msi_chip; @@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, read_msi_msg(irq, &msg); addr = msg.address_lo; - addr &= 
MSI_ADDR_DESTID_MASK; - addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); + addr &= MSI_ADDR_DEST_ID_MASK; + addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); msg.address_lo = addr; data = msg.data; @@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) msg.address_hi = 0; msg.address_lo = MSI_ADDR_HEADER | - MSI_ADDR_DESTMODE_PHYS | + MSI_ADDR_DEST_MODE_PHYS | MSI_ADDR_REDIRECTION_CPU | - MSI_ADDR_DESTID_CPU(dest_phys_id); + MSI_ADDR_DEST_ID_CPU(dest_phys_id); msg.data = MSI_DATA_TRIGGER_EDGE | @@ -183,8 +146,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); - msg.address_lo &= ~MSI_ADDR_DESTID_MASK; - msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); dmar_msi_write(irq, &msg); irq_desc[irq].affinity = *mask; @@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) msg->address_hi = 0; msg->address_lo = MSI_ADDR_HEADER | - MSI_ADDR_DESTMODE_PHYS | + MSI_ADDR_DEST_MODE_PHYS | MSI_ADDR_REDIRECTION_CPU | - MSI_ADDR_DESTID_CPU(dest); + MSI_ADDR_DEST_ID_CPU(dest); msg->data = MSI_DATA_TRIGGER_EDGE | -- cgit v1.2.3 From 6b08035f3e64d8e474be166d682b52c95941662e Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Mon, 16 Feb 2009 15:24:05 +0800 Subject: KVM: ia64: Fix the build errors due to lack of macros related to MSI. Include the newly introduced msidef.h to solve the build issues. Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/irq.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h index c6786e8b1bf..c0785a72827 100644 --- a/arch/ia64/kvm/irq.h +++ b/arch/ia64/kvm/irq.h @@ -23,6 +23,8 @@ #ifndef __IRQ_H #define __IRQ_H +#include "lapic.h" + static inline int irqchip_in_kernel(struct kvm *kvm) { return 1; -- cgit v1.2.3 From 401d10dee083bda281f2fdcdf654080313ba30ec Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Fri, 20 Feb 2009 22:53:37 +0530 Subject: KVM: VMX: Update necessary state when guest enters long mode setup_msrs() should be called when entering long mode to save the shadow state for the 64-bit guest state. Using vmx_set_efer() in enter_lmode() removes some duplicated code and also ensures we call setup_msrs(). We can safely pass the value of shadow_efer to vmx_set_efer() as no other bits in the efer change while enabling long mode (guest first sets EFER.LME, then sets CR0.PG which causes a vmexit where we activate long mode). With this fix, is_long_mode() can check for EFER.LMA set instead of EFER.LME and 5e23049e86dd298b72e206b420513dbc3a240cd9 can be reverted. 
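The guest-side transition being described is, schematically (guest code, not KVM code; the helper names are the usual Linux ones and are used here only for illustration):

    write_cr4(read_cr4() | X86_CR4_PAE);    /* long mode requires PAE  */
    wrmsrl(MSR_EFER, efer | EFER_LME);      /* request long mode       */
    write_cr0(read_cr0() | X86_CR0_PG);     /* enabling paging makes
                                             * the CPU set EFER.LMA;
                                             * on VMX this is the exit
                                             * where enter_lmode() and,
                                             * with this patch,
                                             * vmx_set_efer() and
                                             * setup_msrs() run */

The follow-up patch below can then test EFER.LMA in is_long_mode(), i.e. whether long mode is actually active rather than merely requested.
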
Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 54 ++++++++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cb27ffccf46..48063a0aa24 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1430,6 +1430,29 @@ continue_rmode: init_rmode(vcpu->kvm); } +static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); + + vcpu->arch.shadow_efer = efer; + if (!msr) + return; + if (efer & EFER_LMA) { + vmcs_write32(VM_ENTRY_CONTROLS, + vmcs_read32(VM_ENTRY_CONTROLS) | + VM_ENTRY_IA32E_MODE); + msr->data = efer; + } else { + vmcs_write32(VM_ENTRY_CONTROLS, + vmcs_read32(VM_ENTRY_CONTROLS) & + ~VM_ENTRY_IA32E_MODE); + + msr->data = efer & ~EFER_LME; + } + setup_msrs(vmx); +} + #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) @@ -1444,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } - vcpu->arch.shadow_efer |= EFER_LMA; - - find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) - | VM_ENTRY_IA32E_MODE); + vmx_set_efer(vcpu, vcpu->arch.shadow_efer); } static void exit_lmode(struct kvm_vcpu *vcpu) @@ -1609,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vmcs_writel(GUEST_CR4, hw_cr4); } -static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); - - vcpu->arch.shadow_efer = efer; - if (!msr) - return; - if (efer & EFER_LMA) { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) | - VM_ENTRY_IA32E_MODE); - msr->data = efer; - - } else { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) & - ~VM_ENTRY_IA32E_MODE); - - msr->data = efer & ~EFER_LME; - } - setup_msrs(vmx); -} - static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; -- cgit v1.2.3 From 41d6af119206e98764b4ae6d264d63acefcf851e Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Thu, 28 Feb 2008 16:06:15 +0530 Subject: KVM: is_long_mode() should check for EFER.LMA is_long_mode currently checks the LongModeEnable bit in EFER instead of the LongModeActive bit. This is wrong, but we survived this till now since it wasn't triggered. This breaks guests that go from long mode to compatibility mode. This is noticed on a solaris guest and fixes bug #1842160 Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 258e5d56298..eaab2145f62 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) static inline int is_long_mode(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - return vcpu->arch.shadow_efer & EFER_LME; + return vcpu->arch.shadow_efer & EFER_LMA; #else return 0; #endif -- cgit v1.2.3 From d7364a29b305862cbc92b3f8b6ad9968270d068c Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sat, 21 Feb 2009 02:18:13 +0100 Subject: KVM: fix sparse warnings: context imbalance Impact: Attribute function with __acquires(...) resp. __releases(...). 
Fix this sparse warnings: arch/x86/kvm/i8259.c:34:13: warning: context imbalance in 'pic_lock' - wrong count at exit arch/x86/kvm/i8259.c:39:13: warning: context imbalance in 'pic_unlock' - unexpected unlock Signed-off-by: Hannes Eder Signed-off-by: Avi Kivity --- arch/x86/kvm/i8259.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index b4e662e94dd..1ccb50c74f1 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -32,11 +32,13 @@ #include static void pic_lock(struct kvm_pic *s) + __acquires(&s->lock) { spin_lock(&s->lock); } static void pic_unlock(struct kvm_pic *s) + __releases(&s->lock) { struct kvm *kvm = s->kvm; unsigned acks = s->pending_acks; -- cgit v1.2.3 From cded19f396bc3c9d299331709d0a9fbc6ecc148a Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sat, 21 Feb 2009 02:19:13 +0100 Subject: KVM: fix sparse warnings: Should it be static? Impact: Make symbols static. Fix this sparse warnings: arch/x86/kvm/mmu.c:992:5: warning: symbol 'mmu_pages_add' was not declared. Should it be static? arch/x86/kvm/mmu.c:1124:5: warning: symbol 'mmu_pages_next' was not declared. Should it be static? arch/x86/kvm/mmu.c:1144:6: warning: symbol 'mmu_pages_clear_parents' was not declared. Should it be static? arch/x86/kvm/x86.c:2037:5: warning: symbol 'kvm_read_guest_virt' was not declared. Should it be static? arch/x86/kvm/x86.c:2067:5: warning: symbol 'kvm_write_guest_virt' was not declared. Should it be static? virt/kvm/irq_comm.c:220:5: warning: symbol 'setup_routing_entry' was not declared. Should it be static? Signed-off-by: Hannes Eder Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 11 ++++++----- arch/x86/kvm/x86.c | 8 ++++---- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4a21b0f8491..2a36f7f7c4c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -989,8 +989,8 @@ struct kvm_mmu_pages { idx < 512; \ idx = find_next_bit(bitmap, 512, idx+1)) -int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, - int idx) +static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, + int idx) { int i; @@ -1121,8 +1121,9 @@ struct mmu_page_path { i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i = mmu_pages_next(&pvec, &parents, i)) -int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, - int i) +static int mmu_pages_next(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents, + int i) { int n; @@ -1141,7 +1142,7 @@ int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, return n; } -void mmu_pages_clear_parents(struct mmu_page_path *parents) +static void mmu_pages_clear_parents(struct mmu_page_path *parents) { struct kvm_mmu_page *sp; unsigned int level = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e4db5be7c95..8ca100a9eca 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2043,8 +2043,8 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, return dev; } -int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) { void *data = val; int r = X86EMUL_CONTINUE; @@ -2073,8 +2073,8 @@ out: return r; } -int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) +static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu 
*vcpu) { void *data = val; int r = X86EMUL_CONTINUE; -- cgit v1.2.3 From 4539b35881ae9664b0e2953438dd83f5ee02c0b4 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 12 Mar 2009 18:18:43 +0100 Subject: KVM: Fix missing smp tlb flush in invlpg When kvm emulates an invlpg instruction, it can drop a shadow pte, but leaves the guest tlbs intact. This can cause memory corruption when swapping out. Without this the other cpu can still write to a freed host physical page. tlb smp flush must happen if rmap_remove is called always before mmu_lock is released because the VM will take the mmu_lock before it can finally add the page to the freelist after swapout. mmu notifier makes it safe to flush the tlb after freeing the page (otherwise it would never be safe) so we can do a single flush for multiple sptes invalidated. Cc: stable@kernel.org Signed-off-by: Andrea Arcangeli Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 0f11792fafa..6bd70206c56 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -446,6 +446,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) gpa_t pte_gpa = -1; int level; u64 *sptep; + int need_flush = 0; spin_lock(&vcpu->kvm->mmu_lock); @@ -465,6 +466,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) rmap_remove(vcpu->kvm, sptep); if (is_large_pte(*sptep)) --vcpu->kvm->stat.lpages; + need_flush = 1; } set_shadow_pte(sptep, shadow_trap_nonpresent_pte); break; @@ -474,6 +476,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) break; } + if (need_flush) + kvm_flush_remote_tlbs(vcpu->kvm); spin_unlock(&vcpu->kvm->mmu_lock); if (pte_gpa == -1) -- cgit v1.2.3 From bc35cbc85cd78213590761618a13da6a9707652c Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 17 Mar 2009 16:57:45 +0800 Subject: KVM: ppc: e500: Fix the bug that mas0 update to wrong value when read TLB entry Should clear and then update the next victim area here. Guest kernel only read TLB1 when startup kernel, this bug result in an extra 4K TLB1 mapping in guest from 0x0 to 0x0. As the problem has no impact to bootup a guest, we didn't notice it before. Signed-off-by: Liu Yu Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 72386ddbd9d..ec933209e8a 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -448,7 +448,7 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) esel = get_tlb_esel(vcpu_e500, tlbsel); gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; - vcpu_e500->mas0 &= MAS0_NV(0); + vcpu_e500->mas0 &= ~MAS0_NV(~0); vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); vcpu_e500->mas1 = gtlbe->mas1; vcpu_e500->mas2 = gtlbe->mas2; -- cgit v1.2.3 From 046a48b35baa7c66d0d0331256ba12ca51665411 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 17 Mar 2009 16:57:46 +0800 Subject: KVM: ppc: e500: Fix the bug that KVM is unstable in SMP TLB entry should enable memory coherence in SMP. And like commit 631fba9dd3aca519355322cef035730609e91593, remove guard attribute to enable the prefetch of guest memory. 
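A small stand-alone sketch of the attribute filtering this implies (the bit values below are made up for the example and are not the real e500 MAS2 layout; only the masking logic mirrors the patch):

#include <stdint.h>
#include <stdio.h>

/* invented bit values, for illustration only */
#define MAS2_X0 0x40u
#define MAS2_X1 0x20u
#define MAS2_W  0x10u
#define MAS2_I  0x08u
#define MAS2_M  0x04u	/* memory coherence required */
#define MAS2_G  0x02u	/* guarded: blocks prefetching */
#define MAS2_E  0x01u

/* the narrowed mask keeps only the execution hints from the guest entry */
#define MAS2_ATTRIB_MASK (MAS2_X0 | MAS2_X1)

static uint32_t shadow_mas2(uint32_t guest_mas2, int smp_host)
{
	uint32_t mas2 = guest_mas2 & MAS2_ATTRIB_MASK;

	if (smp_host)
		mas2 |= MAS2_M;	/* force coherence on SMP hosts */
	return mas2;
}

int main(void)
{
	uint32_t guest = MAS2_X0 | MAS2_G;	/* guest asked for a guarded mapping */

	printf("UP  shadow MAS2: %#x\n", shadow_mas2(guest, 0));
	printf("SMP shadow MAS2: %#x\n", shadow_mas2(guest, 1));
	return 0;
}

Dropping MAS2_G from the mask is what re-enables prefetching of guest memory, while the unconditional MAS2_M on SMP hosts provides the coherence the commit message asks for.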
Signed-off-by: Liu Yu Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_tlb.c | 4 ++++ arch/powerpc/kvm/e500_tlb.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index ec933209e8a..0e773fc2d5e 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -99,7 +99,11 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) { +#ifdef CONFIG_SMP + return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; +#else return mas2 & MAS2_ATTRIB_MASK; +#endif } /* diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index 4d5cc0f7d79..45b064b7690 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h @@ -38,7 +38,7 @@ #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) #define MAS2_ATTRIB_MASK \ - (MAS2_X0 | MAS2_X1 | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E) + (MAS2_X0 | MAS2_X1) #define MAS3_ATTRIB_MASK \ (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) -- cgit v1.2.3 From 16175a796d061833aacfbd9672235f2d2725df65 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 23 Mar 2009 22:13:44 +0200 Subject: KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 48063a0aa24..bb481330716 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -936,11 +936,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) int ret = 0; switch (msr_index) { -#ifdef CONFIG_X86_64 case MSR_EFER: vmx_load_host_state(vmx); ret = kvm_set_msr_common(vcpu, msr_index, data); break; +#ifdef CONFIG_X86_64 case MSR_FS_BASE: vmcs_writel(GUEST_FS_BASE, data); break; -- cgit v1.2.3 From 49121aa14c2a372a5fd01982df900257784be63d Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Sat, 7 Mar 2009 11:44:21 +0000 Subject: USB: S3C: Move usb-control.h to platform include The usb-control.h is needed by ohci-s3c2410.c for both S3C24XX and S3C64XX architectures, so move it to Signed-off-by: Ben Dooks Cc: David Brownell Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-s3c2410/include/mach/usb-control.h | 41 ------------------------ arch/arm/mach-s3c2410/usb-simtec.c | 3 +- arch/arm/plat-s3c/include/plat/usb-control.h | 41 ++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 42 deletions(-) delete mode 100644 arch/arm/mach-s3c2410/include/mach/usb-control.h create mode 100644 arch/arm/plat-s3c/include/plat/usb-control.h (limited to 'arch') diff --git a/arch/arm/mach-s3c2410/include/mach/usb-control.h b/arch/arm/mach-s3c2410/include/mach/usb-control.h deleted file mode 100644 index cd91d1591f3..00000000000 --- a/arch/arm/mach-s3c2410/include/mach/usb-control.h +++ /dev/null @@ -1,41 +0,0 @@ -/* arch/arm/mach-s3c2410/include/mach/usb-control.h - * - * Copyright (c) 2004 Simtec Electronics - * Ben Dooks - * - * S3C2410 - usb port information - * - * This program is free software; you can 
redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef __ASM_ARCH_USBCONTROL_H -#define __ASM_ARCH_USBCONTROL_H "arch/arm/mach-s3c2410/include/mach/usb-control.h" - -#define S3C_HCDFLG_USED (1) - -struct s3c2410_hcd_port { - unsigned char flags; - unsigned char power; - unsigned char oc_status; - unsigned char oc_changed; -}; - -struct s3c2410_hcd_info { - struct usb_hcd *hcd; - struct s3c2410_hcd_port port[2]; - - void (*power_control)(int port, int to); - void (*enable_oc)(struct s3c2410_hcd_info *, int on); - void (*report_oc)(struct s3c2410_hcd_info *, int ports); -}; - -static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) -{ - if (info->report_oc != NULL) { - (info->report_oc)(info, ports); - } -} - -#endif /*__ASM_ARCH_USBCONTROL_H */ diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c index 6078f09b7df..8331e8d97e2 100644 --- a/arch/arm/mach-s3c2410/usb-simtec.c +++ b/arch/arm/mach-s3c2410/usb-simtec.c @@ -29,13 +29,14 @@ #include #include -#include #include #include #include +#include #include + #include "usb-simtec.h" /* control power and monitor over-current events on various Simtec diff --git a/arch/arm/plat-s3c/include/plat/usb-control.h b/arch/arm/plat-s3c/include/plat/usb-control.h new file mode 100644 index 00000000000..822c87fe948 --- /dev/null +++ b/arch/arm/plat-s3c/include/plat/usb-control.h @@ -0,0 +1,41 @@ +/* arch/arm/plat-s3c/include/plat/usb-control.h + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks + * + * S3C - USB host port information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __ASM_ARCH_USBCONTROL_H +#define __ASM_ARCH_USBCONTROL_H + +#define S3C_HCDFLG_USED (1) + +struct s3c2410_hcd_port { + unsigned char flags; + unsigned char power; + unsigned char oc_status; + unsigned char oc_changed; +}; + +struct s3c2410_hcd_info { + struct usb_hcd *hcd; + struct s3c2410_hcd_port port[2]; + + void (*power_control)(int port, int to); + void (*enable_oc)(struct s3c2410_hcd_info *, int on); + void (*report_oc)(struct s3c2410_hcd_info *, int ports); +}; + +static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) +{ + if (info->report_oc != NULL) { + (info->report_oc)(info, ports); + } +} + +#endif /*__ASM_ARCH_USBCONTROL_H */ -- cgit v1.2.3 From 54ca5412b5576fdb0a4ea4fedf6565bd6f34150c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 26 Jan 2009 09:12:12 -0800 Subject: PS3: replace bus_id usage These simple debug statments should be using dev_dbg() instead of accessing bus_id directly (or they should use device_name). As bus_id is going away, this patch is necessary. 
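A toy user-space model of the pattern the patch switches to (struct fake_device and fdev_dbg() are invented for this sketch; they are not the kernel's struct device or dev_dbg()):

#include <stdio.h>

struct fake_device {
	const char *init_name;	/* stand-in for the old bus_id field */
};

static const char *fdev_name(const struct fake_device *dev)
{
	return dev->init_name ? dev->init_name : "(unnamed)";
}

/* prefixes every message with the device name, the way dev_dbg() does */
#define fdev_dbg(dev, fmt, ...) \
	fprintf(stderr, "%s: " fmt, fdev_name(dev), ##__VA_ARGS__)	/* GNU ##__VA_ARGS__ */

int main(void)
{
	struct fake_device dev = { .init_name = "ps3-system-bus" };

	fdev_dbg(&dev, "%s:%d\n", __func__, __LINE__);
	return 0;
}

The point is the same as in the patch: the caller never dereferences the raw identifier field, so that field can be removed without touching every debug statement.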
Acked-by: Geoff Levand Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/ps3/system-bus.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 58311a86785..a705fffbb49 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -376,7 +376,7 @@ static int ps3_system_bus_probe(struct device *_dev) struct ps3_system_bus_driver *drv; BUG_ON(!dev); - pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, _dev->bus_id); + dev_dbg(_dev, "%s:%d\n", __func__, __LINE__); drv = ps3_system_bus_dev_to_system_bus_drv(dev); BUG_ON(!drv); @@ -398,7 +398,7 @@ static int ps3_system_bus_remove(struct device *_dev) struct ps3_system_bus_driver *drv; BUG_ON(!dev); - pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, _dev->bus_id); + dev_dbg(_dev, "%s:%d\n", __func__, __LINE__); drv = ps3_system_bus_dev_to_system_bus_drv(dev); BUG_ON(!drv); -- cgit v1.2.3 From 7a192ec334cab9fafe3a8665a65af398b0e24730 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 6 Feb 2009 23:40:12 +0800 Subject: platform driver: fix incorrect use of 'platform_bus_type' with 'struct device_driver' This patch fixes the bug reported in http://bugzilla.kernel.org/show_bug.cgi?id=11681. "Lots of device drivers register a 'struct device_driver' with the '.bus' member set to '&platform_bus_type'. This is wrong, since the platform_bus functions expect the 'struct device_driver' to be wrapped up in a 'struct platform_driver' which provides some additional callbacks (like suspend_late, resume_early). The effect may be that platform_suspend_late() uses bogus data outside the device_driver struct as a pointer pointer to the device driver's suspend_late() function or other hard to reproduce failures."(Lothar Wassmann) Signed-off-by: Ming Lei Acked-by: Henrique de Moraes Holschuh Acked-by: David Brownell Signed-off-by: Greg Kroah-Hartman --- arch/mips/basler/excite/excite_iodev.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c index a1e3526b4a9..dfbfd7e2ac0 100644 --- a/arch/mips/basler/excite/excite_iodev.c +++ b/arch/mips/basler/excite/excite_iodev.c @@ -33,8 +33,8 @@ static const struct resource *iodev_get_resource(struct platform_device *, const char *, unsigned int); -static int __init iodev_probe(struct device *); -static int __exit iodev_remove(struct device *); +static int __init iodev_probe(struct platform_device *); +static int __exit iodev_remove(struct platform_device *); static int iodev_open(struct inode *, struct file *); static int iodev_release(struct inode *, struct file *); static ssize_t iodev_read(struct file *, char __user *, size_t s, loff_t *); @@ -65,13 +65,13 @@ static struct miscdevice miscdev = .fops = &fops }; -static struct device_driver iodev_driver = -{ - .name = (char *) iodev_name, - .bus = &platform_bus_type, - .owner = THIS_MODULE, +static struct platform_driver iodev_driver = { + .driver = { + .name = iodev_name, + .owner = THIS_MODULE, + }, .probe = iodev_probe, - .remove = __exit_p(iodev_remove) + .remove = __devexit_p(iodev_remove), }; @@ -89,11 +89,10 @@ iodev_get_resource(struct platform_device *pdv, const char *name, /* No hotplugging on the platform bus - use __init */ -static int __init iodev_probe(struct device *dev) +static int __init iodev_probe(struct platform_device *dev) { - 
struct platform_device * const pdv = to_platform_device(dev); const struct resource * const ri = - iodev_get_resource(pdv, IODEV_RESOURCE_IRQ, IORESOURCE_IRQ); + iodev_get_resource(dev, IODEV_RESOURCE_IRQ, IORESOURCE_IRQ); if (unlikely(!ri)) return -ENXIO; @@ -104,7 +103,7 @@ static int __init iodev_probe(struct device *dev) -static int __exit iodev_remove(struct device *dev) +static int __exit iodev_remove(struct platform_device *dev) { return misc_deregister(&miscdev); } @@ -160,14 +159,14 @@ static irqreturn_t iodev_irqhdl(int irq, void *ctxt) static int __init iodev_init_module(void) { - return driver_register(&iodev_driver); + return platform_driver_register(&iodev_driver); } static void __exit iodev_cleanup_module(void) { - driver_unregister(&iodev_driver); + platform_driver_unregister(&iodev_driver); } module_init(iodev_init_module); -- cgit v1.2.3 From 9e058d4f57751daa008b764735f97fdfccfeab6c Mon Sep 17 00:00:00 2001 From: Thomas Reitmayr Date: Tue, 24 Feb 2009 14:59:22 -0800 Subject: [WATCHDOG] orion5x_wdt: fix compile issue by providing tclk as platform data The orion5x-wdt driver is now registered as a platform device and receives the tclk value as platform data. This fixes a compile issue cause by a previously removed define "ORION5X_TCLK". Signed-off-by: Thomas Reitmayr Acked-by: Nicolas Pitre Signed-off-by: Kristof Provost Cc: Lennert Buytenhek Cc: Wim Van Sebroeck Cc: Russell King Cc: Martin Michlmayr Cc: Sylver Bruneau Cc: Kunihiko IMAI Signed-off-by: Andrew Morton --- arch/arm/mach-orion5x/common.c | 29 ++++++++++++++++++++++++++ arch/arm/plat-orion/include/plat/orion5x_wdt.h | 18 ++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 arch/arm/plat-orion/include/plat/orion5x_wdt.h (limited to 'arch') diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 8a0e49d8425..1a1df24f419 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "common.h" @@ -532,6 +533,29 @@ void __init orion5x_xor_init(void) } +/***************************************************************************** + * Watchdog + ****************************************************************************/ +static struct orion5x_wdt_platform_data orion5x_wdt_data = { + .tclk = 0, +}; + +static struct platform_device orion5x_wdt_device = { + .name = "orion5x_wdt", + .id = -1, + .dev = { + .platform_data = &orion5x_wdt_data, + }, + .num_resources = 0, +}; + +void __init orion5x_wdt_init(void) +{ + orion5x_wdt_data.tclk = orion5x_tclk; + platform_device_register(&orion5x_wdt_device); +} + + /***************************************************************************** * Time handling ****************************************************************************/ @@ -631,6 +655,11 @@ void __init orion5x_init(void) printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); disable_hlt(); } + + /* + * Register watchdog driver + */ + orion5x_wdt_init(); } /* diff --git a/arch/arm/plat-orion/include/plat/orion5x_wdt.h b/arch/arm/plat-orion/include/plat/orion5x_wdt.h new file mode 100644 index 00000000000..3c9cf6a305e --- /dev/null +++ b/arch/arm/plat-orion/include/plat/orion5x_wdt.h @@ -0,0 +1,18 @@ +/* + * arch/arm/plat-orion/include/plat/orion5x_wdt.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __PLAT_ORION5X_WDT_H +#define __PLAT_ORION5X_WDT_H + +struct orion5x_wdt_platform_data { + u32 tclk; /* no support yet */ +}; + + +#endif + -- cgit v1.2.3 From 86ee79c3dbd48d7430fd81edc1da3516c9f6dabc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 26 Mar 2009 01:54:46 -0700 Subject: sparc64: Flush TLB before releasing pages. tlb_flush_mmu() needs to flush pending TLB entries before processing the mmu_gather ->pages list. Noticed by Benjamin Herrenschmidt. Signed-off-by: David S. Miller --- arch/sparc/include/asm/tlb_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index ec81cdedef2..ee38e731bfa 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -57,6 +57,8 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i static inline void tlb_flush_mmu(struct mmu_gather *mp) { + if (!mp->fullmm) + flush_tlb_pending(); if (mp->need_flush) { free_pages_and_swap_cache(mp->pages, mp->pages_nr); mp->pages_nr = 0; @@ -78,8 +80,6 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un if (mp->fullmm) mp->fullmm = 0; - else - flush_tlb_pending(); /* keep the page table cache within bounds */ check_pgt_cache(); -- cgit v1.2.3 From d7fd5f1e3b195a8232b3ed768ac2809ddce8ca46 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 26 Mar 2009 15:23:42 +0100 Subject: [S390] fix dump_stack vs. %p and (null) The s390 implemenation of dump_stack uses %p to display stack content. Since d97106ab53f812910a62d18afb9dbe882819c1ba (Make %p print '(null)' for NULL pointers) this causes a strange output for dump_stack: [...] Process basename (pid: 8822, task: 00000000b2ece038, ksp: 00000000b24d7b38) 04000000b5685c00 00000000b24d7760 0000000000000002 (null) 00000000b24d7800 00000000b24d7778 00000000b24d7778 00000000001052fe (null) 00000000b24d7b38 (null) 000000000000000a 000000000000000d (null) 00000000b24d7760 00000000b24d77d8 000000000051a7e8 00000000001052fe 00000000b24d7760 00000000b24d77b0 Call Trace: [...] This patch changes our dump_stack to use the appropriate %x format. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/traps.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 4584d81984c..c2e42cc65ce 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -61,9 +61,11 @@ extern pgm_check_handler_t do_asce_exception; #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) #ifndef CONFIG_64BIT +#define LONG "%08lx " #define FOURLONG "%08lx %08lx %08lx %08lx\n" static int kstack_depth_to_print = 12; #else /* CONFIG_64BIT */ +#define LONG "%016lx " #define FOURLONG "%016lx %016lx %016lx %016lx\n" static int kstack_depth_to_print = 20; #endif /* CONFIG_64BIT */ @@ -155,7 +157,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) break; if (i && ((i * sizeof (long) % 32) == 0)) printk("\n "); - printk("%p ", (void *)*stack++); + printk(LONG, *stack++); } printk("\n"); show_trace(task, sp); -- cgit v1.2.3 From 099b765139929efdcf232f8870804accf8c4cdc5 Mon Sep 17 00:00:00 2001 From: Frank Munzert Date: Thu, 26 Mar 2009 15:23:43 +0100 Subject: [S390] Automatic IPL after dump Provide new shutdown action "dump_reipl" for automatic ipl after dump. 
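The handoff this action sets up can be modelled in plain C. In this sketch the additive checksum and the ipib_handoff struct are stand-ins for the cksm instruction and the lowcore fields used by the real code; they are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

struct ipib_handoff {		/* stand-in for the lowcore fields */
	uint64_t ipib;		/* address of the IPL parameter block */
	uint32_t ipib_checksum;	/* checksum over that block */
};

static uint32_t checksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	uint8_t ipl_block[64] = "re-IPL parameters would live here";
	struct ipib_handoff lc;

	lc.ipib = (uint64_t)(uintptr_t)ipl_block;
	lc.ipib_checksum = checksum(ipl_block, sizeof(ipl_block));

	/* a dump tool would re-run the checksum before trusting the block */
	printf("ipib=%#llx checksum=%#x\n",
	       (unsigned long long)lc.ipib, lc.ipib_checksum);
	return 0;
}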
Signed-off-by: Frank Munzert Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/include/asm/lowcore.h | 24 ++++++++++-------- arch/s390/include/asm/system.h | 16 ++++++++++++ arch/s390/kernel/ipl.c | 56 ++++++++++++++++++++++++++++++++++++++--- arch/s390/kernel/setup.c | 5 ++++ arch/s390/kernel/smp.c | 9 ------- 5 files changed, 86 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f3720defdd1..ee4b10ff938 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -111,7 +111,7 @@ #define __LC_PASTE 0xE40 -#define __LC_PANIC_MAGIC 0xE00 +#define __LC_DUMP_REIPL 0xE00 #ifndef __s390x__ #define __LC_PFAULT_INTPARM 0x080 #define __LC_CPU_TIMER_SAVE_AREA 0x0D8 @@ -286,12 +286,14 @@ struct _lowcore __u64 int_clock; /* 0xc98 */ __u8 pad11[0xe00-0xca0]; /* 0xca0 */ - /* 0xe00 is used as indicator for dump tools */ - /* whether the kernel died with panic() or not */ - __u32 panic_magic; /* 0xe00 */ + /* 0xe00 contains the address of the IPL Parameter */ + /* Information block. Dump tools need IPIB for IPL */ + /* after dump. */ + __u32 ipib; /* 0xe00 */ + __u32 ipib_checksum; /* 0xe04 */ /* Align to the top 1k of prefix area */ - __u8 pad12[0x1000-0xe04]; /* 0xe04 */ + __u8 pad12[0x1000-0xe08]; /* 0xe08 */ #else /* !__s390x__ */ /* prefix area: defined by architecture */ __u32 ccw1[2]; /* 0x000 */ @@ -379,12 +381,14 @@ struct _lowcore __u64 int_clock; /* 0xde8 */ __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ - /* 0xe00 is used as indicator for dump tools */ - /* whether the kernel died with panic() or not */ - __u32 panic_magic; /* 0xe00 */ + /* 0xe00 contains the address of the IPL Parameter */ + /* Information block. Dump tools need IPIB for IPL */ + /* after dump. 
*/ + __u64 ipib; /* 0xe00 */ + __u32 ipib_checksum; /* 0xe08 */ /* Per cpu primary space access list */ - __u8 pad_0xe04[0xe38-0xe04]; /* 0xe04 */ + __u8 pad_0xe0c[0xe38-0xe0c]; /* 0xe0c */ __u64 vdso_per_cpu_data; /* 0xe38 */ __u32 paste[16]; /* 0xe40 */ @@ -433,8 +437,6 @@ static inline __u32 store_prefix(void) return address; } -#define __PANIC_MAGIC 0xDEADC0DE - #endif #endif diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 3a8b26eb1f2..3f2ccb82b86 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -458,6 +458,22 @@ static inline unsigned short stap(void) return cpu_address; } +static inline u32 cksm(void *addr, unsigned long len) +{ + register unsigned long _addr asm("0") = (unsigned long) addr; + register unsigned long _len asm("1") = len; + unsigned long accu = 0; + + asm volatile( + "0:\n" + " cksm %0,%1\n" + " jnz 0b\n" + : "+d" (accu), "+d" (_addr), "+d" (_len) + : + : "cc", "memory"); + return accu; +} + extern void (*_machine_restart)(char *command); extern void (*_machine_halt)(void); extern void (*_machine_power_off)(void); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 2dcf590faba..5663c1f8e46 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -56,13 +56,14 @@ struct shutdown_trigger { }; /* - * Five shutdown action types are supported: + * The following shutdown action types are supported: */ #define SHUTDOWN_ACTION_IPL_STR "ipl" #define SHUTDOWN_ACTION_REIPL_STR "reipl" #define SHUTDOWN_ACTION_DUMP_STR "dump" #define SHUTDOWN_ACTION_VMCMD_STR "vmcmd" #define SHUTDOWN_ACTION_STOP_STR "stop" +#define SHUTDOWN_ACTION_DUMP_REIPL_STR "dump_reipl" struct shutdown_action { char *name; @@ -146,6 +147,7 @@ static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static struct ipl_parameter_block *reipl_block_nss; +static struct ipl_parameter_block *reipl_block_actual; static int dump_capabilities = DUMP_TYPE_NONE; static enum dump_type dump_type = DUMP_TYPE_NONE; @@ -835,6 +837,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_CCW_VM; else reipl_method = REIPL_METHOD_CCW_CIO; + reipl_block_actual = reipl_block_ccw; break; case IPL_TYPE_FCP: if (diag308_set_works) @@ -843,6 +846,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_FCP_RO_VM; else reipl_method = REIPL_METHOD_FCP_RO_DIAG; + reipl_block_actual = reipl_block_fcp; break; case IPL_TYPE_FCP_DUMP: reipl_method = REIPL_METHOD_FCP_DUMP; @@ -852,6 +856,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_NSS_DIAG; else reipl_method = REIPL_METHOD_NSS; + reipl_block_actual = reipl_block_nss; break; case IPL_TYPE_UNKNOWN: reipl_method = REIPL_METHOD_DEFAULT; @@ -1332,6 +1337,48 @@ static struct shutdown_action __refdata dump_action = { .init = dump_init, }; +static void dump_reipl_run(struct shutdown_trigger *trigger) +{ + preempt_disable(); + /* + * Bypass dynamic address translation (DAT) when storing IPL parameter + * information block address and checksum into the prefix area + * (corresponding to absolute addresses 0-8191). + * When enhanced DAT applies and the STE format control in one, + * the absolute address is formed without prefixing. In this case a + * normal store (stg/st) into the prefix area would no more match to + * absolute addresses 0-8191. 
+ */ +#ifdef CONFIG_64BIT + asm volatile("sturg %0,%1" + :: "a" ((unsigned long) reipl_block_actual), + "a" (&lowcore_ptr[smp_processor_id()]->ipib)); +#else + asm volatile("stura %0,%1" + :: "a" ((unsigned long) reipl_block_actual), + "a" (&lowcore_ptr[smp_processor_id()]->ipib)); +#endif + asm volatile("stura %0,%1" + :: "a" (cksm(reipl_block_actual, reipl_block_actual->hdr.len)), + "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum)); + preempt_enable(); + dump_run(trigger); +} + +static int __init dump_reipl_init(void) +{ + if (!diag308_set_works) + return -EOPNOTSUPP; + else + return 0; +} + +static struct shutdown_action __refdata dump_reipl_action = { + .name = SHUTDOWN_ACTION_DUMP_REIPL_STR, + .fn = dump_reipl_run, + .init = dump_reipl_init, +}; + /* * vmcmd shutdown action: Trigger vm command on shutdown. */ @@ -1421,7 +1468,8 @@ static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, /* action list */ static struct shutdown_action *shutdown_actions_list[] = { - &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action}; + &ipl_action, &reipl_action, &dump_reipl_action, &dump_action, + &vmcmd_action, &stop_action}; #define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *)) /* @@ -1434,11 +1482,11 @@ static int set_trigger(const char *buf, struct shutdown_trigger *trigger, size_t len) { int i; + for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) { if (!shutdown_actions_list[i]) continue; - if (strncmp(buf, shutdown_actions_list[i]->name, - strlen(shutdown_actions_list[i]->name)) == 0) { + if (sysfs_streq(buf, shutdown_actions_list[i]->name)) { trigger->action = shutdown_actions_list[i]; return len; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c5cfb6185ea..8fdf08379ce 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -86,6 +86,10 @@ volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ int __initdata memory_end_set; unsigned long __initdata memory_end; +/* An array with a pointer to the lowcore of every CPU. */ +struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); + /* * This is set up by the setup-routine at boot-time * for S390 need to find out, what we have to setup @@ -434,6 +438,7 @@ setup_lowcore(void) lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif set_prefix((u32)(unsigned long) lc); + lowcore_ptr[0] = lc; } static void __init diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2d337cbb932..e279d0fbbbe 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -50,12 +50,6 @@ #include #include "entry.h" -/* - * An array with a pointer the lowcore of every CPU. - */ -struct _lowcore *lowcore_ptr[NR_CPUS]; -EXPORT_SYMBOL(lowcore_ptr); - static struct task_struct *current_set[NR_CPUS]; static u8 smp_cpu_type; @@ -82,9 +76,6 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ - lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; - /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) -- cgit v1.2.3 From 59fa4392dddae244a1148cbbcb090b5a5728f576 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:23:44 +0100 Subject: [S390] page fault: invoke oom-killer s390 arch backend for 1c0fe6e3bda0464728c23c8d84aa47567e8b716c "mm: invoke oom-killer from page fault". 
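A toy model of the resulting error path (function names invented; only the control flow mirrors the change): on an out-of-memory fault result the arch handler now releases its lock and defers to the generic helper instead of looping or killing the task itself.

#include <stdio.h>

enum fault_result { FAULT_OK, FAULT_OOM, FAULT_SIGBUS };

static void release_mmap_lock(void)   { puts("mmap_sem released"); }
static void generic_oom_handler(void) { puts("common OOM handling runs"); }
static void deliver_sigbus(void)      { puts("SIGBUS delivered"); }

static void fault_error_path(enum fault_result res)
{
	switch (res) {
	case FAULT_OOM:
		release_mmap_lock();
		generic_oom_handler();	/* was: local retry/kill loop */
		break;
	case FAULT_SIGBUS:
		deliver_sigbus();
		break;
	default:
		puts("fault handled normally");
	}
}

int main(void)
{
	fault_error_path(FAULT_OOM);
	return 0;
}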
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 4d537205e83..833e8366c35 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -200,29 +200,6 @@ static void do_low_address(struct pt_regs *regs, unsigned long error_code) do_no_context(regs, error_code, 0); } -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ -static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code, - unsigned long address) -{ - struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; - - up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - return 1; - } - printk("VM: killing process %s\n", tsk->comm); - if (regs->psw.mask & PSW_MASK_PSTATE) - do_group_exit(SIGKILL); - do_no_context(regs, error_code, address); - return 0; -} - static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) { @@ -367,7 +344,6 @@ good_area: goto bad_area; } -survive: if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; /* @@ -378,8 +354,8 @@ survive: fault = handle_mm_fault(mm, vma, address, write); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { - if (do_out_of_memory(regs, error_code, address)) - goto survive; + up_read(&mm->mmap_sem); + pagefault_out_of_memory(); return; } else if (fault & VM_FAULT_SIGBUS) { do_sigbus(regs, error_code, address); -- cgit v1.2.3 From b44b0ab3bac16356f03e94b1b49ba9305710c445 Mon Sep 17 00:00:00 2001 From: Stefan Weinhuber Date: Thu, 26 Mar 2009 15:23:47 +0100 Subject: [S390] dasd: add large volume support The dasd device driver will now support ECKD devices with more then 65520 cylinders. In the traditional ECKD adressing scheme each track is addressed by a 16-bit cylinder and 16-bit head number. The new addressing scheme makes use of the fact that the actual number of heads is never larger then 15, so 12 bits of the head number can be redefined to be part of the cylinder address. 
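The bit borrowing described above can be sketched in a few lines of stand-alone C. The exact bit placement here is an assumption for illustration; the authoritative layout is defined by the ECKD architecture and the driver. With at most 15 heads, only 4 of the 16 head bits carry information, so 12 of them can extend the cylinder number to 28 bits:

#include <stdint.h>
#include <stdio.h>

struct ch {			/* traditional cylinder/head pair */
	uint16_t cyl;
	uint16_t head;
};

static struct ch pack_large(uint32_t cyl, uint8_t head)
{
	struct ch a;

	a.cyl = cyl & 0xffff;				/* low 16 cylinder bits */
	a.head = (head & 0x0f) |
		 (((cyl >> 16) & 0x0fff) << 4);		/* head + high 12 cylinder bits */
	return a;
}

static uint32_t unpack_cyl(struct ch a)
{
	return a.cyl | ((uint32_t)(a.head >> 4) << 16);
}

int main(void)
{
	uint32_t cyl = 100000;	/* beyond the old 65520-cylinder limit */
	struct ch a = pack_large(cyl, 7);

	printf("packed cyl=%#x head=%#x -> cyl=%u head=%u\n",
	       a.cyl, a.head, unpack_cyl(a), a.head & 0x0f);
	return 0;
}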
Signed-off-by: Stefan Weinhuber Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/dasd.h | 10 +++++----- arch/s390/include/asm/vtoc.h | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h index e2db6f16d9c..218bce81ec7 100644 --- a/arch/s390/include/asm/dasd.h +++ b/arch/s390/include/asm/dasd.h @@ -162,15 +162,15 @@ typedef struct dasd_profile_info_t { unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */ } dasd_profile_info_t; -/* +/* * struct format_data_t * represents all data necessary to format a dasd */ typedef struct format_data_t { - int start_unit; /* from track */ - int stop_unit; /* to track */ - int blksize; /* sectorsize */ - int intensity; + unsigned int start_unit; /* from track */ + unsigned int stop_unit; /* to track */ + unsigned int blksize; /* sectorsize */ + unsigned int intensity; } format_data_t; /* diff --git a/arch/s390/include/asm/vtoc.h b/arch/s390/include/asm/vtoc.h index 3a5267d90d2..8406a2b3157 100644 --- a/arch/s390/include/asm/vtoc.h +++ b/arch/s390/include/asm/vtoc.h @@ -39,7 +39,7 @@ struct vtoc_labeldate __u16 day; } __attribute__ ((packed)); -struct vtoc_volume_label +struct vtoc_volume_label_cdl { char volkey[4]; /* volume key = volume label */ char vollbl[4]; /* volume label */ @@ -56,6 +56,14 @@ struct vtoc_volume_label char res3[29]; /* reserved */ } __attribute__ ((packed)); +struct vtoc_volume_label_ldl { + char vollbl[4]; /* volume label */ + char volid[6]; /* volume identifier */ + char res3[69]; /* reserved */ + char ldl_version; /* version number, valid for ldl format */ + __u64 formatted_blocks; /* valid when ldl_version >= f2 */ +} __attribute__ ((packed)); + struct vtoc_extent { __u8 typeind; /* extent type indicator */ @@ -140,7 +148,11 @@ struct vtoc_format4_label char res2[10]; /* reserved */ __u8 DS4EFLVL; /* extended free-space management level */ struct vtoc_cchhb DS4EFPTR; /* pointer to extended free-space info */ - char res3[9]; /* reserved */ + char res3; /* reserved */ + __u32 DS4DCYL; /* number of logical cyls */ + char res4[2]; /* reserved */ + __u8 DS4DEVF2; /* device flags */ + char res5; /* reserved */ } __attribute__ ((packed)); struct vtoc_ds5ext -- cgit v1.2.3 From f3eb5384cf0325c02e306b1d81e70f81a03d7432 Mon Sep 17 00:00:00 2001 From: Stefan Weinhuber Date: Thu, 26 Mar 2009 15:23:48 +0100 Subject: [S390] dasd: add High Performance FICON support To support High Performance FICON, the DASD device driver has to translate I/O requests into the new transport mode control words (TCW) instead of the traditional (command mode) CCW requests. Signed-off-by: Stefan Weinhuber Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/idals.h | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index e82c10efe65..aae276d0038 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -44,24 +44,18 @@ idal_is_needed(void *vaddr, unsigned int length) /* * Return the number of idal words needed for an address/length pair. 
*/ -static inline unsigned int -idal_nr_words(void *vaddr, unsigned int length) +static inline unsigned int idal_nr_words(void *vaddr, unsigned int length) { -#ifdef __s390x__ - if (idal_is_needed(vaddr, length)) - return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + - (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; -#endif - return 0; + return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + + (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; } /* * Create the list of idal words for an address/length pair. */ -static inline unsigned long * -idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length) +static inline unsigned long *idal_create_words(unsigned long *idaws, + void *vaddr, unsigned int length) { -#ifdef __s390x__ unsigned long paddr; unsigned int cidaw; @@ -74,7 +68,6 @@ idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length) paddr += IDA_BLOCK_SIZE; *idaws++ = paddr; } -#endif return idaws; } -- cgit v1.2.3 From 94f5b09d97ee1f803c76d0262e0b0d3791825d09 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:23:50 +0100 Subject: [S390] move sysinfo.c from drivers/s390 to arch/s390/kernel All in sysinfo.c is core kernel code and not driver code. So move it to arch/s390/kernel. Also includes some small cleanups. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/Makefile | 4 +- arch/s390/kernel/sysinfo.c | 428 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 431 insertions(+), 1 deletion(-) create mode 100644 arch/s390/kernel/sysinfo.c (limited to 'arch') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 3edc6c6f258..33e7aee7051 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -17,10 +17,12 @@ CFLAGS_smp.o := -Wno-nonnull # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' +CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w + obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o + vdso.o vtime.o sysinfo.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c new file mode 100644 index 00000000000..b5e75e1061c --- /dev/null +++ b/arch/s390/kernel/sysinfo.c @@ -0,0 +1,428 @@ +/* + * Copyright IBM Corp. 2001, 2009 + * Author(s): Ulrich Weigand , + * Martin Schwidefsky , + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Sigh, math-emu. Don't ask. */ +#include +#include +#include + +static inline int stsi_0(void) +{ + int rc = stsi(NULL, 0, 0, 0); + return rc == -ENOSYS ? 
rc : (((unsigned int) rc) >> 28); +} + +static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) +{ + if (stsi(info, 1, 1, 1) == -ENOSYS) + return len; + + EBCASC(info->manufacturer, sizeof(info->manufacturer)); + EBCASC(info->type, sizeof(info->type)); + EBCASC(info->model, sizeof(info->model)); + EBCASC(info->sequence, sizeof(info->sequence)); + EBCASC(info->plant, sizeof(info->plant)); + EBCASC(info->model_capacity, sizeof(info->model_capacity)); + EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap)); + EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); + len += sprintf(page + len, "Manufacturer: %-16.16s\n", + info->manufacturer); + len += sprintf(page + len, "Type: %-4.4s\n", + info->type); + if (info->model[0] != '\0') + /* + * Sigh: the model field has been renamed with System z9 + * to model_capacity and a new model field has been added + * after the plant field. To avoid confusing older programs + * the "Model:" prints "model_capacity model" or just + * "model_capacity" if the model string is empty . + */ + len += sprintf(page + len, + "Model: %-16.16s %-16.16s\n", + info->model_capacity, info->model); + else + len += sprintf(page + len, "Model: %-16.16s\n", + info->model_capacity); + len += sprintf(page + len, "Sequence Code: %-16.16s\n", + info->sequence); + len += sprintf(page + len, "Plant: %-4.4s\n", + info->plant); + len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n", + info->model_capacity, *(u32 *) info->model_cap_rating); + if (info->model_perm_cap[0] != '\0') + len += sprintf(page + len, + "Model Perm. Capacity: %-16.16s %08u\n", + info->model_perm_cap, + *(u32 *) info->model_perm_cap_rating); + if (info->model_temp_cap[0] != '\0') + len += sprintf(page + len, + "Model Temp. Capacity: %-16.16s %08u\n", + info->model_temp_cap, + *(u32 *) info->model_temp_cap_rating); + return len; +} + +static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) +{ + struct sysinfo_1_2_2_extension *ext; + int i; + + if (stsi(info, 1, 2, 2) == -ENOSYS) + return len; + ext = (struct sysinfo_1_2_2_extension *) + ((unsigned long) info + info->acc_offset); + + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "CPUs Total: %d\n", + info->cpus_total); + len += sprintf(page + len, "CPUs Configured: %d\n", + info->cpus_configured); + len += sprintf(page + len, "CPUs Standby: %d\n", + info->cpus_standby); + len += sprintf(page + len, "CPUs Reserved: %d\n", + info->cpus_reserved); + + if (info->format == 1) { + /* + * Sigh 2. According to the specification the alternate + * capability field is a 32 bit floating point number + * if the higher order 8 bits are not zero. Printing + * a floating point number in the kernel is a no-no, + * always print the number as 32 bit unsigned integer. + * The user-space needs to know about the strange + * encoding of the alternate cpu capability. 
+ */ + len += sprintf(page + len, "Capability: %u %u\n", + info->capability, ext->alt_capability); + for (i = 2; i <= info->cpus_total; i++) + len += sprintf(page + len, + "Adjustment %02d-way: %u %u\n", + i, info->adjustment[i-2], + ext->alt_adjustment[i-2]); + + } else { + len += sprintf(page + len, "Capability: %u\n", + info->capability); + for (i = 2; i <= info->cpus_total; i++) + len += sprintf(page + len, + "Adjustment %02d-way: %u\n", + i, info->adjustment[i-2]); + } + + if (info->secondary_capability != 0) + len += sprintf(page + len, "Secondary Capability: %d\n", + info->secondary_capability); + return len; +} + +static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len) +{ + if (stsi(info, 2, 2, 2) == -ENOSYS) + return len; + + EBCASC(info->name, sizeof(info->name)); + + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "LPAR Number: %d\n", + info->lpar_number); + + len += sprintf(page + len, "LPAR Characteristics: "); + if (info->characteristics & LPAR_CHAR_DEDICATED) + len += sprintf(page + len, "Dedicated "); + if (info->characteristics & LPAR_CHAR_SHARED) + len += sprintf(page + len, "Shared "); + if (info->characteristics & LPAR_CHAR_LIMITED) + len += sprintf(page + len, "Limited "); + len += sprintf(page + len, "\n"); + + len += sprintf(page + len, "LPAR Name: %-8.8s\n", + info->name); + + len += sprintf(page + len, "LPAR Adjustment: %d\n", + info->caf); + + len += sprintf(page + len, "LPAR CPUs Total: %d\n", + info->cpus_total); + len += sprintf(page + len, "LPAR CPUs Configured: %d\n", + info->cpus_configured); + len += sprintf(page + len, "LPAR CPUs Standby: %d\n", + info->cpus_standby); + len += sprintf(page + len, "LPAR CPUs Reserved: %d\n", + info->cpus_reserved); + len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n", + info->cpus_dedicated); + len += sprintf(page + len, "LPAR CPUs Shared: %d\n", + info->cpus_shared); + return len; +} + +static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len) +{ + int i; + + if (stsi(info, 3, 2, 2) == -ENOSYS) + return len; + for (i = 0; i < info->count; i++) { + EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); + EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "VM%02d Name: %-8.8s\n", + i, info->vm[i].name); + len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n", + i, info->vm[i].cpi); + + len += sprintf(page + len, "VM%02d Adjustment: %d\n", + i, info->vm[i].caf); + + len += sprintf(page + len, "VM%02d CPUs Total: %d\n", + i, info->vm[i].cpus_total); + len += sprintf(page + len, "VM%02d CPUs Configured: %d\n", + i, info->vm[i].cpus_configured); + len += sprintf(page + len, "VM%02d CPUs Standby: %d\n", + i, info->vm[i].cpus_standby); + len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n", + i, info->vm[i].cpus_reserved); + } + return len; +} + +static int proc_read_sysinfo(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + unsigned long info = get_zeroed_page(GFP_KERNEL); + int level, len; + + if (!info) + return 0; + + len = 0; + level = stsi_0(); + if (level >= 1) + len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); + + if (level >= 1) + len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); + + if (level >= 2) + len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len); + + if (level >= 3) + len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len); + + free_page(info); + return len; +} + +static __init int create_proc_sysinfo(void) +{ + 
create_proc_read_entry("sysinfo", 0444, NULL, + proc_read_sysinfo, NULL); + return 0; +} +device_initcall(create_proc_sysinfo); + +/* + * Service levels interface. + */ + +static DECLARE_RWSEM(service_level_sem); +static LIST_HEAD(service_level_list); + +int register_service_level(struct service_level *slr) +{ + struct service_level *ptr; + + down_write(&service_level_sem); + list_for_each_entry(ptr, &service_level_list, list) + if (ptr == slr) { + up_write(&service_level_sem); + return -EEXIST; + } + list_add_tail(&slr->list, &service_level_list); + up_write(&service_level_sem); + return 0; +} +EXPORT_SYMBOL(register_service_level); + +int unregister_service_level(struct service_level *slr) +{ + struct service_level *ptr, *next; + int rc = -ENOENT; + + down_write(&service_level_sem); + list_for_each_entry_safe(ptr, next, &service_level_list, list) { + if (ptr != slr) + continue; + list_del(&ptr->list); + rc = 0; + break; + } + up_write(&service_level_sem); + return rc; +} +EXPORT_SYMBOL(unregister_service_level); + +static void *service_level_start(struct seq_file *m, loff_t *pos) +{ + down_read(&service_level_sem); + return seq_list_start(&service_level_list, *pos); +} + +static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &service_level_list, pos); +} + +static void service_level_stop(struct seq_file *m, void *p) +{ + up_read(&service_level_sem); +} + +static int service_level_show(struct seq_file *m, void *p) +{ + struct service_level *slr; + + slr = list_entry(p, struct service_level, list); + slr->seq_print(m, slr); + return 0; +} + +static const struct seq_operations service_level_seq_ops = { + .start = service_level_start, + .next = service_level_next, + .stop = service_level_stop, + .show = service_level_show +}; + +static int service_level_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &service_level_seq_ops); +} + +static const struct file_operations service_level_ops = { + .open = service_level_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static void service_level_vm_print(struct seq_file *m, + struct service_level *slr) +{ + char *query_buffer, *str; + + query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); + if (!query_buffer) + return; + cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); + str = strchr(query_buffer, '\n'); + if (str) + *str = 0; + seq_printf(m, "VM: %s\n", query_buffer); + kfree(query_buffer); +} + +static struct service_level service_level_vm = { + .seq_print = service_level_vm_print +}; + +static __init int create_proc_service_level(void) +{ + proc_create("service_levels", 0, NULL, &service_level_ops); + if (MACHINE_IS_VM) + register_service_level(&service_level_vm); + return 0; +} +subsys_initcall(create_proc_service_level); + +/* + * Bogomips calculation based on cpu capability. + */ +int get_cpu_capability(unsigned int *capability) +{ + struct sysinfo_1_2_2 *info; + int rc; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return -ENOMEM; + rc = stsi(info, 1, 2, 2); + if (rc == -ENOSYS) + goto out; + rc = 0; + *capability = info->capability; +out: + free_page((unsigned long) info); + return rc; +} + +/* + * CPU capability might have changed. Therefore recalculate loops_per_jiffy. + */ +void s390_adjust_jiffies(void) +{ + struct sysinfo_1_2_2 *info; + const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. 
*/ + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_EX; + unsigned int capability; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return; + + if (stsi(info, 1, 2, 2) != -ENOSYS) { + /* + * Major sigh. The cpu capability encoding is "special". + * If the first 9 bits of info->capability are 0 then it + * is a 32 bit unsigned integer in the range 0 .. 2^23. + * If the first 9 bits are != 0 then it is a 32 bit float. + * In addition a lower value indicates a proportionally + * higher cpu capacity. Bogomips are the other way round. + * To get to a halfway suitable number we divide 1e7 + * by the cpu capability number. Yes, that means a floating + * point division .. math-emu here we come :-) + */ + FP_UNPACK_SP(SA, &fmil); + if ((info->capability >> 23) == 0) + FP_FROM_INT_S(SB, info->capability, 32, int); + else + FP_UNPACK_SP(SB, &info->capability); + FP_DIV_S(SR, SA, SB); + FP_TO_INT_S(capability, SR, 32, 0); + } else + /* + * Really old machine without stsi block for basic + * cpu information. Report 42.0 bogomips. + */ + capability = 42; + loops_per_jiffy = capability * (500000/HZ); + free_page((unsigned long) info); +} + +/* + * calibrate the delay loop + */ +void __cpuinit calibrate_delay(void) +{ + s390_adjust_jiffies(); + /* Print the good old Bogomips line .. */ + printk(KERN_DEBUG "Calibrating delay loop (skipped)... " + "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100); +} -- cgit v1.2.3 From cbdc229245e8cf5c201e68221ebf2f33d2aaf029 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:23:52 +0100 Subject: [S390] arch/s390/kernel/process.c: fix whitespace damage Fix all the whitespace damage in process.c, especially copy_thread(). Next patch will add code to copy_thread() which needs to 'fixed' first. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/process.c | 64 +++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 37 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5cd38a90e64..e0563baa1cf 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -1,18 +1,10 @@ /* - * arch/s390/kernel/process.c + * This file handles the architecture dependent parts of process handling. * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * Hartmut Penner (hp@de.ibm.com), - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * - * Derived from "arch/i386/kernel/process.c" - * Copyright (C) 1995, Linus Torvalds - */ - -/* - * This file handles the architecture-dependent parts of process handling.. + * Copyright IBM Corp. 
1999,2009 + * Author(s): Martin Schwidefsky , + * Hartmut Penner , + * Denis Joseph Barrow, */ #include @@ -168,34 +160,34 @@ void release_thread(struct task_struct *dead_task) } int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, - unsigned long unused, - struct task_struct * p, struct pt_regs * regs) + unsigned long unused, + struct task_struct *p, struct pt_regs *regs) { - struct fake_frame - { - struct stack_frame sf; - struct pt_regs childregs; - } *frame; - - frame = container_of(task_pt_regs(p), struct fake_frame, childregs); - p->thread.ksp = (unsigned long) frame; + struct fake_frame + { + struct stack_frame sf; + struct pt_regs childregs; + } *frame; + + frame = container_of(task_pt_regs(p), struct fake_frame, childregs); + p->thread.ksp = (unsigned long) frame; /* Store access registers to kernel stack of new process. */ - frame->childregs = *regs; + frame->childregs = *regs; frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ - frame->childregs.gprs[15] = new_stackp; - frame->sf.back_chain = 0; + frame->childregs.gprs[15] = new_stackp; + frame->sf.back_chain = 0; - /* new return point is ret_from_fork */ - frame->sf.gprs[8] = (unsigned long) ret_from_fork; + /* new return point is ret_from_fork */ + frame->sf.gprs[8] = (unsigned long) ret_from_fork; - /* fake return stack for resume(), don't go back to schedule */ - frame->sf.gprs[9] = (unsigned long) frame; + /* fake return stack for resume(), don't go back to schedule */ + frame->sf.gprs[9] = (unsigned long) frame; /* Save access registers to new thread structure. */ save_access_regs(&p->thread.acrs[0]); #ifndef CONFIG_64BIT - /* + /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the child. */ @@ -220,10 +212,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, #endif /* CONFIG_64BIT */ /* start new process with ar4 pointing to the correct address space */ p->thread.mm_segment = get_fs(); - /* Don't copy debug registers */ - memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); - - return 0; + /* Don't copy debug registers */ + memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); + return 0; } SYSCALL_DEFINE0(fork) @@ -311,7 +302,7 @@ out: int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { #ifndef CONFIG_64BIT - /* + /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the dump. */ @@ -346,4 +337,3 @@ unsigned long get_wchan(struct task_struct *p) } return 0; } - -- cgit v1.2.3 From 5168ce2c647f02756803bef7b74906f485491a1c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:23:53 +0100 Subject: [S390] cputime: initialize per thread timer values on fork Initialize per thread timer values instead of just copying them from the parent. That way it is easily possible to tell how much time a thread spent in user/system context. Doesn't fix a bug, this is just for debugging purposes. 
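A minimal sketch of the idea with invented types (not the kernel's struct thread_info): copy the parent's bookkeeping, then zero the timers so they only ever account time spent in the child.

#include <stdio.h>

struct toy_thread_info {
	unsigned long user_timer;
	unsigned long system_timer;
	unsigned long flags;
};

static void toy_copy_thread(struct toy_thread_info *child,
			    const struct toy_thread_info *parent)
{
	*child = *parent;	/* inherit flags and the rest */
	child->user_timer = 0;	/* but account CPU time from scratch */
	child->system_timer = 0;
}

int main(void)
{
	struct toy_thread_info parent = { 12345, 6789, 0x1 };
	struct toy_thread_info child;

	toy_copy_thread(&child, &parent);
	printf("child user=%lu system=%lu flags=%#lx\n",
	       child.user_timer, child.system_timer, child.flags);
	return 0;
}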
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/process.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index e0563baa1cf..e6b480625cb 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -163,6 +163,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { + struct thread_info *ti; struct fake_frame { struct stack_frame sf; @@ -214,6 +215,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, p->thread.mm_segment = get_fs(); /* Don't copy debug registers */ memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); + /* Initialize per thread user and system timer values */ + ti = task_thread_info(p); + ti->user_timer = 0; + ti->system_timer = 0; return 0; } -- cgit v1.2.3 From 3324e60aafa9aeb1009878d45079b67367a5e2b6 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 26 Mar 2009 15:23:56 +0100 Subject: [S390] lockdep: trace hardirq off in smp_send_stop With lockdep we got the following trace after a panic: Badness at /home/autobuild/BUILD/linux-2.6.28-20090204/kernel/lockdep.c:2878 [...] Call Trace: [<0000000000176334>] lock_acquire+0x54/0xbc [<000000000050b4fe>] __atomic_notifier_call_chain+0x6e/0xdc [<000000000050b59c>] atomic_notifier_call_chain+0x30/0x44 [<0000000000504274>] panic+0xd0/0x1e8 [...] INFO: lockdep is turned off. Last Breaking-Event-Address: [<0000000000170e62>] check_flags+0xae/0x15c possible reason: unannotated irqs-off. lockdep is right. We missed a trace_hardirq_off in our smp_send_stop function and smp_send_stop is called before the panic call chain. Reported-by: Mijo Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index e279d0fbbbe..a8858634dd0 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -75,6 +76,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); + trace_hardirqs_off(); /* stop all processors */ for_each_online_cpu(cpu) { -- cgit v1.2.3 From 702d9e584feb028ed7e2a6d2b103b8ea57622ff2 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Thu, 26 Mar 2009 15:23:57 +0100 Subject: [S390] check addressing mode in s390_enable_sie The sie instruction requires address spaces to be switched to run proper. This patch verifies that this is the case in s390_enable_sie, otherwise the kernel would crash badly as soon as the process runs into sie. Signed-off-by: Carsten Otte Signed-off-by: Martin Schwidefsky --- arch/s390/mm/pgtable.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 6b6ddc4ea02..9bf86125f6f 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -258,6 +258,10 @@ int s390_enable_sie(void) struct task_struct *tsk = current; struct mm_struct *mm, *old_mm; + /* Do we have switched amode? If no, we cannot do sie */ + if (!switch_amode) + return -EINVAL; + /* Do we have pgstes? 
if yes, we are done */ if (tsk->mm->context.has_pgste) return 0; -- cgit v1.2.3 From 92e6ecf392fac3082653ac9d84b1bdf53d0ea160 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 26 Mar 2009 15:23:58 +0100 Subject: [S390] Fix hypervisor detection for KVM Currently we use the cpuid (via STIDP instruction) to recognize LPAR, z/VM and KVM. The architecture states, that bit 0-7 of STIDP returns all zero, and if STIDP is executed in a virtual machine, the VM operating system will replace bits 0-7 with FF. KVM should not use FE to distinguish z/VM from KVM for interested guests. The proper way to detect the hypervisor is the STSI (Store System Information) instruction, which return information about the hypervisors via function code 3, selector1=2, selector2=2. This patch changes the detection routine of Linux to use STSI instead of STIDP. This detection is earlier than bootmem, we have to use a static buffer. Since STSI expects a 4kb block (4kb aligned) this patch also changes the init.data alignment for s390. As this section will be freed during boot, this should be no problem. Patch is tested with LPAR, z/VM, KVM on LPAR, and KVM under z/VM. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/sysinfo.h | 1 + arch/s390/kernel/early.c | 22 +++++++++++++--------- arch/s390/kernel/vmlinux.lds.S | 2 ++ arch/s390/kvm/kvm-s390.c | 2 +- 4 files changed, 17 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index ad93212d9e1..9d70057d828 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -100,6 +100,7 @@ struct sysinfo_3_2_2 { char reserved_1[24]; } vm[8]; + char reserved_544[3552]; }; static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 2a2ca268b1d..d61eb3334ed 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -6,6 +6,7 @@ * Heiko Carstens */ +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include #include "entry.h" @@ -173,19 +175,21 @@ static noinline __init void init_kernel_storage_key(void) page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); } +static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); + static noinline __init void detect_machine_type(void) { - struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; - - get_cpu_id(&S390_lowcore.cpu_data.cpu_id); - - /* Running under z/VM ? */ - if (cpuinfo->cpu_id.version == 0xff) - machine_flags |= MACHINE_FLAG_VM; + /* No VM information? Looks like LPAR */ + if (stsi(&vmms, 3, 2, 2) == -ENOSYS) + return; + if (!vmms.count) + return; - /* Running under KVM ? */ - if (cpuinfo->cpu_id.version == 0xfe) + /* Running under KVM? If not we assume z/VM */ + if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) machine_flags |= MACHINE_FLAG_KVM; + else + machine_flags |= MACHINE_FLAG_VM; } static __init void early_pgm_check_handler(void) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index d796d05c9c0..7a2063eb88f 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -108,6 +108,8 @@ SECTIONS EXIT_TEXT } + /* early.c uses stsi, which requires page aligned data. */ + . 
= ALIGN(PAGE_SIZE); .init.data : { INIT_DATA } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0d33893e1e8..c55a4b9ffd8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -286,7 +286,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, (unsigned long) vcpu); get_cpu_id(&vcpu->arch.cpu_id); - vcpu->arch.cpu_id.version = 0xfe; + vcpu->arch.cpu_id.version = 0xff; return 0; } -- cgit v1.2.3 From 70193af9188113c9b4ff3dde1aed9f9c8f7c4f93 Mon Sep 17 00:00:00 2001 From: Sachin Sant Date: Thu, 26 Mar 2009 15:24:00 +0100 Subject: [S390] Fix appldata build break with !NET With CONFIG_NET not set appldata build breaks on s390. arch/s390/appldata/built-in.o: In function appldata_get_net_sum_data: appldata_net_sum.c:(.text+0x2684): undefined reference to dev_get_stats appldata_net_sum.c:(.text+0x2688): undefined reference to init_net appldata_net_sum.c:(.text+0x268c): undefined reference to init_net appldata_net_sum.c:(.text+0x2694): undefined reference to dev_base_lock The following patch fixes the issue for me. Signed-off-by: Sachin Sant Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 6b0a3538dc6..3f470604a89 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -521,7 +521,7 @@ config APPLDATA_OS config APPLDATA_NET_SUM tristate "Monitor overall network statistics" - depends on APPLDATA_BASE + depends on APPLDATA_BASE && NET help This provides network related data to the Linux - VM Monitor Stream, currently there is only a total sum of network I/O statistics, no -- cgit v1.2.3 From f5daba1d4116d964435ddd99f32b6c80448a496b Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:01 +0100 Subject: [S390] split/move machine check handler code Split machine check handler code and move it to cio and kernel code where it belongs to. No functional change. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/crw.h | 68 ++++++++ arch/s390/include/asm/nmi.h | 66 ++++++++ arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/nmi.c | 383 ++++++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/process.c | 2 +- arch/s390/kvm/kvm-s390.c | 4 +- 6 files changed, 520 insertions(+), 5 deletions(-) create mode 100644 arch/s390/include/asm/crw.h create mode 100644 arch/s390/include/asm/nmi.h create mode 100644 arch/s390/kernel/nmi.c (limited to 'arch') diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h new file mode 100644 index 00000000000..2185a6d619d --- /dev/null +++ b/arch/s390/include/asm/crw.h @@ -0,0 +1,68 @@ +/* + * Data definitions for channel report processing + * Copyright IBM Corp. 
2000,2009 + * Author(s): Ingo Adlung , + * Martin Schwidefsky , + * Cornelia Huck , + * Heiko Carstens , + */ + +#ifndef _ASM_S390_CRW_H +#define _ASM_S390_CRW_H + +#include + +/* + * Channel Report Word + */ +struct crw { + __u32 res1 : 1; /* reserved zero */ + __u32 slct : 1; /* solicited */ + __u32 oflw : 1; /* overflow */ + __u32 chn : 1; /* chained */ + __u32 rsc : 4; /* reporting source code */ + __u32 anc : 1; /* ancillary report */ + __u32 res2 : 1; /* reserved zero */ + __u32 erc : 6; /* error-recovery code */ + __u32 rsid : 16; /* reporting-source ID */ +} __attribute__ ((packed)); + +typedef void (*crw_handler_t)(struct crw *, struct crw *, int); + +extern int crw_register_handler(int rsc, crw_handler_t handler); +extern void crw_unregister_handler(int rsc); +extern void crw_handle_channel_report(void); + +#define NR_RSCS 16 + +#define CRW_RSC_MONITOR 0x2 /* monitoring facility */ +#define CRW_RSC_SCH 0x3 /* subchannel */ +#define CRW_RSC_CPATH 0x4 /* channel path */ +#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */ +#define CRW_RSC_CSS 0xB /* channel subsystem */ + +#define CRW_ERC_EVENT 0x00 /* event information pending */ +#define CRW_ERC_AVAIL 0x01 /* available */ +#define CRW_ERC_INIT 0x02 /* initialized */ +#define CRW_ERC_TERROR 0x03 /* temporary error */ +#define CRW_ERC_IPARM 0x04 /* installed parm initialized */ +#define CRW_ERC_TERM 0x05 /* terminal */ +#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */ +#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */ +#define CRW_ERC_PMOD 0x08 /* installed parameters modified */ + +static inline int stcrw(struct crw *pcrw) +{ + int ccode; + + asm volatile( + " stcrw 0(%2)\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (ccode), "=m" (*pcrw) + : "a" (pcrw) + : "cc" ); + return ccode; +} + +#endif /* _ASM_S390_CRW_H */ diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h new file mode 100644 index 00000000000..f4b60441adc --- /dev/null +++ b/arch/s390/include/asm/nmi.h @@ -0,0 +1,66 @@ +/* + * Machine check handler definitions + * + * Copyright IBM Corp. 
2000,2009 + * Author(s): Ingo Adlung , + * Martin Schwidefsky , + * Cornelia Huck , + * Heiko Carstens , + */ + +#ifndef _ASM_S390_NMI_H +#define _ASM_S390_NMI_H + +#include + +struct mci { + __u32 sd : 1; /* 00 system damage */ + __u32 pd : 1; /* 01 instruction-processing damage */ + __u32 sr : 1; /* 02 system recovery */ + __u32 : 1; /* 03 */ + __u32 cd : 1; /* 04 timing-facility damage */ + __u32 ed : 1; /* 05 external damage */ + __u32 : 1; /* 06 */ + __u32 dg : 1; /* 07 degradation */ + __u32 w : 1; /* 08 warning pending */ + __u32 cp : 1; /* 09 channel-report pending */ + __u32 sp : 1; /* 10 service-processor damage */ + __u32 ck : 1; /* 11 channel-subsystem damage */ + __u32 : 2; /* 12-13 */ + __u32 b : 1; /* 14 backed up */ + __u32 : 1; /* 15 */ + __u32 se : 1; /* 16 storage error uncorrected */ + __u32 sc : 1; /* 17 storage error corrected */ + __u32 ke : 1; /* 18 storage-key error uncorrected */ + __u32 ds : 1; /* 19 storage degradation */ + __u32 wp : 1; /* 20 psw mwp validity */ + __u32 ms : 1; /* 21 psw mask and key validity */ + __u32 pm : 1; /* 22 psw program mask and cc validity */ + __u32 ia : 1; /* 23 psw instruction address validity */ + __u32 fa : 1; /* 24 failing storage address validity */ + __u32 : 1; /* 25 */ + __u32 ec : 1; /* 26 external damage code validity */ + __u32 fp : 1; /* 27 floating point register validity */ + __u32 gr : 1; /* 28 general register validity */ + __u32 cr : 1; /* 29 control register validity */ + __u32 : 1; /* 30 */ + __u32 st : 1; /* 31 storage logical validity */ + __u32 ie : 1; /* 32 indirect storage error */ + __u32 ar : 1; /* 33 access register validity */ + __u32 da : 1; /* 34 delayed access exception */ + __u32 : 7; /* 35-41 */ + __u32 pr : 1; /* 42 tod programmable register validity */ + __u32 fc : 1; /* 43 fp control register validity */ + __u32 ap : 1; /* 44 ancillary report */ + __u32 : 1; /* 45 */ + __u32 ct : 1; /* 46 cpu timer validity */ + __u32 cc : 1; /* 47 clock comparator validity */ + __u32 : 16; /* 47-63 */ +}; + +struct pt_regs; + +extern void s390_handle_mcck(void); +extern void s390_do_machine_check(struct pt_regs *regs); + +#endif /* _ASM_S390_NMI_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 33e7aee7051..228e3105ded 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -22,7 +22,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o + vdso.o vtime.o sysinfo.o nmi.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c new file mode 100644 index 00000000000..8d50eaf76a6 --- /dev/null +++ b/arch/s390/kernel/nmi.c @@ -0,0 +1,383 @@ +/* + * Machine check handler + * + * Copyright IBM Corp. 
2000,2009 + * Author(s): Ingo Adlung , + * Martin Schwidefsky , + * Cornelia Huck , + * Heiko Carstens , + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mcck_struct { + int kill_task; + int channel_report; + int warning; + unsigned long long mcck_code; +}; + +static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); + +static NORET_TYPE void s390_handle_damage(char *msg) +{ + smp_send_stop(); + disabled_wait((unsigned long) __builtin_return_address(0)); + while (1); +} + +/* + * Main machine check handler function. Will be called with interrupts enabled + * or disabled and machine checks enabled or disabled. + */ +void s390_handle_mcck(void) +{ + unsigned long flags; + struct mcck_struct mcck; + + /* + * Disable machine checks and get the current state of accumulated + * machine checks. Afterwards delete the old state and enable machine + * checks again. + */ + local_irq_save(flags); + local_mcck_disable(); + mcck = __get_cpu_var(cpu_mcck); + memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); + clear_thread_flag(TIF_MCCK_PENDING); + local_mcck_enable(); + local_irq_restore(flags); + + if (mcck.channel_report) + crw_handle_channel_report(); + +#ifdef CONFIG_MACHCHK_WARNING +/* + * The warning may remain for a prolonged period on the bare iron. + * (actually till the machine is powered off, or until the problem is gone) + * So we just stop listening for the WARNING MCH and prevent continuously + * being interrupted. One caveat is however, that we must do this per + * processor and cannot use the smp version of ctl_clear_bit(). + * On VM we only get one interrupt per virtally presented machinecheck. + * Though one suffices, we may get one interrupt per (virtual) processor. + */ + if (mcck.warning) { /* WARNING pending ? */ + static int mchchk_wng_posted = 0; + /* + * Use single machine clear, as we cannot handle smp right now + */ + __ctl_clear_bit(14, 24); /* Disable WARNING MCH */ + if (xchg(&mchchk_wng_posted, 1) == 0) + kill_cad_pid(SIGPWR, 1); + } +#endif + + if (mcck.kill_task) { + local_irq_enable(); + printk(KERN_EMERG "mcck: Terminating task because of machine " + "malfunction (code 0x%016llx).\n", mcck.mcck_code); + printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", + current->comm, current->pid); + do_exit(SIGSEGV); + } +} +EXPORT_SYMBOL_GPL(s390_handle_mcck); + +/* + * returns 0 if all registers could be validated + * returns 1 otherwise + */ +static int notrace s390_revalidate_registers(struct mci *mci) +{ + int kill_task; + u64 tmpclock; + u64 zero; + void *fpt_save_area, *fpt_creg_save_area; + + kill_task = 0; + zero = 0; + + if (!mci->gr) { + /* + * General purpose registers couldn't be restored and have + * unknown contents. Process needs to be terminated. + */ + kill_task = 1; + } + if (!mci->fp) { + /* + * Floating point registers can't be restored and + * therefore the process needs to be terminated. + */ + kill_task = 1; + } +#ifndef CONFIG_64BIT + asm volatile( + " ld 0,0(%0)\n" + " ld 2,8(%0)\n" + " ld 4,16(%0)\n" + " ld 6,24(%0)" + : : "a" (&S390_lowcore.floating_pt_save_area)); +#endif + + if (MACHINE_HAS_IEEE) { +#ifdef CONFIG_64BIT + fpt_save_area = &S390_lowcore.floating_pt_save_area; + fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; +#else + fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; + fpt_creg_save_area = fpt_save_area + 128; +#endif + if (!mci->fc) { + /* + * Floating point control register can't be restored. + * Task will be terminated. 
+ */ + asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); + kill_task = 1; + + } else + asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); + + asm volatile( + " ld 0,0(%0)\n" + " ld 1,8(%0)\n" + " ld 2,16(%0)\n" + " ld 3,24(%0)\n" + " ld 4,32(%0)\n" + " ld 5,40(%0)\n" + " ld 6,48(%0)\n" + " ld 7,56(%0)\n" + " ld 8,64(%0)\n" + " ld 9,72(%0)\n" + " ld 10,80(%0)\n" + " ld 11,88(%0)\n" + " ld 12,96(%0)\n" + " ld 13,104(%0)\n" + " ld 14,112(%0)\n" + " ld 15,120(%0)\n" + : : "a" (fpt_save_area)); + } + /* Revalidate access registers */ + asm volatile( + " lam 0,15,0(%0)" + : : "a" (&S390_lowcore.access_regs_save_area)); + if (!mci->ar) { + /* + * Access registers have unknown contents. + * Terminating task. + */ + kill_task = 1; + } + /* Revalidate control registers */ + if (!mci->cr) { + /* + * Control registers have unknown contents. + * Can't recover and therefore stopping machine. + */ + s390_handle_damage("invalid control registers."); + } else { +#ifdef CONFIG_64BIT + asm volatile( + " lctlg 0,15,0(%0)" + : : "a" (&S390_lowcore.cregs_save_area)); +#else + asm volatile( + " lctl 0,15,0(%0)" + : : "a" (&S390_lowcore.cregs_save_area)); +#endif + } + /* + * We don't even try to revalidate the TOD register, since we simply + * can't write something sensible into that register. + */ +#ifdef CONFIG_64BIT + /* + * See if we can revalidate the TOD programmable register with its + * old contents (should be zero) otherwise set it to zero. + */ + if (!mci->pr) + asm volatile( + " sr 0,0\n" + " sckpf" + : : : "0", "cc"); + else + asm volatile( + " l 0,0(%0)\n" + " sckpf" + : : "a" (&S390_lowcore.tod_progreg_save_area) + : "0", "cc"); +#endif + /* Revalidate clock comparator register */ + asm volatile( + " stck 0(%1)\n" + " sckc 0(%1)" + : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); + + /* Check if old PSW is valid */ + if (!mci->wp) + /* + * Can't tell if we come from user or kernel mode + * -> stopping machine. + */ + s390_handle_damage("old psw invalid."); + + if (!mci->ms || !mci->pm || !mci->ia) + kill_task = 1; + + return kill_task; +} + +#define MAX_IPD_COUNT 29 +#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */ + +#define ED_STP_ISLAND 6 /* External damage STP island check */ +#define ED_STP_SYNC 7 /* External damage STP sync check */ +#define ED_ETR_SYNC 12 /* External damage ETR sync check */ +#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ + +/* + * machine check handler. 
+ */ +void notrace s390_do_machine_check(struct pt_regs *regs) +{ + static int ipd_count; + static DEFINE_SPINLOCK(ipd_lock); + static unsigned long long last_ipd; + struct mcck_struct *mcck; + unsigned long long tmp; + struct mci *mci; + int umode; + + lockdep_off(); + s390_idle_check(); + + mci = (struct mci *) &S390_lowcore.mcck_interruption_code; + mcck = &__get_cpu_var(cpu_mcck); + umode = user_mode(regs); + + if (mci->sd) { + /* System damage -> stopping machine */ + s390_handle_damage("received system damage machine check."); + } + if (mci->pd) { + if (mci->b) { + /* Processing backup -> verify if we can survive this */ + u64 z_mcic, o_mcic, t_mcic; +#ifdef CONFIG_64BIT + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | + 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | + 1ULL<<16); +#else + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | + 1ULL<<29); + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | + 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); +#endif + t_mcic = *(u64 *)mci; + + if (((t_mcic & z_mcic) != 0) || + ((t_mcic & o_mcic) != o_mcic)) { + s390_handle_damage("processing backup machine " + "check with damage."); + } + + /* + * Nullifying exigent condition, therefore we might + * retry this instruction. + */ + spin_lock(&ipd_lock); + tmp = get_clock(); + if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) + ipd_count++; + else + ipd_count = 1; + last_ipd = tmp; + if (ipd_count == MAX_IPD_COUNT) + s390_handle_damage("too many ipd retries."); + spin_unlock(&ipd_lock); + } else { + /* Processing damage -> stopping machine */ + s390_handle_damage("received instruction processing " + "damage machine check."); + } + } + if (s390_revalidate_registers(mci)) { + if (umode) { + /* + * Couldn't restore all register contents while in + * user mode -> mark task for termination. + */ + mcck->kill_task = 1; + mcck->mcck_code = *(unsigned long long *) mci; + set_thread_flag(TIF_MCCK_PENDING); + } else { + /* + * Couldn't restore all register contents while in + * kernel mode -> stopping machine. 
+ */ + s390_handle_damage("unable to revalidate registers."); + } + } + if (mci->cd) { + /* Timing facility damage */ + s390_handle_damage("TOD clock damaged"); + } + if (mci->ed && mci->ec) { + /* External damage */ + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) + etr_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) + etr_switch_to_local(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) + stp_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) + stp_island_check(); + } + if (mci->se) + /* Storage error uncorrected */ + s390_handle_damage("received storage error uncorrected " + "machine check."); + if (mci->ke) + /* Storage key-error uncorrected */ + s390_handle_damage("received storage key-error uncorrected " + "machine check."); + if (mci->ds && mci->fa) + /* Storage degradation */ + s390_handle_damage("received storage degradation machine " + "check."); + if (mci->cp) { + /* Channel report word pending */ + mcck->channel_report = 1; + set_thread_flag(TIF_MCCK_PENDING); + } + if (mci->w) { + /* Warning pending */ + mcck->warning = 1; + set_thread_flag(TIF_MCCK_PENDING); + } + lockdep_on(); +} + +static int __init machine_check_init(void) +{ + ctl_set_bit(14, 25); /* enable external damage MCH */ + ctl_set_bit(14, 27); /* enable system recovery MCH */ +#ifdef CONFIG_MACHCHK_WARNING + ctl_set_bit(14, 24); /* enable warning MCH */ +#endif + return 0; +} +arch_initcall(machine_check_init); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index e6b480625cb..bd616fb31e7 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "entry.h" asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); @@ -68,7 +69,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sf->gprs[8]; } -extern void s390_handle_mcck(void); /* * The idle loop on a S390... */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c55a4b9ffd8..caa4d287701 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -23,7 +23,7 @@ #include #include #include - +#include #include "kvm-s390.h" #include "gaccess.h" @@ -440,8 +440,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return -EINVAL; /* not implemented yet */ } -extern void s390_handle_mcck(void); - static void __vcpu_run(struct kvm_vcpu *vcpu) { memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); -- cgit v1.2.3 From 7b886416dfd76df9dd7304868556b1d82cf38890 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:02 +0100 Subject: [S390] Remove CONFIG_MACHCHK_WARNING. Everybody enables it so there is no point for an extra config option. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 7 ------- arch/s390/kernel/nmi.c | 29 +++++++++++------------------ 2 files changed, 11 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3f470604a89..0a9463bea75 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -343,13 +343,6 @@ source "mm/Kconfig" comment "I/O subsystem configuration" -config MACHCHK_WARNING - bool "Process warning machine checks" - help - Select this option if you want the machine check handler on IBM S/390 or - zSeries to process warning machine checks (e.g. on power failures). - If unsure, say "Y". 
- config QDIO tristate "QDIO support" ---help--- diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 8d50eaf76a6..4bfdc421d7e 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -59,28 +59,23 @@ void s390_handle_mcck(void) if (mcck.channel_report) crw_handle_channel_report(); - -#ifdef CONFIG_MACHCHK_WARNING -/* - * The warning may remain for a prolonged period on the bare iron. - * (actually till the machine is powered off, or until the problem is gone) - * So we just stop listening for the WARNING MCH and prevent continuously - * being interrupted. One caveat is however, that we must do this per - * processor and cannot use the smp version of ctl_clear_bit(). - * On VM we only get one interrupt per virtally presented machinecheck. - * Though one suffices, we may get one interrupt per (virtual) processor. - */ + /* + * A warning may remain for a prolonged period on the bare iron. + * (actually until the machine is powered off, or the problem is gone) + * So we just stop listening for the WARNING MCH and avoid continuously + * being interrupted. One caveat is however, that we must do this per + * processor and cannot use the smp version of ctl_clear_bit(). + * On VM we only get one interrupt per virtally presented machinecheck. + * Though one suffices, we may get one interrupt per (virtual) cpu. + */ if (mcck.warning) { /* WARNING pending ? */ static int mchchk_wng_posted = 0; - /* - * Use single machine clear, as we cannot handle smp right now - */ + + /* Use single cpu clear, as we cannot handle smp here. */ __ctl_clear_bit(14, 24); /* Disable WARNING MCH */ if (xchg(&mchchk_wng_posted, 1) == 0) kill_cad_pid(SIGPWR, 1); } -#endif - if (mcck.kill_task) { local_irq_enable(); printk(KERN_EMERG "mcck: Terminating task because of machine " @@ -375,9 +370,7 @@ static int __init machine_check_init(void) { ctl_set_bit(14, 25); /* enable external damage MCH */ ctl_set_bit(14, 27); /* enable system recovery MCH */ -#ifdef CONFIG_MACHCHK_WARNING ctl_set_bit(14, 24); /* enable warning MCH */ -#endif return 0; } arch_initcall(machine_check_init); -- cgit v1.2.3 From e3dd9c2da674993edb5b52acb56a5d954415639b Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:03 +0100 Subject: [S390] convert bitmap definitions to C Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/bitops.h | 2 +- arch/s390/kernel/bitmap.S | 56 ------------------------------------------ arch/s390/kernel/bitmap.c | 49 ++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 57 deletions(-) delete mode 100644 arch/s390/kernel/bitmap.S create mode 100644 arch/s390/kernel/bitmap.c (limited to 'arch') diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 8e9243ae0c1..334964a0fb1 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -57,7 +57,7 @@ * with operation of the form "set_bit(bitnr, flags)". */ -/* bitmap tables from arch/S390/kernel/bitmap.S */ +/* bitmap tables from arch/s390/kernel/bitmap.c */ extern const char _oi_bitmap[]; extern const char _ni_bitmap[]; extern const char _zb_findmap[]; diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S deleted file mode 100644 index dfb41f946e2..00000000000 --- a/arch/s390/kernel/bitmap.S +++ /dev/null @@ -1,56 +0,0 @@ -/* - * arch/s390/kernel/bitmap.S - * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... 
- * See include/asm-s390/{bitops.h|posix_types.h} for details - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - */ - - .globl _oi_bitmap -_oi_bitmap: - .byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 - - .globl _ni_bitmap -_ni_bitmap: - .byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F - - .globl _zb_findmap -_zb_findmap: - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 - - .globl _sb_findmap -_sb_findmap: - .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c new file mode 100644 index 00000000000..54e362b4267 --- /dev/null +++ b/arch/s390/kernel/bitmap.c @@ -0,0 +1,49 @@ +/* + * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... + * See include/asm/{bitops.h|posix_types.h} for details + * + * Copyright IBM Corp. 
1999,2009 + * Author(s): Martin Schwidefsky , + */ + +#include + +const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; + +const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f }; + +const char _zb_findmap[] = { + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 }; + +const char _sb_findmap[] = { + 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 }; -- cgit v1.2.3 From 1485c5c88483d200c9c4c71ed7e8eef1a1e317a1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:04 +0100 Subject: [S390] move EXPORT_SYMBOLs to definitions Move all EXPORT_SYMBOLs to their corresponding definitions. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/bitmap.c | 5 +++++ arch/s390/kernel/process.c | 2 ++ arch/s390/kernel/s390_ksyms.c | 44 ------------------------------------------- arch/s390/kernel/setup.c | 8 ++++++++ arch/s390/lib/delay.c | 2 ++ arch/s390/mm/init.c | 2 ++ 6 files changed, 19 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c index 54e362b4267..3ae4757b006 100644 --- a/arch/s390/kernel/bitmap.c +++ b/arch/s390/kernel/bitmap.c @@ -7,10 +7,13 @@ */ #include +#include const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; +EXPORT_SYMBOL(_oi_bitmap); const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f }; +EXPORT_SYMBOL(_ni_bitmap); const char _zb_findmap[] = { 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, @@ -29,6 +32,7 @@ const char _zb_findmap[] = { 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 }; +EXPORT_SYMBOL(_zb_findmap); const char _sb_findmap[] = { 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, @@ -47,3 +51,4 @@ const char _sb_findmap[] = { 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 }; +EXPORT_SYMBOL(_sb_findmap); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index bd616fb31e7..b48e961a38f 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -141,6 +141,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); } +EXPORT_SYMBOL(kernel_thread); /* * Free current thread data structures etc.. 
@@ -318,6 +319,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) #endif /* CONFIG_64BIT */ return 1; } +EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct task_struct *p) { diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 46b90cb0370..656fcbb9bd8 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,49 +1,5 @@ -/* - * arch/s390/kernel/s390_ksyms.c - * - * S390 version - */ -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#ifdef CONFIG_IP_MULTICAST -#include -#endif - -/* - * memory management - */ -EXPORT_SYMBOL(_oi_bitmap); -EXPORT_SYMBOL(_ni_bitmap); -EXPORT_SYMBOL(_zb_findmap); -EXPORT_SYMBOL(_sb_findmap); - -/* - * binfmt_elf loader - */ -extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); -EXPORT_SYMBOL(dump_fpu); -EXPORT_SYMBOL(empty_zero_page); - -/* - * misc. - */ -EXPORT_SYMBOL(machine_flags); -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(csum_fold); -EXPORT_SYMBOL(console_mode); -EXPORT_SYMBOL(console_devno); -EXPORT_SYMBOL(console_irq); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 8fdf08379ce..0b8ad7b4f8f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -74,9 +74,17 @@ EXPORT_SYMBOL(uaccess); * Machine setup.. */ unsigned int console_mode = 0; +EXPORT_SYMBOL(console_mode); + unsigned int console_devno = -1; +EXPORT_SYMBOL(console_devno); + unsigned int console_irq = -1; +EXPORT_SYMBOL(console_irq); + unsigned long machine_flags; +EXPORT_SYMBOL(machine_flags); + unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 6ccb9fab055..3f5f680726e 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -92,6 +93,7 @@ out: local_irq_restore(flags); preempt_enable(); } +EXPORT_SYMBOL(__udelay); /* * Simple udelay variant. To be used on startup and reboot diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f0258ca3b17..c634dfbe92e 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -40,7 +40,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); + char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +EXPORT_SYMBOL(empty_zero_page); /* * paging_init() sets up the page tables -- cgit v1.2.3 From 8283cb43ab3e92a039d3486a0f6253df95a5fa55 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 26 Mar 2009 15:24:21 +0100 Subject: [S390] clock sync mode flags The clock sync mode flag CLOCK_SYNC_STP is not cleared when stp is set offline. In this case the get_sync_clock() function returns -EACCESS and the dasd driver will block all i/o until stp is enabled again. In addition get_sync_clock can return -EACCESS if the clock is not in sync instead of -EAGAIN. Rework the stp/etr online handling to fix these problems. 
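The core of the rework below is that "is the clock in sync?" is answered directly from the per-cpu clock sync word, whose high bit is set by enable_sync_clock() and cleared by disable_sync_clock(), instead of from the separately maintained CLOCK_SYNC_ETR/CLOCK_SYNC_STP bits. A condensed sketch of that check, with a hypothetical function name (the patch itself calls it check_sync_clock()):

	static DEFINE_PER_CPU(atomic_t, clock_sync_word);

	/* nonzero while the TOD clock is considered in sync on this cpu */
	static inline int clock_is_in_sync(void)
	{
		atomic_t *sw = &get_cpu_var(clock_sync_word);
		int in_sync = (atomic_read(sw) & 0x80000000U) != 0;

		put_cpu_var(clock_sync_word);
		return in_sync;
	}

The CLOCK_SYNC_ETR and CLOCK_SYNC_STP bits are then only updated from the sysfs online attributes under clock_sync_mutex, so they reflect the configured state rather than the momentary sync state.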
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/time.c | 71 ++++++++++++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index fc468cae446..f72d41068dc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -331,6 +331,7 @@ static unsigned long long adjust_time(unsigned long long old, } static DEFINE_PER_CPU(atomic_t, clock_sync_word); +static DEFINE_MUTEX(clock_sync_mutex); static unsigned long clock_sync_flags; #define CLOCK_SYNC_HAS_ETR 0 @@ -394,6 +395,20 @@ static void enable_sync_clock(void) atomic_set_mask(0x80000000, sw_ptr); } +/* + * Function to check if the clock is in sync. + */ +static inline int check_sync_clock(void) +{ + atomic_t *sw_ptr; + int rc; + + sw_ptr = &get_cpu_var(clock_sync_word); + rc = (atomic_read(sw_ptr) & 0x80000000U) != 0; + put_cpu_var(clock_sync_sync); + return rc; +} + /* Single threaded workqueue used for etr and stp sync events */ static struct workqueue_struct *time_sync_wq; @@ -485,6 +500,8 @@ static void etr_reset(void) if (etr_setr(&etr_eacr) == 0) { etr_tolec = get_clock(); set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); } else if (etr_port0_online || etr_port1_online) { pr_warning("The real or virtual hardware system does " "not provide an ETR interface\n"); @@ -533,8 +550,7 @@ void etr_switch_to_local(void) { if (!etr_eacr.sl) return; - if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - disable_sync_clock(NULL); + disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); queue_work(time_sync_wq, &etr_work); } @@ -549,8 +565,7 @@ void etr_sync_check(void) { if (!etr_eacr.es) return; - if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - disable_sync_clock(NULL); + disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); queue_work(time_sync_wq, &etr_work); } @@ -914,7 +929,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, * Do not try to get the alternate port aib if the clock * is not in sync yet. */ - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es) + if (!check_sync_clock()) return eacr; /* @@ -997,7 +1012,6 @@ static void etr_work_fn(struct work_struct *work) on_each_cpu(disable_sync_clock, NULL, 1); del_timer_sync(&etr_timer); etr_update_eacr(eacr); - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); goto out_unlock; } @@ -1071,18 +1085,13 @@ static void etr_work_fn(struct work_struct *work) /* Both ports not usable. */ eacr.es = eacr.sl = 0; sync_port = -1; - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); } - if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - eacr.es = 0; - /* * If the clock is in sync just update the eacr and return. * If there is no valid sync port wait for a port update. */ - if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) || - eacr.es || sync_port < 0) { + if (check_sync_clock() || sync_port < 0) { etr_update_eacr(eacr); etr_set_tolec_timeout(now); goto out_unlock; @@ -1103,13 +1112,11 @@ static void etr_work_fn(struct work_struct *work) * and set up a timer to try again after 0.5 seconds */ etr_update_eacr(eacr); - set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); if (now < etr_tolec + (1600000 << 12) || etr_sync_clock_stop(&aib, sync_port) != 0) { /* Sync failed. Try again in 1/2 second. 
*/ eacr.es = 0; etr_update_eacr(eacr); - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); etr_set_sync_timeout(); } else etr_set_tolec_timeout(now); @@ -1191,19 +1198,30 @@ static ssize_t etr_online_store(struct sys_device *dev, return -EINVAL; if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) return -EOPNOTSUPP; + mutex_lock(&clock_sync_mutex); if (dev == &etr_port0_dev) { if (etr_port0_online == value) - return count; /* Nothing to do. */ + goto out; /* Nothing to do. */ etr_port0_online = value; + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); queue_work(time_sync_wq, &etr_work); } else { if (etr_port1_online == value) - return count; /* Nothing to do. */ + goto out; /* Nothing to do. */ etr_port1_online = value; + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); queue_work(time_sync_wq, &etr_work); } +out: + mutex_unlock(&clock_sync_mutex); return count; } @@ -1471,8 +1489,6 @@ static void stp_timing_alert(struct stp_irq_parm *intparm) */ void stp_sync_check(void) { - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) - return; disable_sync_clock(NULL); queue_work(time_sync_wq, &stp_work); } @@ -1485,8 +1501,6 @@ void stp_sync_check(void) */ void stp_island_check(void) { - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) - return; disable_sync_clock(NULL); queue_work(time_sync_wq, &stp_work); } @@ -1513,10 +1527,6 @@ static int stp_sync_clock(void *data) enable_sync_clock(); - set_bit(CLOCK_SYNC_STP, &clock_sync_flags); - if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - queue_work(time_sync_wq, &etr_work); - rc = 0; if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || @@ -1535,9 +1545,6 @@ static int stp_sync_clock(void *data) if (rc) { disable_sync_clock(NULL); stp_sync->in_sync = -EAGAIN; - clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); - if (etr_port0_online || etr_port1_online) - queue_work(time_sync_wq, &etr_work); } else stp_sync->in_sync = 1; xchg(&first, 0); @@ -1569,6 +1576,10 @@ static void stp_work_fn(struct work_struct *work) if (rc || stp_info.c == 0) goto out_unlock; + /* Skip synchronization if the clock is already in sync. */ + if (check_sync_clock()) + goto out_unlock; + memset(&stp_sync, 0, sizeof(stp_sync)); get_online_cpus(); atomic_set(&stp_sync.cpus, num_online_cpus() - 1); @@ -1684,8 +1695,14 @@ static ssize_t stp_online_store(struct sysdev_class *class, return -EINVAL; if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) return -EOPNOTSUPP; + mutex_lock(&clock_sync_mutex); stp_online = value; + if (stp_online) + set_bit(CLOCK_SYNC_STP, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); queue_work(time_sync_wq, &stp_work); + mutex_unlock(&clock_sync_mutex); return count; } -- cgit v1.2.3 From 4b2a8b6043979f1e6cc8aaa4116c95b31ae942ec Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Thu, 26 Mar 2009 15:24:22 +0100 Subject: [S390] kernel: Disable switch_amode by default Disable switch_amode by default because pagetable walk on pre z9 hardware has negative performance impact. 
Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/setup.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0b8ad7b4f8f..dd3c5173627 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -301,11 +301,7 @@ static int __init early_parse_mem(char *p) early_param("mem", early_parse_mem); #ifdef CONFIG_S390_SWITCH_AMODE -#ifdef CONFIG_PGSTE -unsigned int switch_amode = 1; -#else unsigned int switch_amode = 0; -#endif EXPORT_SYMBOL_GPL(switch_amode); static int set_amode_and_uaccess(unsigned long user_amode, -- cgit v1.2.3 From d303b6fd858370c22d5c70c313669e3521a5f758 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Thu, 26 Mar 2009 15:24:31 +0100 Subject: [S390] qdio: report SIGA errors directly Errors from SIGA instructions are stored in the per-queue qdio_error and reported back when the queue handler is called. That opens a race when multiple error conditions occur simultaneously. Report SIGA errors immediately in the return value of do_QDIO so the upper layer can react and SIGA errors no longer interfere with other errors. Move the SIGA error handling in qeth from the outbound handler to qeth_flush_buffers. Signed-off-by: Jan Glauber --- arch/s390/include/asm/qdio.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 27fc1746de1..402d6dcf0d2 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -314,6 +314,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, int, int, unsigned long); /* qdio errors reported to the upper-layer program */ +#define QDIO_ERROR_SIGA_TARGET 0x02 #define QDIO_ERROR_SIGA_ACCESS_EXCEPTION 0x10 #define QDIO_ERROR_SIGA_BUSY 0x20 #define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40 -- cgit v1.2.3 From 6faf250789dfd35146a71402f9be245be82bf01e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 26 Mar 2009 15:24:33 +0100 Subject: [S390] allow usage of string functions in linux/string.h In introducing a trivial "strstarts()" function in linux/string.h, we hit the following error on s390: In file included from include/linux/bitmap.h:8, from include/linux/cpumask.h:142, from include/linux/smp.h:12, from /home/rusty/devel/kernel/patches/linux-2.6/arch/s390/include/asm/spinlock.h:14, from include/linux/spinlock.h:88, from include/linux/seqlock.h:29, from include/linux/time.h:8, from include/linux/stat.h:60, from include/linux/module.h:10, from arch/s390/lib/string.c:13: include/linux/string.h: In function 'strstarts': include/linux/string.h:124: error: implicit declaration of function 'strlen' include/linux/string.h:124: warning: incompatible implicit declaration of built-in function 'strlen' This is because, when including asm/string.h from arch/s390/lib/string.c, we don't declare the string ops we are about to define, and linux/string.h barfs. The fix is to declare them in this IN_ARCH_STRING_C case, but in general I wonder if there's a neater fix.
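For context, the strstarts() helper that triggered this is roughly the following (shown for illustration only, it is not part of this patch), which is why a declaration of strlen() must be visible wherever linux/string.h is included:

	static inline bool strstarts(const char *str, const char *prefix)
	{
		return strncmp(str, prefix, strlen(prefix)) == 0;
	}

When arch/s390/lib/string.c defines IN_ARCH_STRING_C, the inline definitions in asm/string.h are compiled out, so without the prototypes added below the compiler only sees an implicit declaration of strlen().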
Reported-by: linux-next Signed-off-by: Rusty Russell Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/string.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index d074673a6d9..adf079170aa 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -135,7 +135,13 @@ static inline size_t strnlen(const char * s, size_t n) : "+a" (end), "+a" (tmp) : "d" (r0) : "cc"); return end - s; } - +#else /* IN_ARCH_STRING_C */ +void *memchr(const void * s, int c, size_t n); +void *memscan(void *s, int c, size_t n); +char *strcat(char *dst, const char *src); +char *strcpy(char *dst, const char *src); +size_t strlen(const char *s); +size_t strnlen(const char * s, size_t n); #endif /* !IN_ARCH_STRING_C */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From ced2c8bcbc5082bc112d7c7410b3f5380c77e306 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Thu, 26 Mar 2009 15:24:34 +0100 Subject: [S390] remove duplicate nul-termination of string strlcpy() does already NUL-terminate the destination string. Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/early.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index d61eb3334ed..4d221c81c84 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -352,7 +352,6 @@ static void __init setup_boot_command_line(void) /* copy arch command line */ strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); - boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0; /* append IPL PARM data to the boot command line */ if (MACHINE_IS_VM) { -- cgit v1.2.3 From e13ed9b2704487e98d9241282765869fbd9a2dda Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:35 +0100 Subject: [S390] bitops: remove likely annotations likely/unlikely profiling revealed that none of the branches in bitops is taken likely or unlikely. So remove the annotations. In addition the generated code is shorter. 
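For reference, the removed annotations are the usual branch-prediction hints from linux/compiler.h:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

A hint such as likely((word & 0xffff) == 0xffff) only pays off when that branch really is taken most of the time; since profiling showed that neither outcome dominates in __ffz_word() and __ffs_word(), dropping the hints lets the compiler lay out the branches naturally, which also shortens the generated code.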
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/bitops.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 334964a0fb1..b30606f6d52 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -525,16 +525,16 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr, static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) { #ifdef __s390x__ - if (likely((word & 0xffffffff) == 0xffffffff)) { + if ((word & 0xffffffff) == 0xffffffff) { word >>= 32; nr += 32; } #endif - if (likely((word & 0xffff) == 0xffff)) { + if ((word & 0xffff) == 0xffff) { word >>= 16; nr += 16; } - if (likely((word & 0xff) == 0xff)) { + if ((word & 0xff) == 0xff) { word >>= 8; nr += 8; } @@ -549,16 +549,16 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) static inline unsigned long __ffs_word(unsigned long nr, unsigned long word) { #ifdef __s390x__ - if (likely((word & 0xffffffff) == 0)) { + if ((word & 0xffffffff) == 0) { word >>= 32; nr += 32; } #endif - if (likely((word & 0xffff) == 0)) { + if ((word & 0xffff) == 0) { word >>= 16; nr += 16; } - if (likely((word & 0xff) == 0)) { + if ((word & 0xff) == 0) { word >>= 8; nr += 8; } -- cgit v1.2.3 From 504665a91498f43d220b7d0942281067283a35f7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 26 Mar 2009 15:24:36 +0100 Subject: [S390] module function call optimization Avoid the detour over the PLT if the branch target of a function call in a module is in the range of the bras (16-bit) or brasl (32-bit) instruction. The PLT is still generated but it is unused. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/module.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 59b4e796680..eed4a00cb67 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -310,15 +310,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, info->plt_initialized = 1; } if (r_type == R_390_PLTOFF16 || - r_type == R_390_PLTOFF32 - || r_type == R_390_PLTOFF64 - ) + r_type == R_390_PLTOFF32 || + r_type == R_390_PLTOFF64) val = me->arch.plt_offset - me->arch.got_offset + info->plt_offset + rela->r_addend; - else - val = (Elf_Addr) me->module_core + - me->arch.plt_offset + info->plt_offset + - rela->r_addend - loc; + else { + if (!((r_type == R_390_PLT16DBL && + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) + val = (Elf_Addr) me->module_core + + me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; + } if (r_type == R_390_PLT16DBL) *(unsigned short *) loc = val >> 1; else if (r_type == R_390_PLTOFF16) -- cgit v1.2.3 From 1edad85b16fdda43c8ab809e2779e8bf64ab8bb2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:37 +0100 Subject: [S390] use compiler builtin versions of strlen/strcpy/strcat Use builtin variants if gcc 4 or newer is used to compile the kernel. Generates better code than the asm variants. 
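The practical effect is that, with gcc 4 or newer, these calls can be expanded or even folded at compile time instead of always going through the hand-written inline assembly. A trivial illustration (not from the patch):

	/* illustration only */
	static inline size_t banner_len(void)
	{
		return strlen("s390x");	/* with __builtin_strlen, gcc folds this to 5 */
	}

For arguments the compiler cannot see through, the builtins fall back to ordinary calls, which the kernel provides in arch/s390/lib/string.c, so behaviour is unchanged.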
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/string.h | 8 ++++++++ arch/s390/lib/string.c | 8 ++++++++ 2 files changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index adf079170aa..cd0241db5a4 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -100,6 +100,7 @@ static inline char *strcat(char *dst, const char *src) static inline char *strcpy(char *dst, const char *src) { +#if __GNUC__ < 4 register int r0 asm("0") = 0; char *ret = dst; @@ -109,10 +110,14 @@ static inline char *strcpy(char *dst, const char *src) : "+&a" (dst), "+&a" (src) : "d" (r0) : "cc", "memory"); return ret; +#else + return __builtin_strcpy(dst, src); +#endif } static inline size_t strlen(const char *s) { +#if __GNUC__ < 4 register unsigned long r0 asm("0") = 0; const char *tmp = s; @@ -121,6 +126,9 @@ static inline size_t strlen(const char *s) " jo 0b" : "+d" (r0), "+a" (tmp) : : "cc"); return r0 - (unsigned long) s; +#else + return __builtin_strlen(s); +#endif } static inline size_t strnlen(const char * s, size_t n) diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index ae5cf5d03d4..4143b7c1909 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -44,7 +44,11 @@ static inline char *__strnend(const char *s, size_t n) */ size_t strlen(const char *s) { +#if __GNUC__ < 4 return __strend(s) - s; +#else + return __builtin_strlen(s); +#endif } EXPORT_SYMBOL(strlen); @@ -70,6 +74,7 @@ EXPORT_SYMBOL(strnlen); */ char *strcpy(char *dest, const char *src) { +#if __GNUC__ < 4 register int r0 asm("0") = 0; char *ret = dest; @@ -78,6 +83,9 @@ char *strcpy(char *dest, const char *src) : "+&a" (dest), "+&a" (src) : "d" (r0) : "cc", "memory" ); return ret; +#else + return __builtin_strcpy(dest, src); +#endif } EXPORT_SYMBOL(strcpy); -- cgit v1.2.3 From eaf1b6fbca8d9be87bef2eafaa3f40bffe26ce04 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 26 Mar 2009 15:24:40 +0100 Subject: [S390] Expose a constant for the number of words representing the CRs We need to use this value in the checkpoint/restart code and would like to have a constant instead of a magic '3'. 
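A hypothetical consumer of the new constant (illustration only, not part of this patch; it assumes the per_cr_words typedef from asm/ptrace.h shown below) would then loop over the control-register words without the magic number:

	static void copy_per_cr_words(per_cr_words *dst, const unsigned long *src)
	{
		int i;

		for (i = 0; i < NUM_CR_WORDS; i++)
			dst->cr[i] = src[i];
	}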
Cc: linux-s390@vger.kernel.org Signed-off-by: Dan Smith Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ptrace.h | 4 +++- arch/s390/kernel/compat_ptrace.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 8920025c3c0..f1b051630c5 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -172,6 +172,8 @@ #define NUM_CRS 16 #define NUM_ACRS 16 +#define NUM_CR_WORDS 3 + #define FPR_SIZE 8 #define FPC_SIZE 4 #define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */ @@ -334,7 +336,7 @@ struct pt_regs */ typedef struct { - unsigned long cr[3]; + unsigned long cr[NUM_CR_WORDS]; } per_cr_words; #define PER_EM_MASK 0xE8000000UL diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h index a2be3a978d5..123dd660d7f 100644 --- a/arch/s390/kernel/compat_ptrace.h +++ b/arch/s390/kernel/compat_ptrace.h @@ -1,10 +1,11 @@ #ifndef _PTRACE32_H #define _PTRACE32_H +#include /* needed for NUM_CR_WORDS */ #include "compat_linux.h" /* needed for psw_compat_t */ typedef struct { - __u32 cr[3]; + __u32 cr[NUM_CR_WORDS]; } per_cr_words32; typedef struct { -- cgit v1.2.3 From 2938af534d47891ddbced552e5d29f7b90bec609 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 26 Mar 2009 15:24:41 +0100 Subject: [S390] Fix comments in lowcore structure This patch fixes two addresses in the comments for the lowcore structure. Looks like an copy-paste bug. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index ee4b10ff938..f94386ece0d 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -315,8 +315,8 @@ struct _lowcore __u8 op_access_id; /* 0x0a2 */ __u8 ar_access_id; /* 0x0a3 */ __u8 pad2[0xA8-0xA4]; /* 0x0a4 */ - addr_t trans_exc_code; /* 0x0A0 */ - addr_t monitor_code; /* 0x09c */ + addr_t trans_exc_code; /* 0x0a8 */ + addr_t monitor_code; /* 0x0b0 */ __u16 subchannel_id; /* 0x0b8 */ __u16 subchannel_nr; /* 0x0ba */ __u32 io_int_parm; /* 0x0bc */ -- cgit v1.2.3 From 7b4684880dfc6c45bc56039ca5eada771d7643ab Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 26 Mar 2009 15:24:42 +0100 Subject: [S390] eliminate cpuinfo_S390 structure Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 15 +++++---- arch/s390/include/asm/processor.h | 16 +-------- arch/s390/include/asm/smp.h | 7 +--- arch/s390/kernel/processor.c | 69 +++++++++++++++++++-------------------- arch/s390/kernel/setup.c | 10 ++---- arch/s390/kernel/smp.c | 8 ++--- 6 files changed, 51 insertions(+), 74 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f94386ece0d..ad543c11826 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -80,7 +80,6 @@ #define __LC_USER_ASCE 0xC50 #define __LC_PANIC_STACK 0xC54 #define __LC_CPUID 0xC60 -#define __LC_CPUADDR 0xC68 #define __LC_IPLDEV 0xC7C #define __LC_CURRENT 0xC90 #define __LC_INT_CLOCK 0xC98 @@ -102,7 +101,6 @@ #define __LC_USER_ASCE 0xD60 #define __LC_PANIC_STACK 0xD68 #define __LC_CPUID 0xD80 -#define __LC_CPUADDR 0xD88 #define __LC_IPLDEV 0xDB8 #define __LC_CURRENT 0xDD8 #define __LC_INT_CLOCK 0xDE8 @@ -273,8 +271,10 @@ struct _lowcore __u32 user_exec_asce; /* 0xc58 
*/ __u8 pad10[0xc60-0xc5c]; /* 0xc5c */ /* entry.S sensitive area start */ - struct cpuinfo_S390 cpu_data; /* 0xc60 */ - __u32 ipl_device; /* 0xc7c */ + cpuid_t cpu_id; /* 0xc60 */ + __u32 cpu_nr; /* 0xc68 */ + __u32 ipl_device; /* 0xc6c */ + __u8 pad_0xc70[0xc80-0xc70]; /* 0xc70 */ /* entry.S sensitive area end */ /* SMP info area: defined by DJB */ @@ -366,9 +366,10 @@ struct _lowcore __u64 user_exec_asce; /* 0xd70 */ __u8 pad10[0xd80-0xd78]; /* 0xd78 */ /* entry.S sensitive area start */ - struct cpuinfo_S390 cpu_data; /* 0xd80 */ - __u32 ipl_device; /* 0xdb8 */ - __u32 pad11; /* 0xdbc */ + cpuid_t cpu_id; /* 0xd80 */ + __u32 cpu_nr; /* 0xd88 */ + __u32 ipl_device; /* 0xd8c */ + __u8 pad_0xd90[0xdc0-0xd90]; /* 0xd90 */ /* entry.S sensitive area end */ /* SMP info area: defined by DJB */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index db4523fe38a..61862b3ac79 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -42,22 +42,8 @@ static inline void get_cpu_id(cpuid_t *ptr) asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); } -struct cpuinfo_S390 -{ - cpuid_t cpu_id; - __u16 cpu_addr; - __u16 cpu_nr; - unsigned long loops_per_jiffy; - unsigned long *pgd_quick; -#ifdef __s390x__ - unsigned long *pmd_quick; -#endif /* __s390x__ */ - unsigned long *pte_quick; - unsigned long pgtable_cache_sz; -}; - extern void s390_adjust_jiffies(void); -extern void print_cpu_info(struct cpuinfo_S390 *); +extern void print_cpu_info(void); extern int get_cpu_capability(unsigned int *); /* diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 024b91e0623..2009158a450 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -50,12 +50,7 @@ extern void machine_power_off_smp(void); #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) - -static inline __u16 hard_smp_processor_id(void) -{ - return stap(); -} +#define raw_smp_processor_id() (S390_lowcore.cpu_nr) /* * returns 1 if cpu is in stopped/check stopped state or not operational diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 82c1872cfe8..423da1bd42a 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -18,10 +18,11 @@ #include #include -void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) +void __cpuinit print_cpu_info(void) { pr_info("Processor %d started, address %d, identification %06X\n", - cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); + S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, + S390_lowcore.cpu_id.ident); } /* @@ -34,44 +35,42 @@ static int show_cpuinfo(struct seq_file *m, void *v) "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", "edat" }; - struct cpuinfo_S390 *cpuinfo; - unsigned long n = (unsigned long) v - 1; - int i; + struct _lowcore *lc; + unsigned long n = (unsigned long) v - 1; + int i; - s390_adjust_jiffies(); - preempt_disable(); - if (!n) { - seq_printf(m, "vendor_id : IBM/S390\n" - "# processors : %i\n" - "bogomips per cpu: %lu.%02lu\n", - num_online_cpus(), loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ))%100); - seq_puts(m, "features\t: "); - for (i = 0; i < 8; i++) - if (hwcap_str[i] && (elf_hwcap & (1UL << i))) - seq_printf(m, "%s ", hwcap_str[i]); - seq_puts(m, "\n"); - } + s390_adjust_jiffies(); + preempt_disable(); + if (!n) { + seq_printf(m, "vendor_id : IBM/S390\n" + "# processors : %i\n" + "bogomips per cpu: %lu.%02lu\n", + num_online_cpus(), 
loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ))%100); + seq_puts(m, "features\t: "); + for (i = 0; i < 8; i++) + if (hwcap_str[i] && (elf_hwcap & (1UL << i))) + seq_printf(m, "%s ", hwcap_str[i]); + seq_puts(m, "\n"); + } - if (cpu_online(n)) { + if (cpu_online(n)) { #ifdef CONFIG_SMP - if (smp_processor_id() == n) - cpuinfo = &S390_lowcore.cpu_data; - else - cpuinfo = &lowcore_ptr[n]->cpu_data; + lc = (smp_processor_id() == n) ? + &S390_lowcore : lowcore_ptr[n]; #else - cpuinfo = &S390_lowcore.cpu_data; + lc = &S390_lowcore; #endif - seq_printf(m, "processor %li: " - "version = %02X, " - "identification = %06X, " - "machine = %04X\n", - n, cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine); - } - preempt_enable(); - return 0; + seq_printf(m, "processor %li: " + "version = %02X, " + "identification = %06X, " + "machine = %04X\n", + n, lc->cpu_id.version, + lc->cpu_id.ident, + lc->cpu_id.machine); + } + preempt_enable(); + return 0; } static void *c_start(struct seq_file *m, loff_t *pos) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index dd3c5173627..9c8853f21bb 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -121,13 +121,10 @@ static struct resource data_resource = { */ void __cpuinit cpu_init(void) { - int addr = hard_smp_processor_id(); - /* * Store processor id in lowcore (used e.g. in timer_interrupt) */ - get_cpu_id(&S390_lowcore.cpu_data.cpu_id); - S390_lowcore.cpu_data.cpu_addr = addr; + get_cpu_id(&S390_lowcore.cpu_id); /* * Force FPU initialization: @@ -686,7 +683,6 @@ setup_memory(void) static void __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; - struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; unsigned long long facility_list_extended; unsigned int facility_list; int i; @@ -732,7 +728,7 @@ static void __init setup_hwcaps(void) if (MACHINE_HAS_HPAGE) elf_hwcap |= 1UL << 7; - switch (cpuinfo->cpu_id.machine) { + switch (S390_lowcore.cpu_id.machine) { case 0x9672: #if !defined(CONFIG_64BIT) default: /* Use "g5" as default for 31 bit kernels. 
*/ @@ -825,7 +821,7 @@ setup_arch(char **cmdline_p) setup_lowcore(); cpu_init(); - __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; + __cpu_logical_map[0] = stap(); s390_init_cpu_topology(); /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a8858634dd0..b167f74d94c 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -372,7 +372,7 @@ static void __init smp_detect_cpus(void) c_cpus = 1; s_cpus = 0; - boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + boot_cpu_addr = __cpu_logical_map[0]; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) panic("smp_detect_cpus failed to allocate memory\n"); @@ -446,7 +446,7 @@ int __cpuinit start_secondary(void *cpuvoid) /* Switch on interrupts */ local_irq_enable(); /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); + print_cpu_info(); /* cpu_idle will call schedule for us */ cpu_idle(); return 0; @@ -564,7 +564,7 @@ int __cpuinit __cpu_up(unsigned int cpu) : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; cpu_lowcore->ipl_device = S390_lowcore.ipl_device; eieio(); @@ -656,7 +656,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* request the 0x1201 emergency signal external interrupt */ if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) panic("Couldn't request external interrupt 0x1201"); - print_cpu_info(&S390_lowcore.cpu_data); + print_cpu_info(); /* Reallocate current lowcore, but keep its contents. */ lc_order = sizeof(long) == 8 ? 1 : 0; -- cgit v1.2.3 From da292bbe1f620221b08c4b589424f370168d642b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 26 Mar 2009 15:24:43 +0100 Subject: [S390] eliminate ipl_device from lowcore Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 8 ++------ arch/s390/kernel/head31.S | 1 - arch/s390/kernel/head64.S | 1 - arch/s390/kernel/setup.c | 1 - arch/s390/kernel/smp.c | 1 - 5 files changed, 2 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index ad543c11826..5b18035c1dc 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -80,7 +80,6 @@ #define __LC_USER_ASCE 0xC50 #define __LC_PANIC_STACK 0xC54 #define __LC_CPUID 0xC60 -#define __LC_IPLDEV 0xC7C #define __LC_CURRENT 0xC90 #define __LC_INT_CLOCK 0xC98 #else /* __s390x__ */ @@ -101,7 +100,6 @@ #define __LC_USER_ASCE 0xD60 #define __LC_PANIC_STACK 0xD68 #define __LC_CPUID 0xD80 -#define __LC_IPLDEV 0xDB8 #define __LC_CURRENT 0xDD8 #define __LC_INT_CLOCK 0xDE8 #define __LC_VDSO_PER_CPU 0xE38 @@ -273,8 +271,7 @@ struct _lowcore /* entry.S sensitive area start */ cpuid_t cpu_id; /* 0xc60 */ __u32 cpu_nr; /* 0xc68 */ - __u32 ipl_device; /* 0xc6c */ - __u8 pad_0xc70[0xc80-0xc70]; /* 0xc70 */ + __u8 pad_0xc6c[0xc80-0xc6c]; /* 0xc6c */ /* entry.S sensitive area end */ /* SMP info area: defined by DJB */ @@ -368,8 +365,7 @@ struct _lowcore /* entry.S sensitive area start */ cpuid_t cpu_id; /* 0xd80 */ __u32 cpu_nr; /* 0xd88 */ - __u32 ipl_device; /* 0xd8c */ - __u8 pad_0xd90[0xdc0-0xd90]; /* 0xd90 */ + __u8 pad_0xd8c[0xdc0-0xd8c]; /* 0xd8c */ /* entry.S sensitive area end */ /* SMP info area: defined by DJB */ diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index db476d114ca..2ced846065b 100644 
--- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -20,7 +20,6 @@ startup_continue: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore - mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) # # Setup stack # diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index f9f70aa1524..65667b2e65c 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -86,7 +86,6 @@ startup_continue: lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore - mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) lghi %r0,__LC_PASTE stg %r0,__LC_VDSO_PER_CPU # diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 9c8853f21bb..91551ef1d67 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -419,7 +419,6 @@ setup_lowcore(void) PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; lc->io_new_psw.mask = psw_kernel_bits; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; - lc->ipl_device = S390_lowcore.ipl_device; lc->clock_comparator = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; lc->async_stack = (unsigned long) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b167f74d94c..775885772ff 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -566,7 +566,6 @@ int __cpuinit __cpu_up(unsigned int cpu) cpu_lowcore->current_task = (unsigned long) idle; cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; - cpu_lowcore->ipl_device = S390_lowcore.ipl_device; eieio(); while (signal_processor(cpu, sigp_restart) == sigp_busy) -- cgit v1.2.3 From 866ba28418d30122d863c50182a202741f4dcf3e Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 26 Mar 2009 15:24:44 +0100 Subject: [S390] cleanup lowcore.h The lowcore.h header has quite a lot of whitespace damage and a rather wild collection of entries. Remove all that whitespace and tidy up the order of the lowcore fields. 
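Note, for illustration only and not part of this patch: the reordered fields must stay at the offsets given in the comments and in the __LC_* constants, because entry.S and other assembler code address them by fixed offset. A build-time guard along the following lines is one way such a layout is typically pinned down. The helper name is hypothetical; the two offsets are the new 31-bit values of __LC_CPUID (0x02b0) and __LC_IRB (0x0300) from the hunks below.

    #include <linux/kernel.h>   /* BUILD_BUG_ON() */
    #include <linux/stddef.h>   /* offsetof() */
    #include <asm/lowcore.h>    /* struct _lowcore */

    /* hypothetical helper: fails the build if the 31-bit layout drifts */
    static inline void lowcore_layout_check(void)
    {
        BUILD_BUG_ON(offsetof(struct _lowcore, cpu_id) != 0x02b0);
        BUILD_BUG_ON(offsetof(struct _lowcore, irb) != 0x0300);
    }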
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 652 ++++++++++++++++++++-------------------- arch/s390/kernel/head.S | 2 + 2 files changed, 331 insertions(+), 323 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 5b18035c1dc..b349f1c7fdf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -11,125 +11,118 @@ #ifndef _ASM_S390_LOWCORE_H #define _ASM_S390_LOWCORE_H -#ifndef __s390x__ -#define __LC_EXT_OLD_PSW 0x018 -#define __LC_SVC_OLD_PSW 0x020 -#define __LC_PGM_OLD_PSW 0x028 -#define __LC_MCK_OLD_PSW 0x030 -#define __LC_IO_OLD_PSW 0x038 -#define __LC_EXT_NEW_PSW 0x058 -#define __LC_SVC_NEW_PSW 0x060 -#define __LC_PGM_NEW_PSW 0x068 -#define __LC_MCK_NEW_PSW 0x070 -#define __LC_IO_NEW_PSW 0x078 -#else /* !__s390x__ */ -#define __LC_EXT_OLD_PSW 0x0130 -#define __LC_SVC_OLD_PSW 0x0140 -#define __LC_PGM_OLD_PSW 0x0150 -#define __LC_MCK_OLD_PSW 0x0160 -#define __LC_IO_OLD_PSW 0x0170 -#define __LC_EXT_NEW_PSW 0x01b0 -#define __LC_SVC_NEW_PSW 0x01c0 -#define __LC_PGM_NEW_PSW 0x01d0 -#define __LC_MCK_NEW_PSW 0x01e0 -#define __LC_IO_NEW_PSW 0x01f0 -#endif /* !__s390x__ */ - -#define __LC_IPL_PARMBLOCK_PTR 0x014 -#define __LC_EXT_PARAMS 0x080 -#define __LC_CPU_ADDRESS 0x084 -#define __LC_EXT_INT_CODE 0x086 - -#define __LC_SVC_ILC 0x088 -#define __LC_SVC_INT_CODE 0x08A -#define __LC_PGM_ILC 0x08C -#define __LC_PGM_INT_CODE 0x08E +#define __LC_IPL_PARMBLOCK_PTR 0x0014 +#define __LC_EXT_PARAMS 0x0080 +#define __LC_CPU_ADDRESS 0x0084 +#define __LC_EXT_INT_CODE 0x0086 -#define __LC_PER_ATMID 0x096 -#define __LC_PER_ADDRESS 0x098 -#define __LC_PER_ACCESS_ID 0x0A1 -#define __LC_AR_MODE_ID 0x0A3 +#define __LC_SVC_ILC 0x0088 +#define __LC_SVC_INT_CODE 0x008a +#define __LC_PGM_ILC 0x008c +#define __LC_PGM_INT_CODE 0x008e -#define __LC_SUBCHANNEL_ID 0x0B8 -#define __LC_SUBCHANNEL_NR 0x0BA -#define __LC_IO_INT_PARM 0x0BC -#define __LC_IO_INT_WORD 0x0C0 -#define __LC_MCCK_CODE 0x0E8 +#define __LC_PER_ATMID 0x0096 +#define __LC_PER_ADDRESS 0x0098 +#define __LC_PER_ACCESS_ID 0x00a1 +#define __LC_AR_MODE_ID 0x00a3 -#define __LC_LAST_BREAK 0x110 +#define __LC_SUBCHANNEL_ID 0x00b8 +#define __LC_SUBCHANNEL_NR 0x00ba +#define __LC_IO_INT_PARM 0x00bc +#define __LC_IO_INT_WORD 0x00c0 +#define __LC_MCCK_CODE 0x00e8 -#define __LC_RETURN_PSW 0x200 - -#define __LC_SAVE_AREA 0xC00 +#define __LC_DUMP_REIPL 0x0e00 #ifndef __s390x__ -#define __LC_IRB 0x208 -#define __LC_SYNC_ENTER_TIMER 0x248 -#define __LC_ASYNC_ENTER_TIMER 0x250 -#define __LC_EXIT_TIMER 0x258 -#define __LC_USER_TIMER 0x260 -#define __LC_SYSTEM_TIMER 0x268 -#define __LC_STEAL_TIMER 0x270 -#define __LC_LAST_UPDATE_TIMER 0x278 -#define __LC_LAST_UPDATE_CLOCK 0x280 -#define __LC_RETURN_MCCK_PSW 0x288 -#define __LC_KERNEL_STACK 0xC40 -#define __LC_THREAD_INFO 0xC44 -#define __LC_ASYNC_STACK 0xC48 -#define __LC_KERNEL_ASCE 0xC4C -#define __LC_USER_ASCE 0xC50 -#define __LC_PANIC_STACK 0xC54 -#define __LC_CPUID 0xC60 -#define __LC_CURRENT 0xC90 -#define __LC_INT_CLOCK 0xC98 +#define __LC_EXT_OLD_PSW 0x0018 +#define __LC_SVC_OLD_PSW 0x0020 +#define __LC_PGM_OLD_PSW 0x0028 +#define __LC_MCK_OLD_PSW 0x0030 +#define __LC_IO_OLD_PSW 0x0038 +#define __LC_EXT_NEW_PSW 0x0058 +#define __LC_SVC_NEW_PSW 0x0060 +#define __LC_PGM_NEW_PSW 0x0068 +#define __LC_MCK_NEW_PSW 0x0070 +#define __LC_IO_NEW_PSW 0x0078 +#define __LC_SAVE_AREA 0x0200 +#define __LC_RETURN_PSW 0x0240 +#define __LC_RETURN_MCCK_PSW 0x0248 +#define __LC_SYNC_ENTER_TIMER 0x0250 +#define 
__LC_ASYNC_ENTER_TIMER 0x0258 +#define __LC_EXIT_TIMER 0x0260 +#define __LC_USER_TIMER 0x0268 +#define __LC_SYSTEM_TIMER 0x0270 +#define __LC_STEAL_TIMER 0x0278 +#define __LC_LAST_UPDATE_TIMER 0x0280 +#define __LC_LAST_UPDATE_CLOCK 0x0288 +#define __LC_CURRENT 0x0290 +#define __LC_THREAD_INFO 0x0294 +#define __LC_KERNEL_STACK 0x0298 +#define __LC_ASYNC_STACK 0x029c +#define __LC_PANIC_STACK 0x02a0 +#define __LC_KERNEL_ASCE 0x02a4 +#define __LC_USER_ASCE 0x02a8 +#define __LC_USER_EXEC_ASCE 0x02ac +#define __LC_CPUID 0x02b0 +#define __LC_INT_CLOCK 0x02c8 +#define __LC_IRB 0x0300 +#define __LC_PFAULT_INTPARM 0x0080 +#define __LC_CPU_TIMER_SAVE_AREA 0x00d8 +#define __LC_CLOCK_COMP_SAVE_AREA 0x00e0 +#define __LC_PSW_SAVE_AREA 0x0100 +#define __LC_PREFIX_SAVE_AREA 0x0108 +#define __LC_AREGS_SAVE_AREA 0x0120 +#define __LC_FPREGS_SAVE_AREA 0x0160 +#define __LC_GPREGS_SAVE_AREA 0x0180 +#define __LC_CREGS_SAVE_AREA 0x01c0 #else /* __s390x__ */ -#define __LC_IRB 0x210 -#define __LC_SYNC_ENTER_TIMER 0x250 -#define __LC_ASYNC_ENTER_TIMER 0x258 -#define __LC_EXIT_TIMER 0x260 -#define __LC_USER_TIMER 0x268 -#define __LC_SYSTEM_TIMER 0x270 -#define __LC_STEAL_TIMER 0x278 -#define __LC_LAST_UPDATE_TIMER 0x280 -#define __LC_LAST_UPDATE_CLOCK 0x288 -#define __LC_RETURN_MCCK_PSW 0x290 -#define __LC_KERNEL_STACK 0xD40 -#define __LC_THREAD_INFO 0xD48 -#define __LC_ASYNC_STACK 0xD50 -#define __LC_KERNEL_ASCE 0xD58 -#define __LC_USER_ASCE 0xD60 -#define __LC_PANIC_STACK 0xD68 -#define __LC_CPUID 0xD80 -#define __LC_CURRENT 0xDD8 -#define __LC_INT_CLOCK 0xDE8 -#define __LC_VDSO_PER_CPU 0xE38 -#endif /* __s390x__ */ - -#define __LC_PASTE 0xE40 - -#define __LC_DUMP_REIPL 0xE00 -#ifndef __s390x__ -#define __LC_PFAULT_INTPARM 0x080 -#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 -#define __LC_CLOCK_COMP_SAVE_AREA 0x0E0 -#define __LC_PSW_SAVE_AREA 0x100 -#define __LC_PREFIX_SAVE_AREA 0x108 -#define __LC_AREGS_SAVE_AREA 0x120 -#define __LC_FPREGS_SAVE_AREA 0x160 -#define __LC_GPREGS_SAVE_AREA 0x180 -#define __LC_CREGS_SAVE_AREA 0x1C0 -#else /* __s390x__ */ -#define __LC_PFAULT_INTPARM 0x11B8 +#define __LC_LAST_BREAK 0x0110 +#define __LC_EXT_OLD_PSW 0x0130 +#define __LC_SVC_OLD_PSW 0x0140 +#define __LC_PGM_OLD_PSW 0x0150 +#define __LC_MCK_OLD_PSW 0x0160 +#define __LC_IO_OLD_PSW 0x0170 +#define __LC_EXT_NEW_PSW 0x01b0 +#define __LC_SVC_NEW_PSW 0x01c0 +#define __LC_PGM_NEW_PSW 0x01d0 +#define __LC_MCK_NEW_PSW 0x01e0 +#define __LC_IO_NEW_PSW 0x01f0 +#define __LC_SAVE_AREA 0x0200 +#define __LC_RETURN_PSW 0x0280 +#define __LC_RETURN_MCCK_PSW 0x0290 +#define __LC_SYNC_ENTER_TIMER 0x02a0 +#define __LC_ASYNC_ENTER_TIMER 0x02a8 +#define __LC_EXIT_TIMER 0x02b0 +#define __LC_USER_TIMER 0x02b8 +#define __LC_SYSTEM_TIMER 0x02c0 +#define __LC_STEAL_TIMER 0x02c8 +#define __LC_LAST_UPDATE_TIMER 0x02d0 +#define __LC_LAST_UPDATE_CLOCK 0x02d8 +#define __LC_CURRENT 0x02e0 +#define __LC_THREAD_INFO 0x02e8 +#define __LC_KERNEL_STACK 0x02f0 +#define __LC_ASYNC_STACK 0x02f8 +#define __LC_PANIC_STACK 0x0300 +#define __LC_KERNEL_ASCE 0x0308 +#define __LC_USER_ASCE 0x0310 +#define __LC_USER_EXEC_ASCE 0x0318 +#define __LC_CPUID 0x0320 +#define __LC_INT_CLOCK 0x0340 +#define __LC_VDSO_PER_CPU 0x0350 +#define __LC_IRB 0x0380 +#define __LC_PASTE 0x03c0 +#define __LC_PFAULT_INTPARM 0x11b8 #define __LC_FPREGS_SAVE_AREA 0x1200 -#define __LC_GPREGS_SAVE_AREA 0x1280 +#define __LC_GPREGS_SAVE_AREA 0x1280 #define __LC_PSW_SAVE_AREA 0x1300 #define __LC_PREFIX_SAVE_AREA 0x1318 -#define __LC_FP_CREG_SAVE_AREA 0x131C +#define __LC_FP_CREG_SAVE_AREA 0x131c #define 
__LC_TODREG_SAVE_AREA 0x1324 -#define __LC_CPU_TIMER_SAVE_AREA 0x1328 +#define __LC_CPU_TIMER_SAVE_AREA 0x1328 #define __LC_CLOCK_COMP_SAVE_AREA 0x1331 -#define __LC_AREGS_SAVE_AREA 0x1340 -#define __LC_CREGS_SAVE_AREA 0x1380 +#define __LC_AREGS_SAVE_AREA 0x1340 +#define __LC_CREGS_SAVE_AREA 0x1380 #endif /* __s390x__ */ #ifndef __ASSEMBLY__ @@ -194,227 +187,240 @@ union save_area { struct _lowcore { #ifndef __s390x__ - /* prefix area: defined by architecture */ - psw_t restart_psw; /* 0x000 */ - __u32 ccw2[4]; /* 0x008 */ - psw_t external_old_psw; /* 0x018 */ - psw_t svc_old_psw; /* 0x020 */ - psw_t program_old_psw; /* 0x028 */ - psw_t mcck_old_psw; /* 0x030 */ - psw_t io_old_psw; /* 0x038 */ - __u8 pad1[0x58-0x40]; /* 0x040 */ - psw_t external_new_psw; /* 0x058 */ - psw_t svc_new_psw; /* 0x060 */ - psw_t program_new_psw; /* 0x068 */ - psw_t mcck_new_psw; /* 0x070 */ - psw_t io_new_psw; /* 0x078 */ - __u32 ext_params; /* 0x080 */ - __u16 cpu_addr; /* 0x084 */ - __u16 ext_int_code; /* 0x086 */ - __u16 svc_ilc; /* 0x088 */ - __u16 svc_code; /* 0x08a */ - __u16 pgm_ilc; /* 0x08c */ - __u16 pgm_code; /* 0x08e */ - __u32 trans_exc_code; /* 0x090 */ - __u16 mon_class_num; /* 0x094 */ - __u16 per_perc_atmid; /* 0x096 */ - __u32 per_address; /* 0x098 */ - __u32 monitor_code; /* 0x09c */ - __u8 exc_access_id; /* 0x0a0 */ - __u8 per_access_id; /* 0x0a1 */ - __u8 pad2[0xB8-0xA2]; /* 0x0a2 */ - __u16 subchannel_id; /* 0x0b8 */ - __u16 subchannel_nr; /* 0x0ba */ - __u32 io_int_parm; /* 0x0bc */ - __u32 io_int_word; /* 0x0c0 */ - __u8 pad3[0xc8-0xc4]; /* 0x0c4 */ - __u32 stfl_fac_list; /* 0x0c8 */ - __u8 pad4[0xd4-0xcc]; /* 0x0cc */ - __u32 extended_save_area_addr; /* 0x0d4 */ - __u32 cpu_timer_save_area[2]; /* 0x0d8 */ - __u32 clock_comp_save_area[2]; /* 0x0e0 */ - __u32 mcck_interruption_code[2]; /* 0x0e8 */ - __u8 pad5[0xf4-0xf0]; /* 0x0f0 */ - __u32 external_damage_code; /* 0x0f4 */ - __u32 failing_storage_address; /* 0x0f8 */ - __u8 pad6[0x100-0xfc]; /* 0x0fc */ - __u32 st_status_fixed_logout[4];/* 0x100 */ - __u8 pad7[0x120-0x110]; /* 0x110 */ - __u32 access_regs_save_area[16];/* 0x120 */ - __u32 floating_pt_save_area[8]; /* 0x160 */ - __u32 gpregs_save_area[16]; /* 0x180 */ - __u32 cregs_save_area[16]; /* 0x1c0 */ - - psw_t return_psw; /* 0x200 */ - __u8 irb[64]; /* 0x208 */ - __u64 sync_enter_timer; /* 0x248 */ - __u64 async_enter_timer; /* 0x250 */ - __u64 exit_timer; /* 0x258 */ - __u64 user_timer; /* 0x260 */ - __u64 system_timer; /* 0x268 */ - __u64 steal_timer; /* 0x270 */ - __u64 last_update_timer; /* 0x278 */ - __u64 last_update_clock; /* 0x280 */ - psw_t return_mcck_psw; /* 0x288 */ - __u8 pad8[0xc00-0x290]; /* 0x290 */ - - /* System info area */ - __u32 save_area[16]; /* 0xc00 */ - __u32 kernel_stack; /* 0xc40 */ - __u32 thread_info; /* 0xc44 */ - __u32 async_stack; /* 0xc48 */ - __u32 kernel_asce; /* 0xc4c */ - __u32 user_asce; /* 0xc50 */ - __u32 panic_stack; /* 0xc54 */ - __u32 user_exec_asce; /* 0xc58 */ - __u8 pad10[0xc60-0xc5c]; /* 0xc5c */ - /* entry.S sensitive area start */ - cpuid_t cpu_id; /* 0xc60 */ - __u32 cpu_nr; /* 0xc68 */ - __u8 pad_0xc6c[0xc80-0xc6c]; /* 0xc6c */ - /* entry.S sensitive area end */ - - /* SMP info area: defined by DJB */ - __u64 clock_comparator; /* 0xc80 */ - __u32 ext_call_fast; /* 0xc88 */ - __u32 percpu_offset; /* 0xc8c */ - __u32 current_task; /* 0xc90 */ - __u32 softirq_pending; /* 0xc94 */ - __u64 int_clock; /* 0xc98 */ - __u8 pad11[0xe00-0xca0]; /* 0xca0 */ - - /* 0xe00 contains the address of the IPL Parameter */ - /* Information block. 
Dump tools need IPIB for IPL */ - /* after dump. */ - __u32 ipib; /* 0xe00 */ - __u32 ipib_checksum; /* 0xe04 */ - - /* Align to the top 1k of prefix area */ - __u8 pad12[0x1000-0xe08]; /* 0xe08 */ + /* 0x0000 - 0x01ff: defined by architecture */ + psw_t restart_psw; /* 0x0000 */ + __u32 ccw2[4]; /* 0x0008 */ + psw_t external_old_psw; /* 0x0018 */ + psw_t svc_old_psw; /* 0x0020 */ + psw_t program_old_psw; /* 0x0028 */ + psw_t mcck_old_psw; /* 0x0030 */ + psw_t io_old_psw; /* 0x0038 */ + __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */ + psw_t external_new_psw; /* 0x0058 */ + psw_t svc_new_psw; /* 0x0060 */ + psw_t program_new_psw; /* 0x0068 */ + psw_t mcck_new_psw; /* 0x0070 */ + psw_t io_new_psw; /* 0x0078 */ + __u32 ext_params; /* 0x0080 */ + __u16 cpu_addr; /* 0x0084 */ + __u16 ext_int_code; /* 0x0086 */ + __u16 svc_ilc; /* 0x0088 */ + __u16 svc_code; /* 0x008a */ + __u16 pgm_ilc; /* 0x008c */ + __u16 pgm_code; /* 0x008e */ + __u32 trans_exc_code; /* 0x0090 */ + __u16 mon_class_num; /* 0x0094 */ + __u16 per_perc_atmid; /* 0x0096 */ + __u32 per_address; /* 0x0098 */ + __u32 monitor_code; /* 0x009c */ + __u8 exc_access_id; /* 0x00a0 */ + __u8 per_access_id; /* 0x00a1 */ + __u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ + __u16 subchannel_id; /* 0x00b8 */ + __u16 subchannel_nr; /* 0x00ba */ + __u32 io_int_parm; /* 0x00bc */ + __u32 io_int_word; /* 0x00c0 */ + __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ + __u32 stfl_fac_list; /* 0x00c8 */ + __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */ + __u32 extended_save_area_addr; /* 0x00d4 */ + __u32 cpu_timer_save_area[2]; /* 0x00d8 */ + __u32 clock_comp_save_area[2]; /* 0x00e0 */ + __u32 mcck_interruption_code[2]; /* 0x00e8 */ + __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ + __u32 external_damage_code; /* 0x00f4 */ + __u32 failing_storage_address; /* 0x00f8 */ + __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ + __u32 st_status_fixed_logout[4]; /* 0x0100 */ + __u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ + + /* CPU register save area: defined by architecture */ + __u32 access_regs_save_area[16]; /* 0x0120 */ + __u32 floating_pt_save_area[8]; /* 0x0160 */ + __u32 gpregs_save_area[16]; /* 0x0180 */ + __u32 cregs_save_area[16]; /* 0x01c0 */ + + /* Return psws. */ + __u32 save_area[16]; /* 0x0200 */ + psw_t return_psw; /* 0x0240 */ + psw_t return_mcck_psw; /* 0x0248 */ + + /* CPU time accounting values */ + __u64 sync_enter_timer; /* 0x0250 */ + __u64 async_enter_timer; /* 0x0258 */ + __u64 exit_timer; /* 0x0260 */ + __u64 user_timer; /* 0x0268 */ + __u64 system_timer; /* 0x0270 */ + __u64 steal_timer; /* 0x0278 */ + __u64 last_update_timer; /* 0x0280 */ + __u64 last_update_clock; /* 0x0288 */ + + /* Current process. */ + __u32 current_task; /* 0x0290 */ + __u32 thread_info; /* 0x0294 */ + __u32 kernel_stack; /* 0x0298 */ + + /* Interrupt and panic stack. */ + __u32 async_stack; /* 0x029c */ + __u32 panic_stack; /* 0x02a0 */ + + /* Address space pointer. */ + __u32 kernel_asce; /* 0x02a4 */ + __u32 user_asce; /* 0x02a8 */ + __u32 user_exec_asce; /* 0x02ac */ + + /* SMP info area */ + cpuid_t cpu_id; /* 0x02b0 */ + __u32 cpu_nr; /* 0x02b8 */ + __u32 softirq_pending; /* 0x02bc */ + __u32 percpu_offset; /* 0x02c0 */ + __u32 ext_call_fast; /* 0x02c4 */ + __u64 int_clock; /* 0x02c8 */ + __u64 clock_comparator; /* 0x02d0 */ + __u8 pad_0x02d8[0x0300-0x02d8]; /* 0x02d8 */ + + /* Interrupt response block */ + __u8 irb[64]; /* 0x0300 */ + + __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ + + /* + * 0xe00 contains the address of the IPL Parameter Information + * block. 
Dump tools need IPIB for IPL after dump. + * Note: do not change the position of any fields in 0x0e00-0x0f00 + */ + __u32 ipib; /* 0x0e00 */ + __u32 ipib_checksum; /* 0x0e04 */ + + /* Align to the top 1k of prefix area */ + __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ #else /* !__s390x__ */ - /* prefix area: defined by architecture */ - __u32 ccw1[2]; /* 0x000 */ - __u32 ccw2[4]; /* 0x008 */ - __u8 pad1[0x80-0x18]; /* 0x018 */ - __u32 ext_params; /* 0x080 */ - __u16 cpu_addr; /* 0x084 */ - __u16 ext_int_code; /* 0x086 */ - __u16 svc_ilc; /* 0x088 */ - __u16 svc_code; /* 0x08a */ - __u16 pgm_ilc; /* 0x08c */ - __u16 pgm_code; /* 0x08e */ - __u32 data_exc_code; /* 0x090 */ - __u16 mon_class_num; /* 0x094 */ - __u16 per_perc_atmid; /* 0x096 */ - addr_t per_address; /* 0x098 */ - __u8 exc_access_id; /* 0x0a0 */ - __u8 per_access_id; /* 0x0a1 */ - __u8 op_access_id; /* 0x0a2 */ - __u8 ar_access_id; /* 0x0a3 */ - __u8 pad2[0xA8-0xA4]; /* 0x0a4 */ - addr_t trans_exc_code; /* 0x0a8 */ - addr_t monitor_code; /* 0x0b0 */ - __u16 subchannel_id; /* 0x0b8 */ - __u16 subchannel_nr; /* 0x0ba */ - __u32 io_int_parm; /* 0x0bc */ - __u32 io_int_word; /* 0x0c0 */ - __u8 pad3[0xc8-0xc4]; /* 0x0c4 */ - __u32 stfl_fac_list; /* 0x0c8 */ - __u8 pad4[0xe8-0xcc]; /* 0x0cc */ - __u32 mcck_interruption_code[2]; /* 0x0e8 */ - __u8 pad5[0xf4-0xf0]; /* 0x0f0 */ - __u32 external_damage_code; /* 0x0f4 */ - addr_t failing_storage_address; /* 0x0f8 */ - __u8 pad6[0x120-0x100]; /* 0x100 */ - psw_t restart_old_psw; /* 0x120 */ - psw_t external_old_psw; /* 0x130 */ - psw_t svc_old_psw; /* 0x140 */ - psw_t program_old_psw; /* 0x150 */ - psw_t mcck_old_psw; /* 0x160 */ - psw_t io_old_psw; /* 0x170 */ - __u8 pad7[0x1a0-0x180]; /* 0x180 */ - psw_t restart_psw; /* 0x1a0 */ - psw_t external_new_psw; /* 0x1b0 */ - psw_t svc_new_psw; /* 0x1c0 */ - psw_t program_new_psw; /* 0x1d0 */ - psw_t mcck_new_psw; /* 0x1e0 */ - psw_t io_new_psw; /* 0x1f0 */ - psw_t return_psw; /* 0x200 */ - __u8 irb[64]; /* 0x210 */ - __u64 sync_enter_timer; /* 0x250 */ - __u64 async_enter_timer; /* 0x258 */ - __u64 exit_timer; /* 0x260 */ - __u64 user_timer; /* 0x268 */ - __u64 system_timer; /* 0x270 */ - __u64 steal_timer; /* 0x278 */ - __u64 last_update_timer; /* 0x280 */ - __u64 last_update_clock; /* 0x288 */ - psw_t return_mcck_psw; /* 0x290 */ - __u8 pad8[0xc00-0x2a0]; /* 0x2a0 */ - /* System info area */ - __u64 save_area[16]; /* 0xc00 */ - __u8 pad9[0xd40-0xc80]; /* 0xc80 */ - __u64 kernel_stack; /* 0xd40 */ - __u64 thread_info; /* 0xd48 */ - __u64 async_stack; /* 0xd50 */ - __u64 kernel_asce; /* 0xd58 */ - __u64 user_asce; /* 0xd60 */ - __u64 panic_stack; /* 0xd68 */ - __u64 user_exec_asce; /* 0xd70 */ - __u8 pad10[0xd80-0xd78]; /* 0xd78 */ - /* entry.S sensitive area start */ - cpuid_t cpu_id; /* 0xd80 */ - __u32 cpu_nr; /* 0xd88 */ - __u8 pad_0xd8c[0xdc0-0xd8c]; /* 0xd8c */ - /* entry.S sensitive area end */ - - /* SMP info area: defined by DJB */ - __u64 clock_comparator; /* 0xdc0 */ - __u64 ext_call_fast; /* 0xdc8 */ - __u64 percpu_offset; /* 0xdd0 */ - __u64 current_task; /* 0xdd8 */ - __u32 softirq_pending; /* 0xde0 */ - __u32 pad_0x0de4; /* 0xde4 */ - __u64 int_clock; /* 0xde8 */ - __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ - - /* 0xe00 contains the address of the IPL Parameter */ - /* Information block. Dump tools need IPIB for IPL */ - /* after dump. 
*/ - __u64 ipib; /* 0xe00 */ - __u32 ipib_checksum; /* 0xe08 */ + /* 0x0000 - 0x01ff: defined by architecture */ + __u32 ccw1[2]; /* 0x0000 */ + __u32 ccw2[4]; /* 0x0008 */ + __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ + __u32 ext_params; /* 0x0080 */ + __u16 cpu_addr; /* 0x0084 */ + __u16 ext_int_code; /* 0x0086 */ + __u16 svc_ilc; /* 0x0088 */ + __u16 svc_code; /* 0x008a */ + __u16 pgm_ilc; /* 0x008c */ + __u16 pgm_code; /* 0x008e */ + __u32 data_exc_code; /* 0x0090 */ + __u16 mon_class_num; /* 0x0094 */ + __u16 per_perc_atmid; /* 0x0096 */ + addr_t per_address; /* 0x0098 */ + __u8 exc_access_id; /* 0x00a0 */ + __u8 per_access_id; /* 0x00a1 */ + __u8 op_access_id; /* 0x00a2 */ + __u8 ar_access_id; /* 0x00a3 */ + __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */ + addr_t trans_exc_code; /* 0x00a8 */ + addr_t monitor_code; /* 0x00b0 */ + __u16 subchannel_id; /* 0x00b8 */ + __u16 subchannel_nr; /* 0x00ba */ + __u32 io_int_parm; /* 0x00bc */ + __u32 io_int_word; /* 0x00c0 */ + __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ + __u32 stfl_fac_list; /* 0x00c8 */ + __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */ + __u32 mcck_interruption_code[2]; /* 0x00e8 */ + __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ + __u32 external_damage_code; /* 0x00f4 */ + addr_t failing_storage_address; /* 0x00f8 */ + __u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ + psw_t restart_old_psw; /* 0x0120 */ + psw_t external_old_psw; /* 0x0130 */ + psw_t svc_old_psw; /* 0x0140 */ + psw_t program_old_psw; /* 0x0150 */ + psw_t mcck_old_psw; /* 0x0160 */ + psw_t io_old_psw; /* 0x0170 */ + __u8 pad_0x0180[0x01a0-0x0180]; /* 0x0180 */ + psw_t restart_psw; /* 0x01a0 */ + psw_t external_new_psw; /* 0x01b0 */ + psw_t svc_new_psw; /* 0x01c0 */ + psw_t program_new_psw; /* 0x01d0 */ + psw_t mcck_new_psw; /* 0x01e0 */ + psw_t io_new_psw; /* 0x01f0 */ + + /* Entry/exit save area & return psws. */ + __u64 save_area[16]; /* 0x0200 */ + psw_t return_psw; /* 0x0280 */ + psw_t return_mcck_psw; /* 0x0290 */ + + /* CPU accounting and timing values. */ + __u64 sync_enter_timer; /* 0x02a0 */ + __u64 async_enter_timer; /* 0x02a8 */ + __u64 exit_timer; /* 0x02b0 */ + __u64 user_timer; /* 0x02b8 */ + __u64 system_timer; /* 0x02c0 */ + __u64 steal_timer; /* 0x02c8 */ + __u64 last_update_timer; /* 0x02d0 */ + __u64 last_update_clock; /* 0x02d8 */ + + /* Current process. */ + __u64 current_task; /* 0x02e0 */ + __u64 thread_info; /* 0x02e8 */ + __u64 kernel_stack; /* 0x02f0 */ + + /* Interrupt and panic stack. */ + __u64 async_stack; /* 0x02f8 */ + __u64 panic_stack; /* 0x0300 */ + + /* Address space pointer. */ + __u64 kernel_asce; /* 0x0308 */ + __u64 user_asce; /* 0x0310 */ + __u64 user_exec_asce; /* 0x0318 */ + + /* SMP info area */ + cpuid_t cpu_id; /* 0x0320 */ + __u32 cpu_nr; /* 0x0328 */ + __u32 softirq_pending; /* 0x032c */ + __u64 percpu_offset; /* 0x0330 */ + __u64 ext_call_fast; /* 0x0338 */ + __u64 int_clock; /* 0x0340 */ + __u64 clock_comparator; /* 0x0348 */ + __u64 vdso_per_cpu_data; /* 0x0350 */ + __u8 pad_0x0358[0x0380-0x0358]; /* 0x0358 */ + + /* Interrupt response block. 
*/ + __u8 irb[64]; /* 0x0380 */ /* Per cpu primary space access list */ - __u8 pad_0xe0c[0xe38-0xe0c]; /* 0xe0c */ - __u64 vdso_per_cpu_data; /* 0xe38 */ - __u32 paste[16]; /* 0xe40 */ - - __u8 pad13[0x11b8-0xe80]; /* 0xe80 */ - - /* 64 bit extparam used for pfault, diag 250 etc */ - __u64 ext_params2; /* 0x11B8 */ - - __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */ - - /* System info area */ - - __u64 floating_pt_save_area[16]; /* 0x1200 */ - __u64 gpregs_save_area[16]; /* 0x1280 */ - __u32 st_status_fixed_logout[4]; /* 0x1300 */ - __u8 pad15[0x1318-0x1310]; /* 0x1310 */ - __u32 prefixreg_save_area; /* 0x1318 */ - __u32 fpt_creg_save_area; /* 0x131c */ - __u8 pad16[0x1324-0x1320]; /* 0x1320 */ - __u32 tod_progreg_save_area; /* 0x1324 */ - __u32 cpu_timer_save_area[2]; /* 0x1328 */ - __u32 clock_comp_save_area[2]; /* 0x1330 */ - __u8 pad17[0x1340-0x1338]; /* 0x1338 */ - __u32 access_regs_save_area[16]; /* 0x1340 */ - __u64 cregs_save_area[16]; /* 0x1380 */ + __u32 paste[16]; /* 0x03c0 */ + + __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ + + /* + * 0xe00 contains the address of the IPL Parameter Information + * block. Dump tools need IPIB for IPL after dump. + * Note: do not change the position of any fields in 0x0e00-0x0f00 + */ + __u64 ipib; /* 0x0e00 */ + __u32 ipib_checksum; /* 0x0e08 */ + __u8 pad_0x0e0c[0x11b8-0x0e0c]; /* 0x0e0c */ + + /* 64 bit extparam used for pfault/diag 250: defined by architecture */ + __u64 ext_params2; /* 0x11B8 */ + __u8 pad_0x11c0[0x1200-0x11C0]; /* 0x11C0 */ + + /* CPU register save area: defined by architecture */ + __u64 floating_pt_save_area[16]; /* 0x1200 */ + __u64 gpregs_save_area[16]; /* 0x1280 */ + __u32 st_status_fixed_logout[4]; /* 0x1300 */ + __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ + __u32 prefixreg_save_area; /* 0x1318 */ + __u32 fpt_creg_save_area; /* 0x131c */ + __u8 pad_0x1320[0x1324-0x1320]; /* 0x1320 */ + __u32 tod_progreg_save_area; /* 0x1324 */ + __u32 cpu_timer_save_area[2]; /* 0x1328 */ + __u32 clock_comp_save_area[2]; /* 0x1330 */ + __u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */ + __u32 access_regs_save_area[16]; /* 0x1340 */ + __u64 cregs_save_area[16]; /* 0x1380 */ /* align to the top of the prefix area */ - - __u8 pad18[0x2000-0x1400]; /* 0x1400 */ + __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ #endif /* !__s390x__ */ } __attribute__((packed)); /* End structure*/ diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec7e35f6055..1046c2c9f8d 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -469,6 +469,8 @@ start: .org 0x10000 startup:basr %r13,0 # get base .LPG0: + xc 0x200(256),0x200 # partially clear lowcore + xc 0x300(256),0x300 #ifndef CONFIG_MARCH_G5 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} -- cgit v1.2.3 From 159d1ff8f6c38086ed75f8e892790d0a4f3a6b71 Mon Sep 17 00:00:00 2001 From: Frank Munzert Date: Thu, 26 Mar 2009 15:24:45 +0100 Subject: [S390] Use csum_partial in checksum.h The cksm function in system.h is duplicate to csum_partial in checksum.h. Remove cksm and use csum_partial instead. 
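For illustration only (not taken from the kernel source): the conversion is mechanical, since the running checksum that cksm() always started at zero is simply passed as the third argument of csum_partial(). The helper name and the buf/len parameters below are placeholders.

    #include <linux/types.h>
    #include <asm/checksum.h>   /* csum_partial() */

    /* placeholder helper showing the before/after pattern */
    static u32 block_checksum(const void *buf, int len)
    {
        /* was: return cksm(buf, len); */
        return (u32) csum_partial(buf, len, 0);
    }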
Signed-off-by: Frank Munzert Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/system.h | 16 ---------------- arch/s390/kernel/ipl.c | 4 +++- 2 files changed, 3 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 3f2ccb82b86..3a8b26eb1f2 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -458,22 +458,6 @@ static inline unsigned short stap(void) return cpu_address; } -static inline u32 cksm(void *addr, unsigned long len) -{ - register unsigned long _addr asm("0") = (unsigned long) addr; - register unsigned long _len asm("1") = len; - unsigned long accu = 0; - - asm volatile( - "0:\n" - " cksm %0,%1\n" - " jnz 0b\n" - : "+d" (accu), "+d" (_addr), "+d" (_len) - : - : "cc", "memory"); - return accu; -} - extern void (*_machine_restart)(char *command); extern void (*_machine_halt)(void); extern void (*_machine_power_off)(void); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5663c1f8e46..505fec06e63 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -24,6 +24,7 @@ #include #include #include +#include #define IPL_PARM_BLOCK_VERSION 0 @@ -1359,7 +1360,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) "a" (&lowcore_ptr[smp_processor_id()]->ipib)); #endif asm volatile("stura %0,%1" - :: "a" (cksm(reipl_block_actual, reipl_block_actual->hdr.len)), + :: "a" (csum_partial(reipl_block_actual, + reipl_block_actual->hdr.len, 0)), "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum)); preempt_enable(); dump_run(trigger); -- cgit v1.2.3 From 59f2e69d0f95bc00353628ef33fd534fbb8e3597 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 26 Mar 2009 15:24:46 +0100 Subject: [S390] zfcpdump: Prevent zcore from being built as a kernel module. The zcore code switches to real addressing mode when creating a kernel dump. This is not possible if it is built as a kernel module. With this patch zcore (zfcpdump) can't be built as a kernel module any more. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 2 +- arch/s390/kernel/setup.c | 4 ++-- arch/s390/kernel/smp.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0a9463bea75..2a8af5e1634 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -545,7 +545,7 @@ config KEXEC but is independent of hardware/microcode support.
config ZFCPDUMP - tristate "zfcpdump support" + bool "zfcpdump support" select SMP default n help diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 91551ef1d67..46fc981e02b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -226,7 +226,7 @@ static void __init conmode_default(void) } } -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP static void __init setup_zfcpdump(unsigned int console_devno) { static char str[41]; @@ -515,7 +515,7 @@ static void __init setup_memory_end(void) unsigned long max_mem; int i; -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP if (ipl_info.type == IPL_TYPE_FCP_DUMP) { memory_end = ZFCPDUMP_HSA_SIZE; memory_end_set = 1; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 775885772ff..bb3d34aa7b8 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -226,7 +226,7 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); */ #define CPU_INIT_NO 1 -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP /* * zfcpdump_prefix_array holds prefix registers for the following scenario: @@ -267,7 +267,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas); static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } -#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ +#endif /* CONFIG_ZFCPDUMP */ static int cpu_stopped(int cpu) { -- cgit v1.2.3 From 6aa0d3a922c4f58fc36cc1502c6ac72f999e26bb Mon Sep 17 00:00:00 2001 From: Stoyan Gaydarov Date: Thu, 26 Mar 2009 15:24:47 +0100 Subject: [S390] BUG to BUG_ON changes Signed-off-by: Stoyan Gaydarov Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/debug.c | 3 +-- arch/s390/kernel/setup.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ba03fc0a3a5..39137b9e162 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -698,8 +698,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, if ((uid != 0) || (gid != 0)) pr_warning("Root becomes the owner of all s390dbf files " "in sysfs\n"); - if (!initialized) - BUG(); + BUG_ON(!initialized); mutex_lock(&debug_mutex); /* create new debug_info */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 46fc981e02b..580abb53ce8 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -134,8 +134,7 @@ void __cpuinit cpu_init(void) atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - if (current->mm) - BUG(); + BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); } -- cgit v1.2.3 From 3e75a902196c45d26d5e28014eb2d9821aa9794f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 26 Mar 2009 15:24:48 +0100 Subject: [S390] use kzfree() Use kzfree() instead of memset() + kfree(). 
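As a sketch of the pattern (illustrative only; the function and parameter names are placeholders): kzfree() zeroes the whole allocation, as reported by ksize(), and then frees it, so the separate memset() becomes unnecessary.

    #include <linux/slab.h>     /* kzfree(), kfree() */
    #include <linux/string.h>   /* memset() */

    static void wipe_and_free(void *buf, size_t len)
    {
        /* was: memset(buf, 0, len); kfree(buf); */
        /* kzfree() clears the full allocated size (ksize(buf) bytes) before freeing */
        kzfree(buf);
    }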
Signed-off-by: Johannes Weiner Reviewed-by: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/crypto/prng.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index eca724d229e..b49c00ce65e 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -201,8 +201,7 @@ out_free: static void __exit prng_exit(void) { /* wipe me */ - memset(p->buf, 0, prng_chunk_size); - kfree(p->buf); + kzfree(p->buf); kfree(p); misc_deregister(&prng_dev); -- cgit v1.2.3 From a0fa8e3743c80643a06a770b95e73e751548ab17 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 26 Mar 2009 15:24:49 +0100 Subject: [S390] s390dbf: Remove redundant initializations. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/debug.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 39137b9e162..369de12b873 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -603,7 +603,7 @@ debug_input(struct file *file, const char __user *user_buf, size_t length, static int debug_open(struct inode *inode, struct file *file) { - int i = 0, rc = 0; + int i, rc = 0; file_private_info_t *p_info; debug_info_t *debug_info, *debug_info_snapshot; @@ -1155,7 +1155,6 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view) else { debugfs_remove(id->debugfs_entries[i]); id->views[i] = NULL; - rc = 0; } spin_unlock_irqrestore(&id->lock, flags); out: -- cgit v1.2.3 From 58ace9f2a8f1caa0303aa64079d3929be28a937a Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 26 Mar 2009 15:24:50 +0100 Subject: [S390] s390dbf: Remove needless check for NULL pointer. The check for a NULL pointer has already been done before. Therefore we can remove the second check. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/debug.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 369de12b873..be8bceaf37d 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -642,8 +642,7 @@ found: p_info = kmalloc(sizeof(file_private_info_t), GFP_KERNEL); if(!p_info){ - if(debug_info_snapshot) - debug_info_free(debug_info_snapshot); + debug_info_free(debug_info_snapshot); rc = -ENOMEM; goto out; } -- cgit v1.2.3 From 008d2d112cb8a743a87dfe41a67e11c0a4c73aa4 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 26 Mar 2009 15:24:51 +0100 Subject: [S390] ipl: Improve checking logic and remove switch defaults. A code analysis tool reported two warnings: "The expression `ipl_info.type == IPL_TYPE_FCP' is true whenever evaluated." and "Default is not possible". This patch improves the corresponding if statement logic and removes the unnecessary switch defaults.
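One side effect worth noting, sketched here with placeholder names rather than the real REIPL_METHOD_*/DUMP_METHOD_* enums: once an unreachable default is gone, a switch over an enum that handles every value explicitly lets gcc's -Wswitch check flag any enum value added later but left unhandled.

    enum example_method { METHOD_A, METHOD_B };  /* placeholder enum */

    static void run_method(enum example_method m)
    {
        switch (m) {
        case METHOD_A:
            /* ... */
            break;
        case METHOD_B:
            break;
        /* no default: a later METHOD_C without a case draws a -Wswitch warning */
        }
    }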
Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 505fec06e63..2b901fb8193 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -966,7 +966,6 @@ static void reipl_run(struct shutdown_trigger *trigger) diag308(DIAG308_IPL, NULL); break; case REIPL_METHOD_FCP_DUMP: - default: break; } disabled_wait((unsigned long) __builtin_return_address(0)); @@ -1075,10 +1074,12 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) - return 0; - if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) - make_attrs_ro(reipl_fcp_attrs); + if (!diag308_set_works) { + if (ipl_info.type == IPL_TYPE_FCP) + make_attrs_ro(reipl_fcp_attrs); + else + return 0; + } reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); if (!reipl_block_fcp) @@ -1259,7 +1260,6 @@ static void dump_run(struct shutdown_trigger *trigger) diag308(DIAG308_DUMP, NULL); break; case DUMP_METHOD_NONE: - default: return; } printk(KERN_EMERG "Dump failed!\n"); @@ -1746,7 +1746,6 @@ void __init setup_ipl(void) sizeof(ipl_info.data.nss.name)); break; case IPL_TYPE_UNKNOWN: - default: /* We have no info to copy */ break; } -- cgit v1.2.3 From 6d54c5a3fb13d32d66a8383cb0b5fb422a563051 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:52 +0100 Subject: [S390] smp: fix memory leak on __cpu_up If sigp_set_prefix fails on __cpu_up we leak the lowcore structures and async+panic stacks for the failed cpu. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 6 +++--- arch/s390/kernel/vdso.c | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index bb3d34aa7b8..1f4711b60aa 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -508,7 +508,6 @@ out: return -ENOMEM; } -#ifdef CONFIG_HOTPLUG_CPU static void smp_free_lowcore(int cpu) { struct _lowcore *lowcore; @@ -527,7 +526,6 @@ static void smp_free_lowcore(int cpu) free_pages((unsigned long) lowcore, lc_order); lowcore_ptr[cpu] = NULL; } -#endif /* CONFIG_HOTPLUG_CPU */ /* Upping and downing of CPUs */ int __cpuinit __cpu_up(unsigned int cpu) @@ -544,8 +542,10 @@ int __cpuinit __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode) + if (ccode) { + smp_free_lowcore(cpu); return -EIO; + } idle = current_set[cpu]; cpu_lowcore = lowcore_ptr[cpu]; diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 690e1781968..89b2e7f1b7a 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -144,7 +144,6 @@ out: return -ENOMEM; } -#ifdef CONFIG_HOTPLUG_CPU void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) { unsigned long segment_table, page_table, page_frame; @@ -163,7 +162,6 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) free_page(page_table); free_pages(segment_table, SEGMENT_ORDER); } -#endif /* CONFIG_HOTPLUG_CPU */ static void __vdso_init_cr5(void *dummy) { -- cgit v1.2.3 From d0d3cdf4c27fa4ce241616da08138954e02890f7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:53 +0100 Subject: [S390] smp: perform initial cpu reset before starting a cpu Performing an initial cpu reset makes sure all registers and tlbs of the targeted cpu are 
initialized and flushed. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 1f4711b60aa..fe5f8dc1e6f 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -534,18 +534,23 @@ int __cpuinit __cpu_up(unsigned int cpu) struct _lowcore *cpu_lowcore; struct stack_frame *sf; sigp_ccode ccode; + u32 lowcore; if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) return -EIO; if (smp_alloc_lowcore(cpu)) return -ENOMEM; - - ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), - cpu, sigp_set_prefix); - if (ccode) { - smp_free_lowcore(cpu); - return -EIO; - } + do { + ccode = signal_processor(cpu, sigp_initial_cpu_reset); + if (ccode == sigp_busy) + udelay(10); + if (ccode == sigp_not_operational) + goto err_out; + } while (ccode == sigp_busy); + + lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; + while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) + udelay(10); idle = current_set[cpu]; cpu_lowcore = lowcore_ptr[cpu]; @@ -574,6 +579,10 @@ int __cpuinit __cpu_up(unsigned int cpu) while (!cpu_online(cpu)) cpu_relax(); return 0; + +err_out: + smp_free_lowcore(cpu); + return -EIO; } static int __init setup_possible_cpus(char *s) -- cgit v1.2.3 From 2ac3307f275c2a91af0417e16d2cfb95ae478661 Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Thu, 26 Mar 2009 15:24:54 +0100 Subject: [S390] fix dfp elf hwcap/facility bit detection The old dfp detection wanted to check bit 43 (dfp high performance), but due to a wrong calculation always used to check bit 42. Additionally the "userspace expectation" is, that the dfp capability bit is set is if facility bit 42 (decimal floating point facility available) and bit 44 (perform floating point operation facility avail). The patch fixes the bit calculation and extends the check to work like: elf hw cap dfp bit = facility bits 42 (dfp) & 44 (pfpo) available Signed-off-by: Christian Ehrhardt Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/setup.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 580abb53ce8..18222ac4078 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -713,13 +713,15 @@ static void __init setup_hwcaps(void) * How many facility words are stored depends on the number of * doublewords passed to the instruction. The additional facilites * are: - * Bit 43: decimal floating point facility is installed + * Bit 42: decimal floating point facility is installed + * Bit 44: perform floating point operation facility is installed * translated to: * HWCAP_S390_DFP bit 6. */ if ((elf_hwcap & (1UL << 2)) && __stfle(&facility_list_extended, 1) > 0) { - if (facility_list_extended & (1ULL << (64 - 43))) + if ((facility_list_extended & (1ULL << (63 - 42))) + && (facility_list_extended & (1ULL << (63 - 44)))) elf_hwcap |= 1UL << 6; } -- cgit v1.2.3 From 7e9b580e5f0644cd8952b6671fd5380fd430bca3 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 26 Mar 2009 15:24:55 +0100 Subject: [S390] Ensure that ipl panic notifier is called late. The s390 ipl panic notifier will stop the system or trigger a system dump. This should be done as final action on the panic path. All other panic notifiers should be executed before. Currently we use priority 0 for the ipl notifier. 
In order to be called late, this patch changes the priority to INT_MIN which is the lowest possible priority. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 2b901fb8193..56d876904ec 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1722,7 +1722,7 @@ static int on_panic_notify(struct notifier_block *self, static struct notifier_block on_panic_nb = { .notifier_call = on_panic_notify, - .priority = 0, + .priority = INT_MIN, }; void __init setup_ipl(void) -- cgit v1.2.3 From 488253ce49714f4e9d42413c1d60b7724059a338 Mon Sep 17 00:00:00 2001 From: Andreas Krebbel Date: Thu, 26 Mar 2009 15:24:56 +0100 Subject: [S390] Add hwcap flag for the etf3 enhancement facility The Extended Translation Facility 3 (ETF3) added instructions which allow conversions between different unicode character maps (UTF-8, UTF-32 ...). These instructions got enhanced with a later version of the ETF3 allowing malformed multibyte chars to be recognized and reported correctly. The attached patch reserves bit 8 in the elf hwcaps vector for the enhanced version of ETF3. The bit corresponds to the stfle bits 22 and 30 and will only be set if both of the stfle bits are set. Signed-off-by: Andreas Krebbel Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/processor.c | 6 +++--- arch/s390/kernel/setup.c | 13 ++++++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 423da1bd42a..802c8ab247f 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void) static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[8] = { + static const char *hwcap_str[9] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat" + "edat", "etf3eh" }; struct _lowcore *lc; unsigned long n = (unsigned long) v - 1; @@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); seq_puts(m, "features\t: "); - for (i = 0; i < 8; i++) + for (i = 0; i < 9; i++) if (hwcap_str[i] && (elf_hwcap & (1UL << i))) seq_printf(m, "%s ", hwcap_str[i]); seq_puts(m, "\n"); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 18222ac4078..06201b93cbb 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -696,15 +696,22 @@ static void __init setup_hwcaps(void) * Bit 17: the message-security assist is installed * Bit 19: the long-displacement facility is installed * Bit 21: the extended-immediate facility is installed + * Bit 22: extended-translation facility 3 is installed + * Bit 30: extended-translation facility 3 enhancement facility * These get translated to: * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, - * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5. + * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and + * HWCAP_S390_ETF3EH bit 8 (22 && 30). */ for (i = 0; i < 6; i++) if (facility_list & (1UL << (31 - stfl_bits[i]))) elf_hwcap |= 1UL << i; + if ((facility_list & (1UL << (31 - 22))) + && (facility_list & (1UL << (31 - 30)))) + elf_hwcap |= 1UL << 8; + /* * Check for additional facilities with store-facility-list-extended. 
* stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 @@ -716,12 +723,12 @@ static void __init setup_hwcaps(void) * Bit 42: decimal floating point facility is installed * Bit 44: perform floating point operation facility is installed * translated to: - * HWCAP_S390_DFP bit 6. + * HWCAP_S390_DFP bit 6 (42 && 44). */ if ((elf_hwcap & (1UL << 2)) && __stfle(&facility_list_extended, 1) > 0) { if ((facility_list_extended & (1ULL << (63 - 42))) - && (facility_list_extended & (1ULL << (63 - 44)))) + && (facility_list_extended & (1ULL << (63 - 44)))) elf_hwcap |= 1UL << 6; } -- cgit v1.2.3 From c70d0fef1da286d7f96e774674aea2d20cf6222f Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 26 Mar 2009 15:24:57 +0100 Subject: [S390] fix clock comparator save area usage The lowcore clock comparator save area on 64 bit machines is defined to contain only the seven most significant bits of the register. That's also why it starts at an uneven address (0x1331). The current code however writes eight bytes to the address and therefore overwrites the first byte of the access register save area. Fix this and write only seven bytes to the save area. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/reipl64.S | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index c41930499a5..774147824c3 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -1,10 +1,7 @@ /* - * arch/s390/kernel/reipl.S - * - * S390 version - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) - Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Copyright IBM Corp 2000,2009 + * Author(s): Holger Smolinski , + * Denis Joseph Barrow, */ #include @@ -30,7 +27,7 @@ do_reipl_asm: basr %r13,0 mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) stckc .Lclkcmp-.Lpg0(%r13) - mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) + mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13) stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) -- cgit v1.2.3 From 6f7a321d5feb0bc9aa7f7b14eceb1e6a63bff937 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 26 Mar 2009 15:24:58 +0100 Subject: [S390] cpumask: remove cpu_coregroup_map Impact: cleanup cpu_coregroup_mask is the New Hotness. As S/390 uses theirs internally, so we just make it static. 
Signed-off-by: Rusty Russell Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/topology.h | 1 - arch/s390/kernel/topology.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c979c3b56ab..5e0ad618dc4 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -5,7 +5,6 @@ #define mc_capable() (1) -cpumask_t cpu_coregroup_map(unsigned int cpu); const struct cpumask *cpu_coregroup_mask(unsigned int cpu); extern cpumask_t cpu_core_map[NR_CPUS]; diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index cc362c9ea8f..3c72c9cf22b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(topology_lock); cpumask_t cpu_core_map[NR_CPUS]; -cpumask_t cpu_coregroup_map(unsigned int cpu) +static cpumask_t cpu_coregroup_map(unsigned int cpu) { struct core_info *core = &core_info; unsigned long flags; -- cgit v1.2.3 From 93632d1bf7447d445c6fc274c8931152f9412b56 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 26 Mar 2009 15:24:59 +0100 Subject: [S390] cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits. Impact: cleanup, futureproof In fact, all cpumask ops will only be valid (in general) for bit numbers < nr_cpu_ids. So use that instead of NR_CPUS in various places (I also updated the immediate sites to use the new cpumask_ operators). This is always safe: no cpu number can be >= nr_cpu_ids, and nr_cpu_ids is initialized to NR_CPUS at boot. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Ingo Molnar Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index fe5f8dc1e6f..13052a4ed71 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -297,8 +297,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) { int cpu_id, logical_cpu; - logical_cpu = first_cpu(avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_first(&avail); + if (logical_cpu >= nr_cpu_ids) return 0; for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { if (cpu_known(cpu_id)) @@ -309,8 +309,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) continue; cpu_set(logical_cpu, cpu_present_map); smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; - logical_cpu = next_cpu(logical_cpu, avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_next(logical_cpu, &avail); + if (logical_cpu >= nr_cpu_ids) break; } return 0; @@ -322,8 +322,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) int cpu_id, logical_cpu, cpu; int rc; - logical_cpu = first_cpu(avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_first(&avail); + if (logical_cpu >= nr_cpu_ids) return 0; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) @@ -344,8 +344,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; else smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; - logical_cpu = next_cpu(logical_cpu, avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_next(logical_cpu, &avail); + if (logical_cpu >= nr_cpu_ids) break; } out: @@ -591,7 +591,7 @@ static int __init setup_possible_cpus(char *s) pcpus = simple_strtoul(s, NULL, 0); cpu_possible_map = cpumask_of_cpu(0); - for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++) + for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 
cpu_set(cpu, cpu_possible_map); return 0; } -- cgit v1.2.3 From def6cfb70bab83c0094bc0cedd27c4eda563043e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 26 Mar 2009 15:25:00 +0100 Subject: [S390] cpumask: Use accessors code. Impact: use new API Use the accessors rather than frobbing bits directly. Most of this is in arch code I haven't even compiled, but is straightforward. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 13052a4ed71..006ed5016eb 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -590,9 +590,8 @@ static int __init setup_possible_cpus(char *s) int pcpus, cpu; pcpus = simple_strtoul(s, NULL, 0); - cpu_possible_map = cpumask_of_cpu(0); - for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) - cpu_set(cpu, cpu_possible_map); + for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++) + set_cpu_possible(cpu, true); return 0; } early_param("possible_cpus", setup_possible_cpus); -- cgit v1.2.3 From 005f8eee6f3c8173e492d7bd4d51bda990eb468b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 26 Mar 2009 15:25:01 +0100 Subject: [S390] cpumask: use mm_cpumask() wrapper Makes code futureproof against the impending change to mm->cpu_vm_mask. It's also a chance to use the new cpumask_ ops which take a pointer (the older ones are deprecated, but there's no hurry for arch code). Signed-off-by: Rusty Russell Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/mmu_context.h | 2 +- arch/s390/include/asm/tlbflush.h | 4 ++-- arch/s390/mm/pgtable.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 28ec870655a..fc7edd6f41b 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -74,7 +74,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - cpu_set(smp_processor_id(), next->cpu_vm_mask); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); update_mm(next, tsk); } diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index d60394b9745..304cffa623e 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -51,7 +51,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm) * If the process only ran on the local cpu, do a local flush. 
*/ local_cpumask = cpumask_of_cpu(smp_processor_id()); - if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) + if (cpumask_equal(mm_cpumask(mm), &local_cpumask)) __tlb_flush_local(); else __tlb_flush_global(); @@ -73,7 +73,7 @@ static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_mm(struct mm_struct * mm) { - if (unlikely(cpus_empty(mm->cpu_vm_mask))) + if (unlikely(cpumask_empty(mm_cpumask(mm)))) return; /* * If the machine has IDTE we prefer to do a per mm flush diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 9bf86125f6f..be6c1cf4ad5 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -296,7 +296,7 @@ int s390_enable_sie(void) tsk->mm = tsk->active_mm = mm; preempt_disable(); update_mm(mm, tsk); - cpu_set(smp_processor_id(), mm->cpu_vm_mask); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); preempt_enable(); task_unlock(tsk); mmput(old_mm); -- cgit v1.2.3 From ef3500b2b2955af4fa6b0564b51c0c604e38c571 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Thu, 26 Mar 2009 15:25:02 +0100 Subject: [S390] remove duplicated #includes Remove duplicated #include's in arch/s390/kernel/ipl.c. Signed-off-by: Huang Weiyi Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 56d876904ec..6f3711a0eaa 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #define IPL_PARM_BLOCK_VERSION 0 -- cgit v1.2.3 From fc2869f6a1993550c2765e934b117e993782db30 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 13 Mar 2009 16:37:48 +0100 Subject: x86: disable __do_IRQ support Impact: disable unused code x86 is fully converted to flow handlers. No need to keep the deprecated __do_IRQ() support active. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bc2fbadff9f..3a330a437c6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -165,6 +165,9 @@ config GENERIC_HARDIRQS bool default y +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + config GENERIC_IRQ_PROBE bool default y -- cgit v1.2.3 From 17d140402e6f0fd5dde2fdf8d045e3f95f865663 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:50 +0300 Subject: x86: headers cleanup - setup.h Impact: cleanup 'make headers_check' warn us about leaking of kernel private (mostly compile time vars) data to userspace in headers. Fix it. Guard this one by __KERNEL__. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. 
Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/include/asm/setup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 5a3a1371575..c2308f5250f 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_SETUP_H #define _ASM_X86_SETUP_H +#ifdef __KERNEL__ + #define COMMAND_LINE_SIZE 2048 #ifndef __ASSEMBLY__ @@ -33,8 +35,6 @@ struct x86_quirks { #endif /* __ASSEMBLY__ */ -#ifdef __KERNEL__ - #ifdef __i386__ #include -- cgit v1.2.3 From 612bfc9e630e3f7a4f3be1325eac28de8b8970af Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Thu, 30 Oct 2008 21:17:47 +0100 Subject: m68k: Add install target This patch enables the use of "make install" on m68k architecture to copy kernel to /boot. Signed-off-by: Laurent Vivier Signed-off-by: Geert Uytterhoeven --- arch/m68k/Makefile | 3 +++ arch/m68k/install.sh | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 arch/m68k/install.sh (limited to 'arch') diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 8133dbc4496..570d85c3f97 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -117,3 +117,6 @@ endif archclean: rm -f vmlinux.gz vmlinux.bz2 + +install: + sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)" diff --git a/arch/m68k/install.sh b/arch/m68k/install.sh new file mode 100644 index 00000000000..9c6bae6112e --- /dev/null +++ b/arch/m68k/install.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# +# "make install" script for m68k architecture +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) +# + +verify () { + if [ ! -f "$1" ]; then + echo "" 1>&2 + echo " *** Missing file: $1" 1>&2 + echo ' *** You need to run "make" before "make install".' 1>&2 + echo "" 1>&2 + exit 1 + fi +} + +# Make sure the files actually exist +verify "$2" +verify "$3" + +# User may have a custom install script + +if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi +if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi + +# Default install - same as make zlilo + +if [ -f $4/vmlinuz ]; then + mv $4/vmlinuz $4/vmlinuz.old +fi + +if [ -f $4/System.map ]; then + mv $4/System.map $4/System.old +fi + +cat $2 > $4/vmlinuz +cp $3 $4/System.map + +sync -- cgit v1.2.3 From 7ad93b42bd135641ddc2c298f030132aca7aeca3 Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Thu, 6 Nov 2008 20:57:41 +0100 Subject: m68k: mac - Add a new entry in mac_model to identify the floppy controller type. 
This patch adds a field "floppy_type" which can take the following values: MAC_FLOPPY_IWM for an IWM based mac MAC_FLOPPY_SWIM_ADDR1 for a SWIM based mac with controller at VIA1 + 0x1E000 MAC_FLOPPY_SWIM_ADDR2 for a SWIM based mac with controller at VIA1 + 0x16000 MAC_FLOPPY_IOP for an IOP based mac MAC_FLOPPY_AV for an AV based mac Signed-off-by: Laurent Vivier Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/macintosh.h | 7 ++ arch/m68k/mac/config.c | 163 +++++++++++++++++++++++++------------- 2 files changed, 116 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 05309f7e3d0..50db3591ca1 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -34,6 +34,7 @@ struct mac_model char scc_type; char ether_type; char nubus_type; + char floppy_type; }; #define MAC_ADB_NONE 0 @@ -71,6 +72,12 @@ struct mac_model #define MAC_NO_NUBUS 0 #define MAC_NUBUS 1 +#define MAC_FLOPPY_IWM 0 +#define MAC_FLOPPY_SWIM_ADDR1 1 +#define MAC_FLOPPY_SWIM_ADDR2 2 +#define MAC_FLOPPY_SWIM_IOP 3 +#define MAC_FLOPPY_AV 4 + /* * Gestalt numbers */ diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 98b6bcfb37b..3a1c0b2862e 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -224,7 +224,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_IWM }, /* @@ -239,7 +240,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_IWM }, { .ident = MAC_MODEL_IIX, .name = "IIx", @@ -247,7 +249,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_IICX, .name = "IIcx", @@ -255,7 +258,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_SE30, .name = "SE/30", @@ -263,7 +267,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* @@ -280,7 +285,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_IIFX, .name = "IIfx", @@ -288,7 +294,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_IOP, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_IOP }, { .ident = MAC_MODEL_IISI, .name = "IIsi", @@ -296,7 +303,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_IIVI, .name = "IIvi", @@ -304,7 +312,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, 
.scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_IIVX, .name = "IIvx", @@ -312,7 +321,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* @@ -326,7 +336,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_CCL, .name = "Color Classic", @@ -334,7 +345,9 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS}, + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 + }, /* * Some Mac LC machines. Basically the same as the IIci, ADB like IIsi @@ -347,7 +360,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_LCII, .name = "LC II", @@ -355,7 +369,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_LCIII, .name = "LC III", @@ -363,7 +378,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* @@ -383,7 +399,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q605_ACC, .name = "Quadra 605", @@ -391,7 +408,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q610, .name = "Quadra 610", @@ -400,7 +418,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q630, .name = "Quadra 630", @@ -410,7 +429,8 @@ static struct mac_model mac_data_table[] = { .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q650, .name = "Quadra 650", @@ -419,7 +439,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, /* The Q700 does have a NS Sonic */ { @@ -430,7 +451,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = 
MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q800, .name = "Quadra 800", @@ -439,7 +461,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_Q840, .name = "Quadra 840AV", @@ -448,7 +471,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_AV }, { .ident = MAC_MODEL_Q900, .name = "Quadra 900", @@ -457,7 +481,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_IOP }, { .ident = MAC_MODEL_Q950, .name = "Quadra 950", @@ -466,7 +491,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_IOP }, /* @@ -480,7 +506,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_P475, .name = "Performa 475", @@ -488,7 +515,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_P475F, .name = "Performa 475", @@ -496,7 +524,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_P520, .name = "Performa 520", @@ -504,7 +533,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_P550, .name = "Performa 550", @@ -512,7 +542,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* These have the comm slot, and therefore the possibility of SONIC ethernet */ { @@ -523,7 +554,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_P588, .name = "Performa 588", @@ -533,7 +565,8 @@ static struct mac_model mac_data_table[] = { .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_TV, .name = "TV", @@ -541,7 +574,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_P600, .name = "Performa 600", 
@@ -549,7 +583,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* @@ -565,7 +600,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_C650, .name = "Centris 650", @@ -574,7 +610,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR1 }, { .ident = MAC_MODEL_C660, .name = "Centris 660AV", @@ -583,7 +620,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_AV }, /* @@ -599,7 +637,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB145, .name = "PowerBook 145", @@ -607,7 +646,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB150, .name = "PowerBook 150", @@ -616,7 +656,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_OLD, .ide_type = MAC_IDE_PB, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB160, .name = "PowerBook 160", @@ -624,7 +665,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB165, .name = "PowerBook 165", @@ -632,7 +674,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB165C, .name = "PowerBook 165c", @@ -640,7 +683,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB170, .name = "PowerBook 170", @@ -648,7 +692,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB180, .name = "PowerBook 180", @@ -656,7 +701,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB180C, .name = "PowerBook 180c", @@ -664,7 +710,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_QUADRA, .scsi_type = 
MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB190, .name = "PowerBook 190", @@ -673,7 +720,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_OLD, .ide_type = MAC_IDE_BABOON, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB520, .name = "PowerBook 520", @@ -682,7 +730,8 @@ static struct mac_model mac_data_table[] = { .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* @@ -702,7 +751,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB230, .name = "PowerBook Duo 230", @@ -710,7 +760,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB250, .name = "PowerBook Duo 250", @@ -718,7 +769,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB270C, .name = "PowerBook Duo 270c", @@ -726,7 +778,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB280, .name = "PowerBook Duo 280", @@ -734,7 +787,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, { .ident = MAC_MODEL_PB280C, .name = "PowerBook Duo 280c", @@ -742,7 +796,8 @@ static struct mac_model mac_data_table[] = { .via_type = MAC_VIA_IIci, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, - .nubus_type = MAC_NUBUS + .nubus_type = MAC_NUBUS, + .floppy_type = MAC_FLOPPY_SWIM_ADDR2 }, /* -- cgit v1.2.3 From 8852ecd97488249ca7fe2c0d3eb44cae95886881 Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Sat, 15 Nov 2008 16:10:10 +0100 Subject: m68k: mac - Add SWIM floppy support It allows to read data from a floppy, but not to write to, and to eject the floppy (useful on our Mac without eject button). 
Signed-off-by: Laurent Vivier Signed-off-by: Geert Uytterhoeven --- arch/m68k/mac/config.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ arch/m68k/mac/via.c | 9 +++++++++ 2 files changed, 53 insertions(+) (limited to 'arch') diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 3a1c0b2862e..be017984a45 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -22,6 +22,7 @@ /* keyb */ #include #include +#include #define BOOTINFO_COMPAT_1_0 #include @@ -43,6 +44,10 @@ #include #include +/* platform device info */ + +#define SWIM_IO_SIZE 0x2000 /* SWIM IO resource size */ + /* Mac bootinfo struct */ struct mac_booter_data mac_bi_data; @@ -870,3 +875,42 @@ static void mac_get_model(char *str) strcpy(str, "Macintosh "); strcat(str, macintosh_config->name); } + +static struct resource swim_resources[1]; + +static struct platform_device swim_device = { + .name = "swim", + .id = -1, + .num_resources = ARRAY_SIZE(swim_resources), + .resource = swim_resources, +}; + +static struct platform_device *mac_platform_devices[] __initdata = { + &swim_device +}; + +int __init mac_platform_init(void) +{ + u8 *swim_base; + + switch (macintosh_config->floppy_type) { + case MAC_FLOPPY_SWIM_ADDR1: + swim_base = (u8 *)(VIA1_BASE + 0x1E000); + break; + case MAC_FLOPPY_SWIM_ADDR2: + swim_base = (u8 *)(VIA1_BASE + 0x16000); + break; + default: + return 0; + } + + swim_resources[0].name = "swim-regs"; + swim_resources[0].start = (resource_size_t)swim_base; + swim_resources[0].end = (resource_size_t)(swim_base + SWIM_IO_SIZE); + swim_resources[0].flags = IORESOURCE_MEM; + + return platform_add_devices(mac_platform_devices, + ARRAY_SIZE(mac_platform_devices)); +} + +arch_initcall(mac_platform_init); diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 7d97ba54536..11bce3cb648 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -645,3 +645,12 @@ int via_irq_pending(int irq) } return 0; } + +void via1_set_head(int head) +{ + if (head == 0) + via1[vBufA] &= ~VIA1A_vHeadSel; + else + via1[vBufA] |= VIA1A_vHeadSel; +} +EXPORT_SYMBOL(via1_set_head); -- cgit v1.2.3 From 4b2873ba0bda7dc747cde8b6c7dc6c0b01ea6213 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 26 Mar 2009 09:48:45 +0100 Subject: m68k: irq_node.handler() should return irqreturn_t commit b5dc7840b3ebe9c7967dd8ba73db957767009ff9 ("m68k: introduce irq controller") reverted the return type of struct irq_node.handler() from irqreturn_t to int. Change it back to irqreturn_t, else it will give a compiler warning when irqreturn_t is turned into an enum in the near future: | arch/m68k/kernel/ints.c:231: warning: assignment from incompatible pointer type Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/irq_mm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/irq_mm.h b/arch/m68k/include/asm/irq_mm.h index 226bfc0f21b..0cab42cad79 100644 --- a/arch/m68k/include/asm/irq_mm.h +++ b/arch/m68k/include/asm/irq_mm.h @@ -3,6 +3,7 @@ #include #include +#include #include /* @@ -80,7 +81,7 @@ struct pt_regs; * interrupt source (if it supports chaining). */ typedef struct irq_node { - int (*handler)(int, void *); + irqreturn_t (*handler)(int, void *); void *dev_id; struct irq_node *next; unsigned long flags; -- cgit v1.2.3 From e2ab3dff9d515ef69ac7c245b5ad1e348f2106be Mon Sep 17 00:00:00 2001 From: David Miller Date: Thu, 26 Mar 2009 16:44:17 -0700 Subject: sparc64: Fix build of timer_interrupt(). 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/sparc/kernel/time_64.c: In function ‘timer_interrupt’: arch/sparc/kernel/time_64.c:732: error: ‘struct kernel_stat’ has no member named ‘irqs’ make[1]: *** [arch/sparc/kernel/time_64.o] Error 1 Signed-off-by: David S. Miller Signed-off-by: Linus Torvalds --- arch/sparc/kernel/time_64.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 642562d83ec..4ee2e48c4b3 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -724,12 +724,14 @@ void timer_interrupt(int irq, struct pt_regs *regs) unsigned long tick_mask = tick_ops->softint_mask; int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); + struct irq_desc *desc; clear_softint(tick_mask); irq_enter(); - kstat_this_cpu.irqs[0]++; + desc = irq_to_desc(0); + kstat_incr_irqs_this_cpu(0, desc); if (unlikely(!evt->event_handler)) { printk(KERN_WARNING -- cgit v1.2.3 From cfa8707aa65f7ec8ed2130937810b4fb05b40cfa Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:16 +0000 Subject: uml: convert network device to internal network device stats Convert the UML network device to use internal network device stats. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- arch/um/drivers/net_kern.c | 19 ++++++------------- arch/um/include/shared/net_kern.h | 2 +- 2 files changed, 7 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index fde510b664d..6d2b1004d1e 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -86,7 +86,7 @@ static int uml_net_rx(struct net_device *dev) drop_skb->dev = dev; /* Read a packet into drop_skb and don't do anything with it. 
*/ (*lp->read)(lp->fd, drop_skb, lp); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return 0; } @@ -99,8 +99,8 @@ static int uml_net_rx(struct net_device *dev) skb_trim(skb, pkt_len); skb->protocol = (*lp->protocol)(skb); - lp->stats.rx_bytes += skb->len; - lp->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; netif_rx(skb); return pkt_len; } @@ -224,8 +224,8 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) len = (*lp->write)(lp->fd, skb, lp); if (len == skb->len) { - lp->stats.tx_packets++; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; netif_start_queue(dev); @@ -234,7 +234,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) } else if (len == 0) { netif_start_queue(dev); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; } else { netif_start_queue(dev); @@ -248,12 +248,6 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *uml_net_get_stats(struct net_device *dev) -{ - struct uml_net_private *lp = netdev_priv(dev); - return &lp->stats; -} - static void uml_net_set_multicast_list(struct net_device *dev) { return; @@ -476,7 +470,6 @@ static void eth_configure(int n, void *init, char *mac, dev->open = uml_net_open; dev->hard_start_xmit = uml_net_start_xmit; dev->stop = uml_net_close; - dev->get_stats = uml_net_get_stats; dev->set_multicast_list = uml_net_set_multicast_list; dev->tx_timeout = uml_net_tx_timeout; dev->set_mac_address = uml_net_set_mac; diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h index d843c7924a7..5c367f22595 100644 --- a/arch/um/include/shared/net_kern.h +++ b/arch/um/include/shared/net_kern.h @@ -26,7 +26,7 @@ struct uml_net_private { spinlock_t lock; struct net_device *dev; struct timer_list tl; - struct net_device_stats stats; + struct work_struct work; int fd; unsigned char mac[ETH_ALEN]; -- cgit v1.2.3 From 8bb95b39a16ed55226810596f92216c53329d2fe Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:17 +0000 Subject: uml: convert network device to netdevice ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- arch/um/drivers/net_kern.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 6d2b1004d1e..434224e2229 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -371,6 +371,18 @@ static void net_device_release(struct device *dev) free_netdev(netdev); } +static const struct net_device_ops uml_netdev_ops = { + .ndo_open = uml_net_open, + .ndo_stop = uml_net_close, + .ndo_start_xmit = uml_net_start_xmit, + .ndo_set_multicast_list = uml_net_set_multicast_list, + .ndo_tx_timeout = uml_net_tx_timeout, + .ndo_set_mac_address = uml_net_set_mac, + .ndo_change_mtu = uml_net_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* * Ensures that platform_driver_register is called only once by * eth_configure. Will be set in an initcall. 
@@ -467,13 +479,7 @@ static void eth_configure(int n, void *init, char *mac, set_ether_mac(dev, device->mac); dev->mtu = transport->user->mtu; - dev->open = uml_net_open; - dev->hard_start_xmit = uml_net_start_xmit; - dev->stop = uml_net_close; - dev->set_multicast_list = uml_net_set_multicast_list; - dev->tx_timeout = uml_net_tx_timeout; - dev->set_mac_address = uml_net_set_mac; - dev->change_mtu = uml_net_change_mtu; + dev->netdev_ops = &uml_netdev_ops; dev->ethtool_ops = &uml_net_ethtool_ops; dev->watchdog_timeo = (HZ >> 1); dev->irq = UM_ETH_IRQ; -- cgit v1.2.3 From f9384d41c02408dd404aa64d66d0ef38adcf6479 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 27 Mar 2009 01:09:17 -0700 Subject: sparc64: Fix MM refcount check in smp_flush_tlb_pending(). As explained by Benjamin Herrenschmidt: > CPU 0 is running the context, task->mm == task->active_mm == your > context. The CPU is in userspace happily churning things. > > CPU 1 used to run it, not anymore, it's now running fancyfsd which > is a kernel thread, but current->active_mm still points to that > same context. > > Because there's only one "real" user, mm_users is 1 (but mm_count is > elevated, it's just that the presence on CPU 1 as active_mm has no > effect on mm_count(). > > At this point, fancyfsd decides to invalidate a mapping currently mapped > by that context, for example because a networked file has changed > remotely or something like that, using unmap_mapping_ranges(). > > So CPU 1 goes into the zapping code, which eventually ends up calling > flush_tlb_pending(). Your test will succeed, as current->active_mm is > indeed the target mm for the flush, and mm_users is indeed 1. So you > will -not- send an IPI to the other CPU, and CPU 0 will continue happily > accessing the pages that should have been unmapped. To fix this problem, check ->mm instead of ->active_mm, and this means: > So if you test current->mm, you effectively account for mm_users == 1, > so the only way the mm can be active on another processor is as a lazy > mm for a kernel thread. So your test should work properly as long > as you don't have a HW that will do speculative TLB reloads into the > TLB on that other CPU (and even if you do, you flush-on-switch-in should > get rid of any crap here). And therefore we should be OK. Signed-off-by: David S. Miller --- arch/sparc/kernel/smp_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 6cd1a5b6506..79457f682b5 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void) * If the address space is non-shared (ie. mm->count == 1) we avoid * cross calls when we want to flush the currently running process's * tlb state. This is done by clearing all cpu bits except the current - * processor's in current->active_mm->cpu_vm_mask and performing the + * processor's in current->mm->cpu_vm_mask and performing the * flush locally only. This will force any subsequent cpus which run * this task to flush the context from the local tlb if the process * migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long u32 ctx = CTX_HWBITS(mm->context); int cpu = get_cpu(); - if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) mm->cpu_vm_mask = cpumask_of_cpu(cpu); else smp_cross_call_masked(&xcall_flush_tlb_pending, -- cgit v1.2.3 From 66f3e6afa8e48486c4dd535d616fbfe04569fbd4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Fri, 27 Mar 2009 16:55:41 +0100 Subject: [IA64] Fix kstat_this_cpu build breakage arch/ia64/kernel/irq_ia64.c: In function 'ia64_handle_irq': arch/ia64/kernel/irq_ia64.c:498: error: 'struct kernel_stat' has no member named 'irqs' arch/ia64/kernel/irq_ia64.c:500: error: 'struct kernel_stat' has no member named 'irqs' arch/ia64/kernel/irq_ia64.c: In function 'ia64_process_pending_intr': arch/ia64/kernel/irq_ia64.c:556: error: 'struct kernel_stat' has no member named 'irqs' arch/ia64/kernel/irq_ia64.c:558: error: 'struct kernel_stat' has no member named 'irqs' Fix build breakage due to recent kstat_this_cpu changes in: d7e51e66899f95dabc89b4d4c6674a6e50fa37fc sparseirq: make some func to be used with genirq Signed-off-by: Jes Sorensen Signed-off-by: Tony Luck --- arch/ia64/kernel/irq_ia64.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 28d3d483db9..977a6ef1332 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -493,14 +493,16 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); while (vector != IA64_SPURIOUS_INT_VECTOR) { + struct irq_desc *desc; + int irq = local_vector_to_irq(vector); + + desc = irq_desc + irq; if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(irq, desc); else { - int irq = local_vector_to_irq(vector); - ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); @@ -543,22 +545,25 @@ void ia64_process_pending_intr(void) vector = ia64_get_ivr(); - irq_enter(); - saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); - ia64_srlz_d(); + irq_enter(); + saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); + ia64_srlz_d(); /* * Perform normal interrupt style processing */ while (vector != IA64_SPURIOUS_INT_VECTOR) { + struct irq_desc *desc; + int irq = local_vector_to_irq(vector); + desc = irq_desc + irq; + if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) - kstat_this_cpu.irqs[vector]++; + kstat_incr_irqs_this_cpu(irq, desc); else { struct pt_regs *old_regs = set_irq_regs(NULL); - int irq = local_vector_to_irq(vector); ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); -- cgit v1.2.3 From 2b1c6bd77d4e6a727ffac8630cd154b2144b751a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 10:09:09 +0100 Subject: generic compat_sys_ustat Due to a different size of ino_t ustat needs a compat handler, but currently only x86 and mips provide one. Add a generic compat_sys_ustat and switch all architectures over to it. Instead of doing various user copy hacks compat_sys_ustat just reimplements sys_ustat as it's trivial. This was suggested by Arnd Bergmann. 
Found by Eric Sandeen when running xfstests/017 on ppc64, which causes stack smashing warnings on RHEL/Fedora due to the too large amount of data writen by the syscall. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- arch/ia64/ia32/ia32_entry.S | 2 +- arch/mips/kernel/linux32.c | 34 ---------------------------------- arch/mips/kernel/scall64-n32.S | 2 +- arch/mips/kernel/scall64-o32.S | 2 +- arch/parisc/kernel/syscall_table.S | 2 +- arch/powerpc/include/asm/systbl.h | 2 +- arch/s390/kernel/compat_wrapper.S | 2 +- arch/x86/ia32/ia32entry.S | 2 +- arch/x86/ia32/sys_ia32.c | 22 ---------------------- arch/x86/include/asm/ia32.h | 7 ------- arch/x86/include/asm/sys_ia32.h | 2 -- 11 files changed, 7 insertions(+), 72 deletions(-) (limited to 'arch') diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index a46f8395e9a..af9405cd70e 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -240,7 +240,7 @@ ia32_syscall_table: data8 sys_ni_syscall data8 sys_umask /* 60 */ data8 sys_chroot - data8 sys_ustat + data8 compat_sys_ustat data8 sys_dup2 data8 sys_getppid data8 sys_getpgrp /* 65 */ diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 1a86f84fa94..784859cedef 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -356,40 +356,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality) return ret; } -/* ustat compatibility */ -struct ustat32 { - compat_daddr_t f_tfree; - compat_ino_t f_tinode; - char f_fname[6]; - char f_fpack[6]; -}; - -extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf); - -SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32) -{ - int err; - struct ustat tmp; - struct ustat32 tmp32; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - err = sys_ustat(dev, (struct ustat __user *)&tmp); - set_fs(old_fs); - - if (err) - goto out; - - memset(&tmp32, 0, sizeof(struct ustat32)); - tmp32.f_tfree = tmp.f_tfree; - tmp32.f_tinode = tmp.f_tinode; - - err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? 
-EFAULT : 0; - -out: - return err; -} - SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd, compat_off_t __user *, offset, s32, count) { diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 7438e92f8a0..f61d6b0e573 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -253,7 +253,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_utime /* 6130 */ PTR sys_mknod PTR sys_32_personality - PTR sys_32_ustat + PTR compat_sys_ustat PTR compat_sys_statfs PTR compat_sys_fstatfs /* 6135 */ PTR sys_sysfs diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index b0fef4ff982..60997f1f69d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -265,7 +265,7 @@ sys_call_table: PTR sys_olduname PTR sys_umask /* 4060 */ PTR sys_chroot - PTR sys_32_ustat + PTR compat_sys_ustat PTR sys_dup2 PTR sys_getppid PTR sys_getpgrp /* 4065 */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 303d2b647e4..03b9a01bc16 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -130,7 +130,7 @@ ENTRY_OURS(newuname) ENTRY_SAME(umask) /* 60 */ ENTRY_SAME(chroot) - ENTRY_SAME(ustat) + ENTRY_COMP(ustat) ENTRY_SAME(dup2) ENTRY_SAME(getppid) ENTRY_SAME(getpgrp) /* 65 */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 72353f6070a..fe166491e9d 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -65,7 +65,7 @@ SYSCALL(ni_syscall) SYSX(sys_ni_syscall,sys_olduname, sys_olduname) COMPAT_SYS_SPU(umask) SYSCALL_SPU(chroot) -SYSCALL(ustat) +COMPAT_SYS(ustat) SYSCALL_SPU(dup2) SYSCALL_SPU(getppid) SYSCALL_SPU(getpgrp) diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 62c706eb0de..87cf5a79a35 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -252,7 +252,7 @@ sys32_chroot_wrapper: sys32_ustat_wrapper: llgfr %r2,%r2 # dev_t llgtr %r3,%r3 # struct ustat * - jg sys_ustat + jg compat_sys_ustat .globl sys32_dup2_wrapper sys32_dup2_wrapper: diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 5a0d76dc56a..8ef8876666b 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -557,7 +557,7 @@ ia32_sys_call_table: .quad sys32_olduname .quad sys_umask /* 60 */ .quad sys_chroot - .quad sys32_ustat + .quad compat_sys_ustat .quad sys_dup2 .quad sys_getppid .quad sys_getpgrp /* 65 */ diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 6c0d7f6231a..efac92fd1ef 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name) return err ? 
-EFAULT : 0; } -long sys32_ustat(unsigned dev, struct ustat32 __user *u32p) -{ - struct ustat u; - mm_segment_t seg; - int ret; - - seg = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ustat(dev, (struct ustat __user *)&u); - set_fs(seg); - if (ret < 0) - return ret; - - if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) || - __put_user((__u32) u.f_tfree, &u32p->f_tfree) || - __put_user((__u32) u.f_tinode, &u32p->f_tfree) || - __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) || - __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack))) - ret = -EFAULT; - return ret; -} - asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs *regs) { diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index 50ca486fd88..1f7e6251728 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -129,13 +129,6 @@ typedef struct compat_siginfo { } _sifields; } compat_siginfo_t; -struct ustat32 { - __u32 f_tfree; - compat_ino_t f_tinode; - char f_fname[6]; - char f_fpack[6]; -}; - #define IA32_STACK_TOP IA32_PAGE_OFFSET #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index ffb08be2a53..72a6dcd1299 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -70,8 +70,6 @@ struct old_utsname; asmlinkage long sys32_olduname(struct oldold_utsname __user *); long sys32_uname(struct old_utsname __user *); -long sys32_ustat(unsigned, struct ustat32 __user *); - asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, compat_uptr_t __user *, struct pt_regs *); asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); -- cgit v1.2.3 From 10f303ae1e5e77a9f7cb053e6329906afb132c67 Mon Sep 17 00:00:00 2001 From: Cheng Renquan Date: Wed, 14 Jan 2009 17:01:33 +0800 Subject: do_pipe cleanup: drop its last user in arch/alpha/ The last user of do_pipe is in arch/alpha/, after replacing it with do_pipe_flags, the do_pipe can be totally dropped. Signed-off-by: Cheng Renquan Acked-by: Richard Henderson Signed-off-by: Al Viro --- arch/alpha/kernel/entry.S | 3 ++- arch/alpha/kernel/osf_sys.c | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index e4a54b61589..b45d913a51c 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -903,8 +903,9 @@ sys_alpha_pipe: stq $26, 0($sp) .prologue 0 + mov $31, $17 lda $16, 8($sp) - jsr $26, do_pipe + jsr $26, do_pipe_flags ldq $26, 0($sp) bne $0, 1f diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ae41f097864..42ee05981e7 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -46,8 +46,6 @@ #include #include -extern int do_pipe(int *); - /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. 
We're still not quite -- cgit v1.2.3 From 3ba13d179e8c24c68eac32b93593a6b10fcd1572 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:02:22 +0000 Subject: constify dentry_operations: rest Signed-off-by: Al Viro --- arch/ia64/kernel/perfmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 0e499757309..5c0f408cfd7 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry) return 1; } -static struct dentry_operations pfmfs_dentry_operations = { +static const struct dentry_operations pfmfs_dentry_operations = { .d_delete = pfmfs_delete_dentry, }; -- cgit v1.2.3 From 6e8a4fa651975ff808dba130eae442f4cea1671c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 27 Mar 2009 18:15:02 -0700 Subject: sparc64: We need to use compat_sys_ustat() as well. Sparc was missed in commit 2b1c6bd77d4e6a727ffac8630cd154b2144b751a ("generic compat_sys_ustat"). We definitely need it, since our __kernel_ino_t is "unsigned long". Signed-off-by: David S. Miller --- arch/sparc/kernel/systbls_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index f93c42a2b52..a8000b1cda7 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -51,7 +51,7 @@ sys_call_table32: /*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall - .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr + .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr /*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr /*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall -- cgit v1.2.3
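A note on the generic handler that these syscall-table entries now resolve to: per the commit message above, it simply reimplements sys_ustat() against a 32-bit structure layout instead of bouncing through the set_fs(KERNEL_DS) tricks used by the per-arch wrappers removed above. The sketch below illustrates that shape of code; it assumes a struct compat_ustat whose layout mirrors the removed ustat32 definitions and uses the user_get_super()/vfs_statfs() helpers of this kernel generation, so treat it as an illustration rather than the exact code merged into fs/compat.c.

/*
 * Illustrative sketch only, not a verbatim copy of the merged
 * fs/compat.c implementation.  The point is the structure: do the
 * statfs lookup natively, then copy out a structure whose fields
 * are all 32-bit sized, because ino_t differs between the 32-bit
 * and 64-bit ABIs.
 */
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct compat_ustat_sketch {		/* assumed to match struct compat_ustat */
	compat_daddr_t	f_tfree;	/* free blocks, 32 bits */
	compat_ino_t	f_tinode;	/* free inodes, 32 bits */
	char		f_fname[6];
	char		f_fpack[6];
};

long compat_ustat_sketch(unsigned dev, struct compat_ustat_sketch __user *u)
{
	struct compat_ustat_sketch tmp;
	struct super_block *sb;
	struct kstatfs sbuf;
	int err;

	sb = user_get_super(new_decode_dev(dev));
	if (!sb)
		return -EINVAL;
	err = vfs_statfs(sb->s_root, &sbuf);	/* dentry-based in this era */
	drop_super(sb);
	if (err)
		return err;

	memset(&tmp, 0, sizeof(tmp));
	tmp.f_tfree = sbuf.f_bfree;
	tmp.f_tinode = sbuf.f_ffree;
	return copy_to_user(u, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

Copying into a compat-sized structure directly is what lets one generic handler replace the mips and x86 wrappers and serve the s390, sparc64, parisc, powerpc and ia64 tables touched above.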