From a2ceff5e555e664751bc653a4d9b133efa18c742 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 28 Mar 2008 19:11:48 +1100
Subject: [POWERPC] Fix missed hardware breakpoints across multiple threads

There is a bug in the powerpc DABR (data access breakpoint) handling,
which can result in us missing breakpoints if several threads are trying
to break on the same address.

The circumstances are that do_page_fault() calls do_dabr(), which clears
the DABR (sets it to 0) and sets up the signal which will report to
userspace that the DABR was hit. The do_signal() code will restore the
DABR value on the way out to userspace.

If we reschedule before calling do_signal(), __switch_to() will check the
cached DABR value and compare it to the new thread's value; if they match
we don't set the DABR in hardware.

So if two threads have the same DABR value, and we schedule from one to
the other after taking the interrupt for the first thread hitting the
DABR, the second thread will run without the DABR set in hardware.

The cleanest fix is to move the cache update into set_dabr(); that way we
can't forget to do it.

Reported-by: Jan Kratochvil
Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/process.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59311ec0d42..4ec60552150 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -241,8 +241,12 @@ void discard_lazy_cpu_state(void)
 }
 #endif /* CONFIG_SMP */
 
+static DEFINE_PER_CPU(unsigned long, current_dabr);
+
 int set_dabr(unsigned long dabr)
 {
+	__get_cpu_var(current_dabr) = dabr;
+
 #ifdef CONFIG_PPC_MERGE		/* XXX for now */
 	if (ppc_md.set_dabr)
 		return ppc_md.set_dabr(dabr);
@@ -259,8 +263,6 @@ int set_dabr(unsigned long dabr)
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
 #endif
 
-static DEFINE_PER_CPU(unsigned long, current_dabr);
-
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
@@ -325,10 +327,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #endif /* CONFIG_SMP */
 
-	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
+	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
-		__get_cpu_var(current_dabr) = new->thread.dabr;
-	}
 
 	new_thread = &new->thread;
 	old_thread = &current->thread;
--
cgit v1.2.3
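For reference, a condensed C sketch of how the pieces fit together after this
patch. It is not the literal kernel code: the CONFIG_PPC_MERGE guard around the
ppc_md.set_dabr hook is dropped, error handling is trimmed, and switch_dabr()
is a made-up helper standing in for the inline check in __switch_to().

#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/machdep.h>
#include <asm/reg.h>

/* Per-CPU cache of the last DABR value written to the hardware. */
static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	/* Update the cache here, so no caller can forget to do it. */
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)		/* platform hook, when present */
		return ppc_md.set_dabr(dabr);

	mtspr(SPRN_DABR, dabr);		/* otherwise write the SPR directly */
	return 0;
}

/*
 * Stand-in for the inline code in __switch_to(): only touch the
 * hardware when the incoming thread's DABR differs from what this
 * CPU last programmed.
 */
static void switch_dabr(struct task_struct *new)
{
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
}

Since every DABR write now goes through set_dabr(), the per-CPU cache can no
longer go stale, which is exactly the failure mode described in the commit
message above.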
From ff3da2e0938bae36d10d69c22bce0177b067a9e2 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 2 Apr 2008 15:58:40 +1100
Subject: [POWERPC] Fix iSeries hard irq enabling regression

A subtle bug sneaked into iSeries recently. On this platform, we must not
normally clear MSR:EE (the hardware external interrupt enable) except for
short periods of time. Taking an interrupt while soft-disabled doesn't
cause us to clear it, for example.

The iSeries kernel expects to mostly run with MSR:EE enabled at all times
except in a few exception entry/exit code paths. Thus local_irq_enable()
doesn't check if it needs to hard-enable, as it expects this to be
unnecessary on iSeries.

However, hard_irq_disable() _does_ cause MSR:EE to be cleared, including
on iSeries. A call to it was recently added to the context switch code,
thus causing interrupts to become disabled for long periods of time,
causing the iSeries watchdog to kick in under some circumstances and
other nasty things.

This patch fixes it by making local_irq_enable() properly re-enable
MSR:EE on iSeries. It basically removes a return statement here to make
iSeries use the same code path as everybody else. That does mean that we
might occasionally get spurious decrementer interrupts, but I don't think
that matters.

Another option would have been to make hard_irq_disable() a nop on
iSeries, but I didn't like it much, in case we have good reasons to
hard-disable.

Part of the patch consists of fixes to make sure the hard_enabled PACA
field is properly set on iSeries, as it used not to be before, since it
was mostly unused.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/head_64.S | 13 +++++++------
 arch/powerpc/kernel/irq.c     |  1 -
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 11b4f6d9ffc..d3aee08e681 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1387,12 +1387,14 @@ __secondary_start:
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
 	ori	r4,r4,MSR_EE
+	li	r8,1
+	stb	r8,PACAHARDIRQEN(r13)
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
 BEGIN_FW_FTR_SECTION
-	stb	r7,PACASOFTIRQEN(r13)
 	stb	r7,PACAHARDIRQEN(r13)
 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+	stb	r7,PACASOFTIRQEN(r13)
 
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR1,r4
@@ -1520,15 +1522,14 @@ _INIT_GLOBAL(start_here_common)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
 	mfmsr	r5
-	ori	r5,r5,MSR_EE		/* Hard Enabled */
+	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
 	mtmsrd	r5
+	li	r5,1
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
-BEGIN_FW_FTR_SECTION
-	stb	r5,PACAHARDIRQEN(r13)
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
 
-	bl .start_kernel
+	bl	.start_kernel
 
 	/* Not reached */
 	BUG_OPCODE
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index b0e5deb4274..292163f5b39 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -143,7 +143,6 @@ void local_irq_restore(unsigned long en)
 		 */
 		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
-		return;
 	}
 
 	/*
--
cgit v1.2.3

From 7484839850d826e14b8b024bb048dca8489140ae Mon Sep 17 00:00:00 2001
From: Maxim Shchetynin
Date: Wed, 2 Apr 2008 00:12:20 +1100
Subject: [POWERPC] Fix rtas_flash procfs interface

Handling of proc_dir_entry->count was changed in 2.6.24-rc5. After this
change, the default value for pde->count is 1 and not 0 as before.
Therefore, if we want to check whether our procfs file is already opened
(already in use), we have to check if pde->count is greater than 2 rather
than 1.

Signed-off-by: Maxim Shchetynin
Signed-off-by: Jens Osterkamp
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/rtas_flash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index f2276593f41..538baf46f15 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -356,7 +356,7 @@ static int rtas_excl_open(struct inode *inode, struct file *file)
 
 	/* Enforce exclusive open with use count of PDE */
 	spin_lock(&flash_file_open_lock);
-	if (atomic_read(&dp->count) > 1) {
+	if (atomic_read(&dp->count) > 2) {
 		spin_unlock(&flash_file_open_lock);
 		return -EBUSY;
 	}
--
cgit v1.2.3
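To make the new threshold concrete: after 2.6.24-rc5 a registered
proc_dir_entry starts with pde->count at 1 rather than 0, so a check that
detects concurrent opens through the use count has to shift up by one, from
"> 1" to "> 2". A minimal sketch of that rule as a hypothetical helper (it is
not part of rtas_flash.c, which does the comparison inline under
flash_file_open_lock as shown above):

#include <linux/proc_fs.h>
#include <asm/atomic.h>

/*
 * Hypothetical helper, not in the kernel source: true when the
 * rtas_flash procfs file is already in use by another opener.
 * Since 2.6.24-rc5 a registered proc_dir_entry starts with a use
 * count of 1 instead of 0, so the old "> 1" test must become "> 2".
 */
static int rtas_flash_pde_busy(struct proc_dir_entry *dp)
{
	return atomic_read(&dp->count) > 2;
}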