From 143a5d325d35efb1b29dcb8d6031cf27107e183a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 25 Oct 2007 14:01:10 +0200
Subject: lockdep: fixup irq tracing

Ensure we fixup the IRQ state before we hit any locking code.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes_32.c  |  7 +------
 arch/x86/kernel/kprobes_64.c  |  7 +------
 arch/x86/kernel/traps_32.c    |  2 ++
 arch/x86/kernel/traps_64.c    |  2 ++
 arch/x86/mm/fault_32.c        |  5 +++++
 arch/x86/mm/fault_64.c        |  5 +++++
 include/asm-x86/irqflags_32.h | 21 +++++++++++++++++++++
 include/asm-x86/irqflags_64.h | 20 ++++++++++++++++++++
 8 files changed, 57 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index 90f778c04b3..d87a523070d 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -564,12 +564,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
         resume_execution(cur, regs, kcb);
         regs->eflags |= kcb->kprobe_saved_eflags;
 
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-        if (raw_irqs_disabled_flags(regs->eflags))
-                trace_hardirqs_off();
-        else
-                trace_hardirqs_on();
-#endif
+        trace_hardirqs_fixup_flags(regs->eflags);
 
         /*Restore back the original saved kprobes variables and continue. */
         if (kcb->kprobe_status == KPROBE_REENTER) {
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 681b801c5e2..3db3611933d 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -551,12 +551,7 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
         resume_execution(cur, regs, kcb);
         regs->eflags |= kcb->kprobe_saved_rflags;
 
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-        if (raw_irqs_disabled_flags(regs->eflags))
-                trace_hardirqs_off();
-        else
-                trace_hardirqs_on();
-#endif
+        trace_hardirqs_fixup_flags(regs->eflags);
 
         /* Restore the original saved kprobes variables and continue. */
         if (kcb->kprobe_status == KPROBE_REENTER) {
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index cc9acace7e2..298d13ed3ab 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -789,6 +789,8 @@ void restart_nmi(void)
 #ifdef CONFIG_KPROBES
 fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
+        trace_hardirqs_fixup();
+
         if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                         == NOTIFY_STOP)
                 return;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index d0c2bc7ab2e..4a6bd4965f5 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -807,6 +807,8 @@ asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
 /* runs on IST stack. */
 asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 {
+        trace_hardirqs_fixup();
+
         if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                 return;
         }
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 503dfc05111..2e542e89a3e 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -303,6 +303,11 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
         int write, si_code;
         int fault;
 
+        /*
+         * We can fault from pretty much anywhere, with unknown IRQ state.
+         */
+        trace_hardirqs_fixup();
+
         /* get the address */
         address = read_cr2();
 
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 644b4f7ece1..0e26230669c 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -304,6 +304,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
         unsigned long flags;
         siginfo_t info;
 
+        /*
+         * We can fault from pretty much anywhere, with unknown IRQ state.
+         */
+        trace_hardirqs_fixup();
+
         tsk = current;
         mm = tsk->mm;
         prefetchw(&mm->mmap_sem);
diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h
index d058b04e008..4c7720089cb 100644
--- a/include/asm-x86/irqflags_32.h
+++ b/include/asm-x86/irqflags_32.h
@@ -129,6 +129,27 @@ static inline int raw_irqs_disabled(void)
         return raw_irqs_disabled_flags(flags);
 }
 
+
+/*
+ * makes the traced hardirq state match with the machine state
+ *
+ * should be a rarely used function, only in places where its
+ * otherwise impossible to know the irq state, like in traps.
+ */
+static inline void trace_hardirqs_fixup_flags(unsigned long flags)
+{
+        if (raw_irqs_disabled_flags(flags))
+                trace_hardirqs_off();
+        else
+                trace_hardirqs_on();
+}
+
+static inline void trace_hardirqs_fixup(void)
+{
+        unsigned long flags = __raw_local_save_flags();
+
+        trace_hardirqs_fixup_flags(flags);
+}
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-x86/irqflags_64.h b/include/asm-x86/irqflags_64.h
index 5341ea1f815..bb9163bb29d 100644
--- a/include/asm-x86/irqflags_64.h
+++ b/include/asm-x86/irqflags_64.h
@@ -111,6 +111,26 @@ static inline int raw_irqs_disabled(void)
         return raw_irqs_disabled_flags(flags);
 }
 
+/*
+ * makes the traced hardirq state match with the machine state
+ *
+ * should be a rarely used function, only in places where its
+ * otherwise impossible to know the irq state, like in traps.
+ */
+static inline void trace_hardirqs_fixup_flags(unsigned long flags)
+{
+        if (raw_irqs_disabled_flags(flags))
+                trace_hardirqs_off();
+        else
+                trace_hardirqs_on();
+}
+
+static inline void trace_hardirqs_fixup(void)
+{
+        unsigned long flags = __raw_local_save_flags();
+
+        trace_hardirqs_fixup_flags(flags);
+}
 /*
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
-- 
cgit v1.2.3
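To make the intent of the new helpers concrete, here is a minimal sketch (not part of the patch; the handler name and lock are hypothetical) of the calling convention the patch establishes: an entry point that may run with an unknown hardware IRQ state calls trace_hardirqs_fixup() before any code that might take a lock, so lockdep's irq-tracing state agrees with the real machine flags.

/* Illustrative sketch only -- not part of the patch above. */
static DEFINE_SPINLOCK(example_lock);           /* hypothetical lock */

asmlinkage void __kprobes do_example_trap(struct pt_regs *regs, long error_code)
{
        /* Resync lockdep's notion of hardirq state with the machine state. */
        trace_hardirqs_fixup();

        /* From here on, locking sees a consistent irq-tracing state. */
        spin_lock(&example_lock);
        /* ... handle the trap ... */
        spin_unlock(&example_lock);
}

Within the patch itself this is the pattern used in do_int3() and do_page_fault(), while post_kprobe_handler() uses trace_hardirqs_fixup_flags(regs->eflags) instead, because the relevant IRQ state there is the one about to be restored from the saved pt_regs rather than the current machine state.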
From ab63a633cf072c719f885e46fa4814624312f672 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 25 Oct 2007 14:02:45 +0200
Subject: sched: fix unconditional irq lock

Lockdep noticed that this lock can also be taken from hardirq context,
and can thus not unconditionally disable/enable irqs.

 WARNING: at kernel/lockdep.c:2033 trace_hardirqs_on()
  [show_trace_log_lvl+26/48] show_trace_log_lvl+0x1a/0x30
  [show_trace+18/32] show_trace+0x12/0x20
  [dump_stack+22/32] dump_stack+0x16/0x20
  [trace_hardirqs_on+405/416] trace_hardirqs_on+0x195/0x1a0
  [_read_unlock_irq+34/48] _read_unlock_irq+0x22/0x30
  [sched_debug_show+2615/4224] sched_debug_show+0xa37/0x1080
  [show_state_filter+326/368] show_state_filter+0x146/0x170
  [sysrq_handle_showstate+10/16] sysrq_handle_showstate+0xa/0x10
  [__handle_sysrq+123/288] __handle_sysrq+0x7b/0x120
  [handle_sysrq+40/64] handle_sysrq+0x28/0x40
  [kbd_event+1045/1680] kbd_event+0x415/0x690
  [input_pass_event+206/208] input_pass_event+0xce/0xd0
  [input_handle_event+170/928] input_handle_event+0xaa/0x3a0
  [input_event+95/112] input_event+0x5f/0x70
  [atkbd_interrupt+434/1456] atkbd_interrupt+0x1b2/0x5b0
  [serio_interrupt+59/128] serio_interrupt+0x3b/0x80
  [i8042_interrupt+263/576] i8042_interrupt+0x107/0x240
  [handle_IRQ_event+40/96] handle_IRQ_event+0x28/0x60
  [handle_edge_irq+175/320] handle_edge_irq+0xaf/0x140
  [do_IRQ+64/128] do_IRQ+0x40/0x80
  [common_interrupt+46/52] common_interrupt+0x2e/0x34

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/sched_debug.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index e6fb392e516..415e5c38554 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -80,6 +80,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 {
         struct task_struct *g, *p;
+        unsigned long flags;
 
         SEQ_printf(m,
         "\nrunnable tasks:\n"
@@ -88,7 +89,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
         "------------------------------------------------------"
         "----------------------------------------------------\n");
 
-        read_lock_irq(&tasklist_lock);
+        read_lock_irqsave(&tasklist_lock, flags);
 
         do_each_thread(g, p) {
                 if (!p->se.on_rq || task_cpu(p) != rq_cpu)
@@ -97,7 +98,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
                 print_task(m, rq, p);
         } while_each_thread(g, p);
 
-        read_unlock_irq(&tasklist_lock);
+        read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-- 
cgit v1.2.3

From 17aacfb9cdf9a8329a6ece9c437551a29c93e47b Mon Sep 17 00:00:00 2001
From: Gautham R Shenoy
Date: Sun, 28 Oct 2007 20:47:01 +0100
Subject: lockdep: fix a typo in the __lock_acquire comment

Fix a typo in the __lock_acquire comment.

Signed-off-by: Gautham R Shenoy
Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/lockdep.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 55fe0c7cd95..ed38bbfc48a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2424,7 +2424,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                 return 0;
 
         /*
-         * Calculate the chain hash: it's the combined has of all the
+         * Calculate the chain hash: it's the combined hash of all the
          * lock keys along the dependency chain. We save the hash value
          * at every step so that we can get the current hash easily
          * after unlock. The chain hash is then used to cache dependency
-- 
cgit v1.2.3
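As a footnote to the second patch in this series ("sched: fix unconditional irq lock"): the unconditional _irq lock variants are wrong for print_rq() because read_unlock_irq() re-enables interrupts on unlock regardless of the caller's state, which corrupts the IRQ state when the path is entered from hardirq context, as in the sysrq-over-keyboard-interrupt trace above. The sketch below is a general illustration of the two variants; the lock and function names are hypothetical, not taken from the patch.

/* Illustrative sketch only -- hypothetical lock and functions. */
static DEFINE_RWLOCK(example_tasklist_lock);

/* Safe in any context: saves and restores the caller's IRQ state. */
static void reader_any_context(void)
{
        unsigned long flags;

        read_lock_irqsave(&example_tasklist_lock, flags);
        /* ... walk the protected data ... */
        read_unlock_irqrestore(&example_tasklist_lock, flags);
}

/*
 * Only safe where IRQs are known to be enabled: the unlock
 * unconditionally re-enables interrupts behind the caller's back.
 */
static void reader_process_context_only(void)
{
        read_lock_irq(&example_tasklist_lock);
        /* ... walk the protected data ... */
        read_unlock_irq(&example_tasklist_lock);
}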