Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/autoprobe.c  |   9
-rw-r--r--  kernel/irq/handle.c     |   2
-rw-r--r--  kernel/irq/spurious.c   | 113
-rw-r--r--  kernel/itimer.c         |   8
-rw-r--r--  kernel/kexec.c          |  10
-rw-r--r--  kernel/sched.c          |   9
 6 files changed, 135 insertions(+), 16 deletions(-)
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 98d62d8efea..3467097ca61 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -9,6 +9,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
/*
* Autodetection depends on the fact that any interrupt that
@@ -26,7 +27,7 @@ static DECLARE_MUTEX(probe_sem);
*/
unsigned long probe_irq_on(void)
{
- unsigned long val, delay;
+ unsigned long val;
irq_desc_t *desc;
unsigned int i;
@@ -45,8 +46,7 @@ unsigned long probe_irq_on(void)
}
/* Wait for longstanding interrupts to trigger. */
- for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
- /* about 20ms delay */ barrier();
+ msleep(20);
/*
* enable any unassigned irqs
@@ -68,8 +68,7 @@ unsigned long probe_irq_on(void)
/*
* Wait for spurious interrupts to trigger
*/
- for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
- /* about 100ms delay */ barrier();
+ msleep(100);
/*
* Now filter out any obviously spurious interrupts
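[Note on the two autoprobe.c hunks above: they swap open-coded jiffies busy-waits for msleep(), which is why <linux/delay.h> is now included. A minimal before/after sketch of the idiom, using the same 20ms delay as the first hunk; probe_irq_on() uses it in process context, which is what makes sleeping legal here.]

	/* Sketch only, not additional patch content. */
	unsigned long delay;

	/* Before: spin until roughly 20ms of jiffies pass, keeping the CPU busy. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		barrier();

	/* After: sleep for at least 20ms (jiffy rounding can make it slightly
	 * longer); the scheduler is free to run other tasks meanwhile. */
	msleep(20);
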
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 436c7d93c00..c29f83c1649 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -172,7 +172,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
spin_lock(&desc->lock);
if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret, regs);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index ba039e827d5..7df9abd5ec8 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -11,6 +11,83 @@
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
+static int irqfixup;
+
+/*
+ * Recovery handler for misrouted interrupts.
+ */
+
+static int misrouted_irq(int irq, struct pt_regs *regs)
+{
+ int i;
+ irq_desc_t *desc;
+ int ok = 0;
+ int work = 0; /* Did we do work for a real IRQ */
+
+ for(i = 1; i < NR_IRQS; i++) {
+ struct irqaction *action;
+
+ if (i == irq) /* Already tried */
+ continue;
+ desc = &irq_desc[i];
+ spin_lock(&desc->lock);
+ action = desc->action;
+ /* Already running on another processor */
+ if (desc->status & IRQ_INPROGRESS) {
+ /*
+ * Already running: If it is shared get the other
+ * CPU to go looking for our mystery interrupt too
+ */
+ if (desc->action && (desc->action->flags & SA_SHIRQ))
+ desc->status |= IRQ_PENDING;
+ spin_unlock(&desc->lock);
+ continue;
+ }
+ /* Honour the normal IRQ locking */
+ desc->status |= IRQ_INPROGRESS;
+ spin_unlock(&desc->lock);
+ while (action) {
+ /* Only shared IRQ handlers are safe to call */
+ if (action->flags & SA_SHIRQ) {
+ if (action->handler(i, action->dev_id, regs) ==
+ IRQ_HANDLED)
+ ok = 1;
+ }
+ action = action->next;
+ }
+ local_irq_disable();
+ /* Now clean up the flags */
+ spin_lock(&desc->lock);
+ action = desc->action;
+
+ /*
+ * While we were looking for a fixup someone queued a real
+ * IRQ clashing with our walk
+ */
+
+ while ((desc->status & IRQ_PENDING) && action) {
+ /*
+ * Perform real IRQ processing for the IRQ we deferred
+ */
+ work = 1;
+ spin_unlock(&desc->lock);
+ handle_IRQ_event(i, regs, action);
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+ /*
+ * If we did actual work for the real IRQ line we must let the
+ * IRQ controller clean up too
+ */
+ if(work)
+ desc->handler->end(i);
+ spin_unlock(&desc->lock);
+ }
+ /* So the caller can adjust the irq error counts */
+ return ok;
+}
+
/*
* If 99,900 of the previous 100,000 interrupts have not been handled
* then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -31,7 +108,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
printk(KERN_ERR "irq event %d: bogus return value %x\n",
irq, action_ret);
} else {
- printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+ printk(KERN_ERR "irq %d: nobody cared (try booting with "
+ "the \"irqpoll\" option)\n", irq);
}
dump_stack();
printk(KERN_ERR "handlers:\n");
@@ -55,7 +133,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
}
}
-void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
+ struct pt_regs *regs)
{
if (action_ret != IRQ_HANDLED) {
desc->irqs_unhandled++;
@@ -63,6 +142,15 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
report_bad_irq(irq, desc, action_ret);
}
+ if (unlikely(irqfixup)) {
+ /* Don't punish working computers */
+ if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
+ int ok = misrouted_irq(irq, regs);
+ if (action_ret == IRQ_NONE)
+ desc->irqs_unhandled -= ok;
+ }
+ }
+
desc->irq_count++;
if (desc->irq_count < 100000)
return;
@@ -94,3 +182,24 @@ int __init noirqdebug_setup(char *str)
__setup("noirqdebug", noirqdebug_setup);
+static int __init irqfixup_setup(char *str)
+{
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+ return 1;
+}
+
+__setup("irqfixup", irqfixup_setup);
+
+static int __init irqpoll_setup(char *str)
+{
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
+ printk(KERN_WARNING "This may significantly impact system "
+ "performance\n");
+ return 1;
+}
+
+__setup("irqpoll", irqpoll_setup);
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 1dc988e0d2c..a72cb0e5aa4 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -153,11 +153,15 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
switch (which) {
case ITIMER_REAL:
+again:
spin_lock_irq(&tsk->sighand->siglock);
interval = tsk->signal->it_real_incr;
val = it_real_value(tsk->signal);
- if (val)
- del_timer_sync(&tsk->signal->real_timer);
+ /* We are sharing ->siglock with it_real_fn() */
+ if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
+ goto again;
+ }
tsk->signal->it_real_incr =
timeval_to_jiffies(&value->it_interval);
it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7843548cf2d..cdd4dcd8fb6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -241,7 +241,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
unsigned long nr_segments,
- struct kexec_segment *segments)
+ struct kexec_segment __user *segments)
{
int result;
struct kimage *image;
@@ -650,7 +650,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
}
}
- return 0;
+ return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
@@ -696,7 +696,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
/* Allocate a page, if we run out of memory give up */
page = kimage_alloc_pages(gfp_mask, 0);
if (!page)
- return 0;
+ return NULL;
/* If the page cannot be used file it away */
if (page_to_pfn(page) >
(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
@@ -754,7 +754,7 @@ static int kimage_load_normal_segment(struct kimage *image,
unsigned long maddr;
unsigned long ubytes, mbytes;
int result;
- unsigned char *buf;
+ unsigned char __user *buf;
result = 0;
buf = segment->buf;
@@ -818,7 +818,7 @@ static int kimage_load_crash_segment(struct kimage *image,
unsigned long maddr;
unsigned long ubytes, mbytes;
int result;
- unsigned char *buf;
+ unsigned char __user *buf;
result = 0;
buf = segment->buf;
diff --git a/kernel/sched.c b/kernel/sched.c
index e2b0d3e4dd0..5f2182d4224 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4166,6 +4166,14 @@ void show_state(void)
read_unlock(&tasklist_lock);
}
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
void __devinit init_idle(task_t *idle, int cpu)
{
runqueue_t *rq = cpu_rq(cpu);
@@ -4183,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- set_tsk_need_resched(idle);
spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */