path: root/arch/sparc64/kernel/irq.c
author	Ingo Molnar <mingo@elte.hu>	2008-09-24 09:56:20 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-09-24 09:56:20 +0200
commit	ebdd90a8cb2e3963f55499850f02ce6003558b55 (patch)
tree	d153f917ed41d257ddafa22f9cc2201bfddf8f9c	/arch/sparc64/kernel/irq.c
parent	3c9339049df5cc3a468c11de6c4101a1ea8c3d83 (diff)
parent	72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into x86/pebs
Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--	arch/sparc64/kernel/irq.c	78
1 file changed, 73 insertions(+), 5 deletions(-)
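
The interesting part of this merge for irq.c is the introduction of per-cpu IRQ stacks: handler_irq() switches %sp onto a dedicated hardirq stack before walking the interrupt buckets, and do_softirq() performs the same swap around __do_softirq(). As a reading aid, the sketch below restates the hardirq half of that pattern from the hunks that follow, with comments added. It leans on the kernel-provided NR_CPUS, THREAD_SIZE, STACK_BIAS and smp_processor_id(), reserves the 192 bytes the sparc64 kernel uses for a minimal stack frame, and is illustrative rather than a drop-in replacement for the code in the diff.

	/* Sketch (for reading only): the hardirq stack switch added below. */
	void *hardirq_stack[NR_CPUS];	/* per-cpu IRQ stack base, allocated elsewhere */

	static __attribute__((always_inline)) void *set_hardirq_stack(void)
	{
		void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

		/* Snapshot the current stack pointer. */
		__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));

		/* Switch only if we are not already on this cpu's IRQ stack,
		 * e.g. when handling a nested interrupt.
		 */
		if (orig_sp < sp || orig_sp > (sp + THREAD_SIZE)) {
			/* Point %sp at the top of the IRQ stack, leaving room
			 * for a minimal 192-byte register save area and
			 * applying the sparc64 STACK_BIAS.
			 */
			sp += THREAD_SIZE - 192 - STACK_BIAS;
			__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
		}

		/* handler_irq() hands this back to restore_hardirq_stack(). */
		return orig_sp;
	}

do_softirq() in the diff uses the same %sp swap inline, so softirq work also runs on its own per-cpu stack rather than on the interrupted task's stack.
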
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c481673d249..7495bc77468 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
@@ -682,10 +683,32 @@ void ack_bad_irq(unsigned int virt_irq)
ino, virt_irq);
}
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
void handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
+ void *orig_sp;
clear_softint(1 << irq);
@@ -703,6 +726,8 @@ void handler_irq(int irq, struct pt_regs *regs)
"i" (PSTATE_IE)
: "memory");
+ orig_sp = set_hardirq_stack();
+
while (bucket_pa) {
struct irq_desc *desc;
unsigned long next_pa;
@@ -719,10 +744,38 @@ void handler_irq(int irq, struct pt_regs *regs)
bucket_pa = next_pa;
}
+ restore_hardirq_stack(orig_sp);
+
irq_exit();
set_irq_regs(old_regs);
}
+void do_softirq(void)
+{
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+ __asm__ __volatile__("mov %%sp, %0\n\t"
+ "mov %1, %%sp"
+ : "=&r" (orig_sp)
+ : "r" (sp));
+ __do_softirq();
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
+
+ local_irq_restore(flags);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
@@ -740,6 +793,8 @@ void fixup_irqs(void)
}
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
+
+ tick_ops->disable_irq();
}
#endif
@@ -812,7 +867,7 @@ static void kill_prom_timer(void)
: "g1", "g2");
}
-void init_irqwork_curcpu(void)
+void notrace init_irqwork_curcpu(void)
{
int cpu = hard_smp_processor_id();
@@ -843,7 +898,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
}
}
-void __cpuinit sun4v_register_mondo_queues(int this_cpu)
+void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
@@ -915,12 +970,18 @@ static void __init sun4v_init_mondo_queues(void)
alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
tb->nonresum_qmask);
+ }
+}
+
+static void __init init_send_mondo_info(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct trap_per_cpu *tb = &trap_block[cpu];
init_cpu_send_mondo_info(tb);
}
-
- /* Load up the boot cpu's entries. */
- sun4v_register_mondo_queues(hard_smp_processor_id());
}
static struct irqaction timer_irq_action = {
@@ -949,6 +1010,13 @@ void __init init_IRQ(void)
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
+ init_send_mondo_info();
+
+ if (tlb_type == hypervisor) {
+ /* Load up the boot cpu's entries. */
+ sun4v_register_mondo_queues(hard_smp_processor_id());
+ }
+
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the
* PROM timer which we just disabled.