path: root/arch/sh/kernel
author     Ingo Molnar <mingo@elte.hu>    2008-07-18 18:43:08 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-07-18 18:43:08 +0200
commit     f1b0c8d3d3b5ff9c0b14bb2383a4bc38d8922bd1 (patch)
tree       b7d9f439d5b99d8b03bf490d4b17156414e48d45 /arch/sh/kernel
parent     d591b0a3ae25f587d0c4da1e1d1a425143590790 (diff)
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into x86/amd-iommu
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--   arch/sh/kernel/smp.c             |  62
-rw-r--r--   arch/sh/kernel/stacktrace.c      |   1
-rw-r--r--   arch/sh/kernel/vmlinux_32.lds.S  |   1
-rw-r--r--   arch/sh/kernel/vmlinux_64.lds.S  |   1
4 files changed, 16 insertions, 49 deletions
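
The arch/sh changes pulled in by this merge come from the generic smp_call_function rework in mainline: the old per-architecture call-function machinery in smp.c is replaced by the common helpers in kernel/smp.c, and the retry/nonatomic argument is dropped from every caller. As a rough sketch (assuming the generic IPI prototypes from kernel/smp.c at the time of this merge, which are not themselves part of this diff), the signatures the sh callers are being converted to look like:

    int smp_call_function(void (*func)(void *info), void *info, int wait);
    int on_each_cpu(void (*func)(void *info), void *info, int wait);

    /* hooks each architecture must now provide for the generic code */
    void arch_send_call_function_ipi(cpumask_t mask);
    void arch_send_call_function_single_ipi(int cpu);
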
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d168f5..60c50841143 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-static atomic_t cpus_booted = ATOMIC_INIT(0);
-
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
-
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -175,45 +168,20 @@ static void stop_this_cpu(void *unused)
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, 0, 1, 0);
+ smp_call_function(stop_this_cpu, 0, 0);
}
-struct smp_fn_call_struct smp_fn_call = {
- .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
- .finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- unsigned int nr_cpus = atomic_read(&cpus_booted);
- int i;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock(&smp_fn_call.lock);
-
- atomic_set(&smp_fn_call.finished, 0);
- smp_fn_call.fn = func;
- smp_fn_call.data = info;
-
- for (i = 0; i < nr_cpus; i++)
- if (i != smp_processor_id())
- plat_send_ipi(i, SMP_MSG_FUNCTION);
-
- if (wait)
- while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+ int cpu;
- spin_unlock(&smp_fn_call.lock);
+ for_each_cpu_mask(cpu, mask)
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+}
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
/* Not really SMP stuff ... */
@@ -229,7 +197,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -255,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -292,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
fd.vma = vma;
fd.addr1 = start;
fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -316,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
fd.addr1 = start;
fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
static void flush_tlb_page_ipi(void *info)
@@ -335,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
fd.vma = vma;
fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -359,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
fd.addr1 = asid;
fd.addr2 = vaddr;
- smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
local_flush_tlb_one(asid, vaddr);
}
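
Note that this file only covers the sending side; the receive path is not shown in the diff. A minimal sketch of how SMP_MSG_FUNCTION messages could be demultiplexed into the generic helpers follows (the handler name and dispatch style are illustrative assumptions, not taken from this patch):

    /* illustrative IPI demux - the real handler lives in the platform code */
    void smp_message_recv(unsigned int msg)
    {
            switch (msg) {
            case SMP_MSG_FUNCTION:
                    generic_smp_call_function_interrupt();
                    break;
            case SMP_MSG_FUNCTION_SINGLE:
                    generic_smp_call_function_single_interrupt();
                    break;
            }
    }

generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt() are the kernel/smp.c entry points that run the queued functions on the receiving CPU.
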
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index d41e561be20..1b2ae35c4a7 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -34,3 +34,4 @@ void save_stack_trace(struct stack_trace *trace)
}
}
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
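
With save_stack_trace() now exported GPL, modules can capture a backtrace of the current context. A minimal sketch of such a caller (the buffer size and skip depth are arbitrary illustration values, not from this patch):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
            unsigned long entries[16];
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 1,       /* skip this helper frame */
            };
            int i;

            save_stack_trace(&trace);
            for (i = 0; i < trace.nr_entries; i++)
                    printk(KERN_DEBUG "  [<%08lx>]\n", entries[i]);
    }
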
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index c7113786ecd..7b4b82bd115 100644
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -44,7 +44,6 @@ SECTIONS
_etext = .; /* End of text section */
- BUG_TABLE
NOTES
RO_DATA(PAGE_SIZE)
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
index d1e177009a4..33fa4645140 100644
--- a/arch/sh/kernel/vmlinux_64.lds.S
+++ b/arch/sh/kernel/vmlinux_64.lds.S
@@ -65,7 +65,6 @@ SECTIONS
_etext = .; /* End of text section */
- BUG_TABLE
NOTES
RO_DATA(PAGE_SIZE)