Diffstat (limited to 'arch/powerpc/kernel/smp.c')
 arch/powerpc/kernel/smp.c | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 338950aeb6f..be35ffae10f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -76,6 +76,8 @@ void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
+static int ipi_fail_ok;
+
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
@@ -181,12 +183,13 @@ static struct call_data_struct {
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or have executed it.
+ * <map> is a cpumask of the CPUs to send the IPI to.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
- int wait, cpumask_t map)
+static int __smp_call_function_map(void (*func) (void *info), void *info,
+ int nonatomic, int wait, cpumask_t map)
{
struct call_data_struct data;
int ret = -1, num_cpus;
@@ -203,8 +206,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
-
/* remove 'self' from the map */
if (cpu_isset(smp_processor_id(), map))
cpu_clear(smp_processor_id(), map);
@@ -231,7 +232,8 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
- debugger(NULL);
+ if (!ipi_fail_ok)
+ debugger(NULL);
goto out;
}
}
@@ -258,14 +260,18 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
out:
call_data = NULL;
HMT_medium();
- spin_unlock(&call_lock);
return ret;
}
static int __smp_call_function(void (*func)(void *info), void *info,
int nonatomic, int wait)
{
- return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
+ int ret;
+ spin_lock(&call_lock);
+ ret = __smp_call_function_map(func, info, nonatomic, wait,
+ cpu_online_map);
+ spin_unlock(&call_lock);
+ return ret;
}
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
@@ -278,8 +284,8 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
}
EXPORT_SYMBOL(smp_call_function);
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
- int wait)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
{
cpumask_t map = CPU_MASK_NONE;
int ret = 0;
@@ -291,9 +297,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
return -EINVAL;
cpu_set(cpu, map);
- if (cpu != get_cpu())
- ret = smp_call_function_map(func,info,nonatomic,wait,map);
- else {
+ if (cpu != get_cpu()) {
+ spin_lock(&call_lock);
+ ret = __smp_call_function_map(func, info, nonatomic, wait, map);
+ spin_unlock(&call_lock);
+ } else {
local_irq_disable();
func(info);
local_irq_enable();
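
For reference, a caller of smp_call_function_single with this era's
five-argument signature would look roughly like the sketch below; read_tb
and example_read_remote_tb are invented names, not kernel code.

/* Illustrative caller: run read_tb on CPU 1 and wait for it to finish. */
static void read_tb(void *info)
{
	*(unsigned long *)info = get_tb();
}

static void example_read_remote_tb(void)
{
	unsigned long tb = 0;

	/* nonatomic = 0, wait = 1 */
	if (smp_call_function_single(1, read_tb, &tb, 0, 1))
		printk(KERN_WARNING "cpu 1 did not respond\n");
	else
		printk(KERN_INFO "cpu 1 timebase: %lu\n", tb);
}
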
@@ -305,7 +313,22 @@ EXPORT_SYMBOL(smp_call_function_single);
void smp_send_stop(void)
{
- __smp_call_function(stop_this_cpu, NULL, 1, 0);
+ int nolock;
+
+ /* It's OK to fail sending the IPI, since the alternative is to
+ * be stuck forever waiting on the other CPU to take the interrupt.
+ *
+ * It's better to at least continue and go through reboot, since this
+ * function is usually called at panic or reboot time in the first
+ * place.
+ */
+ ipi_fail_ok = 1;
+
+ /* Don't deadlock in case we got called through panic */
+ nolock = !spin_trylock(&call_lock);
+ __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
+ if (!nolock)
+ spin_unlock(&call_lock);
}
void smp_call_function_interrupt(void)
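
The trylock dance in smp_send_stop above is a generally useful panic-path
idiom: try to take the lock, do the work whether or not you got it, and
unlock only if you did. A minimal sketch of just that shape, with
emergency_action and panic_safe_call as illustrative stand-ins:

/* Panic-path locking idiom used by smp_send_stop, in isolation. */
static void panic_safe_call(spinlock_t *lock)
{
	int nolock = !spin_trylock(lock);	/* lock may already be held,
						 * possibly by this CPU */

	emergency_action();			/* proceed either way */

	if (!nolock)				/* unlock only what we locked */
		spin_unlock(lock);
}

The flip side is that the work may then run without call_lock held and the
IPI may never be acknowledged, which is exactly why the patch also adds
ipi_fail_ok: on the panic/reboot path a non-responding CPU is tolerated
rather than waited on forever.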