author		Glauber Costa <gcosta@redhat.com>	2008-03-03 14:12:47 -0300
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 17:40:55 +0200
commit		2513926c286ca1d0d189c206966011bdd4080354 (patch)
tree		f93f8788a4c972e40231aae88c15015c79d019d7 /arch/x86/kernel
parent		3a36d1e435af79ec3bc5ead871e5b22d5558ebf3 (diff)
x86: change x86_64 smp_call_function_mask to look alike i386
The two versions (the inner version, and the outer version that takes the locks) of smp_call_function_mask are made into one. With this change, the i386 and x86_64 versions look exactly the same. (A reconstructed sketch of the merged function follows the diffstat below.)

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/smp_32.c	 2
-rw-r--r--	arch/x86/kernel/smp_64.c	57
2 files changed, 17 insertions(+), 42 deletions(-)
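
For reference, here is a sketch of the merged x86_64 function as it reads after this patch, reconstructed from the hunks below. The lines between the two smp_64.c hunks (the rest of the data initialisation, the call_data publish, and the IPI send) are not visible in this diff and are summarised as a comment rather than reproduced.

int native_smp_call_function_mask(cpumask_t mask,
				  void (*func)(void *), void *info,
				  int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	/* ... finish filling in 'data', publish call_data, and send the
	 * IPI (elided between the two hunks below) ... */

	/* Wait until every target CPU has entered func */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();

	spin_unlock(&call_lock);

	return 0;
}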
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index dc0cde9d16f..e4a6b669a0b 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask,
 	atomic_set(&data.finished, 0);
 	call_data = &data;
-	mb();
+	wmb();
 
 	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself))
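
The mb() -> wmb() relaxation above is the only i386-side change: the stores that fill in 'data' and publish call_data only need to be ordered before the APIC write that raises the IPI, which is a pure store-to-store (release-style) requirement, and it appears to match the barrier already used on the x86_64 side. A hedged user-space analogy of that publish pattern using C11 atomics follows; the names (payload, publish, consume) are made up for illustration and are not kernel APIs, and it runs single-threaded purely to stay short and compilable.

#include <stdatomic.h>
#include <stdio.h>

struct payload { int a, b; };		/* stands in for call_data_struct */

static struct payload slot;
static _Atomic(struct payload *) published;	/* stands in for call_data */

static void publish(void)
{
	slot.a = 1;
	slot.b = 2;
	/* Release store: orders the fills above before the pointer publish,
	 * the same store->store guarantee wmb() gives in the kernel code;
	 * the kernel would raise the IPI right after this point. */
	atomic_store_explicit(&published, &slot, memory_order_release);
}

static void consume(void)
{
	/* On the target CPU, the acquire load pairs with the release store. */
	struct payload *p = atomic_load_explicit(&published,
						 memory_order_acquire);
	if (p)
		printf("%d %d\n", p->a, p->b);
}

int main(void)
{
	publish();
	consume();
	return 0;
}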
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index aa2edb7f3a5..e4494e829df 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info,
 }
 
-/*
- * this function sends a 'generic call function' IPI to all other CPU
- * of the system defined in the mask.
- */
-static int __smp_call_function_mask(cpumask_t mask,
-				    void (*func)(void *), void *info,
-				    int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
 	int cpus;
 
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
 	cpus_and(mask, mask, allbutself);
 	cpus = cpus_weight(mask);
 
-	if (!cpus)
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	data.func = func;
 	data.info = info;
@@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask,
 	while (atomic_read(&data.started) != cpus)
 		cpu_relax();
 
-	if (!wait)
-		return 0;
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
 
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+	spin_unlock(&call_lock);
 
 	return 0;
 }
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
-{
-	int ret;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&call_lock);
-	ret = __smp_call_function_mask(mask, func, info, wait);
-	spin_unlock(&call_lock);
-	return ret;
-}
 
 static void stop_this_cpu(void *dummy)
 {
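
To close, a sketch of how callers reach this code: the generic smp_call_function_mask() wrapper (whose kerneldoc is removed above) dispatches to native_smp_call_function_mask(). The helper names below (do_drain, drain_other_cpus) are made up for illustration and are not part of this patch; the calling rules come from the removed kerneldoc.

/* Hypothetical caller: run do_drain on every other online CPU and wait.
 * Must be called with interrupts enabled, outside hard-IRQ and
 * bottom-half context. */
static void do_drain(void *info)
{
	/* runs in IPI context on each target CPU: fast, non-blocking */
}

static void drain_other_cpus(void)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);	/* exclude the current cpu */
	smp_call_function_mask(mask, do_drain, NULL, 1 /* wait */);
}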