author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-28 08:37:46 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-28 08:37:46 -0700
commit    37eaf8c7463e53cf1acf025fb566fb6c4573297f (patch)
tree      9df7e9e3e7722d9ddf257e19fd8551425d27a292 /include
parent    58f250714f2bfa3514798fde8b9d38a15e4a9836 (diff)
parent    784e2d76007f90d69341b95967160c4fb7829299 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  stop_machine: fix up ftrace.c
  stop_machine: Wean existing callers off stop_machine_run()
  stop_machine(): stop_machine_run() changed to use cpu mask
  Hotplug CPU: don't check cpu_online after take_cpu_down
  Simplify stop_machine
  stop_machine: add ALL_CPUS option
  module: fix build warning with !CONFIG_KALLSYMS
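The series replaces stop_machine_run(), which took a single CPU number, with stop_machine(), which takes a cpumask (NULL meaning any online CPU). As a rough before/after sketch of what "weaning existing callers off stop_machine_run()" looks like at a call site (the callback and caller names here are hypothetical, not taken from the patch):

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* Hypothetical callback: runs while all other CPUs spin with IRQs disabled. */
static int example_fn(void *data)
{
	return 0;
}

static int example_caller(void)
{
	/* Old interface: cpu == NR_CPUS meant "run @fn on any CPU". */
	/* return stop_machine_run(example_fn, NULL, NR_CPUS); */

	/* New interface: a NULL cpumask requests the same behaviour. */
	return stop_machine(example_fn, NULL, NULL);
}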
Diffstat (limited to 'include')
 include/linux/stop_machine.h | 50 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 5bfc553bdb2..f1cb0ba6d71 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -5,41 +5,43 @@
(and more). So the "read" side to such a lock is anything which
diables preeempt. */
#include <linux/cpu.h>
+#include <linux/cpumask.h>
#include <asm/system.h>
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/* Deprecated, but useful for transition. */
+#define ALL_CPUS ~0U
+
/**
- * stop_machine_run: freeze the machine on all CPUs and run this function
+ * stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr for the @fn()
- * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
*
- * Description: This causes a thread to be scheduled on every other cpu,
- * each of which disables interrupts, and finally interrupts are disabled
- * on the current CPU. The result is that noone is holding a spinlock
- * or inside any other preempt-disabled region when @fn() runs.
+ * Description: This causes a thread to be scheduled on every cpu,
+ * each of which disables interrupts. The result is that noone is
+ * holding a spinlock or inside any other preempt-disabled region when
+ * @fn() runs.
*
* This can be thought of as a very heavy write lock, equivalent to
* grabbing every spinlock in the kernel. */
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
/**
- * __stop_machine_run: freeze the machine on all CPUs and run this function
+ * __stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr for the @fn
- * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
*
- * Description: This is a special version of the above, which returns the
- * thread which has run @fn(): kthread_stop will return the return value
- * of @fn(). Used by hotplug cpu.
+ * Description: This is a special version of the above, which assumes cpus
+ * won't come or go while it's being called. Used by hotplug cpu.
*/
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
- unsigned int cpu);
-
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
#else
-static inline int stop_machine_run(int (*fn)(void *), void *data,
- unsigned int cpu)
+static inline int stop_machine(int (*fn)(void *), void *data,
+ const cpumask_t *cpus)
{
int ret;
local_irq_disable();
@@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
return ret;
}
#endif /* CONFIG_SMP */
+
+static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
+ unsigned int cpu)
+{
+ /* If they don't care which cpu fn runs on, just pick one. */
+ if (cpu == NR_CPUS)
+ return stop_machine(fn, data, NULL);
+ else if (cpu == ~0U)
+ return stop_machine(fn, data, &cpu_possible_map);
+ else {
+ cpumask_t cpus = cpumask_of_cpu(cpu);
+ return stop_machine(fn, data, &cpus);
+ }
+}
#endif /* _LINUX_STOP_MACHINE */
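For reference, a minimal sketch of how the new cpumask argument covers the three cases the deprecated stop_machine_run() wrapper above maps onto it, assuming the 2.6.27-era cpumask_t API (cpumask_of_cpu(), cpu_possible_map); the callback and caller are hypothetical:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* Hypothetical no-op callback. */
static int noop_fn(void *data)
{
	return 0;
}

static int run_variants(unsigned int target_cpu)
{
	cpumask_t mask = cpumask_of_cpu(target_cpu);
	int err;

	/* Run the callback on one specific CPU (old: cpu == target_cpu). */
	err = stop_machine(noop_fn, NULL, &mask);
	if (err)
		return err;

	/* Run it on every possible CPU (old: cpu == ALL_CPUS). */
	err = stop_machine(noop_fn, NULL, &cpu_possible_map);
	if (err)
		return err;

	/* Let it run on any convenient online CPU (old: cpu == NR_CPUS). */
	return stop_machine(noop_fn, NULL, NULL);
}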