Commit eeec4fad authored by Rusty Russell

stop_machine(): stop_machine_run() changed to use cpu mask

Instead of a "cpu" arg with magic values NR_CPUS (any cpu) and ~0 (all
cpus), pass a cpumask_t.  Allow NULL for the common case (where we
don't care which CPU the function is run on): temporary cpumask_t's
are usually considered bad for stack space.

This deprecates stop_machine_run, to be removed soon when all the
callers are dead.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent 04321587
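
For readers migrating callers, a minimal sketch of how the old magic-value calls map onto the new cpumask interface (illustrative only: fn, data and err stand in for a real caller and are not part of this commit):

	/* Old interface: magic values in the cpu argument. */
	err = stop_machine_run(fn, data, NR_CPUS);	/* any online cpu */
	err = stop_machine_run(fn, data, ALL_CPUS);	/* every cpu */
	err = stop_machine_run(fn, data, 3);		/* exactly cpu 3 */

	/* New interface: a cpumask pointer; NULL covers the common
	 * "don't care which cpu" case without a temporary mask. */
	err = stop_machine(fn, data, NULL);
	err = stop_machine(fn, data, &cpu_possible_map);
	{
		cpumask_t mask = cpumask_of_cpu(3);	/* one mask on the stack */
		err = stop_machine(fn, data, &mask);
	}

These mappings are exactly what the deprecated stop_machine_run() wrapper in the diff below performs.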
include/linux/stop_machine.h
@@ -5,19 +5,19 @@
    (and more).  So the "read" side to such a lock is anything which
    disables preempt. */
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <asm/system.h>
 
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/* Deprecated, but useful for transition. */
+#define ALL_CPUS ~0U
+
 /**
- * stop_machine_run: freeze the machine on all CPUs and run this function
+ * stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn()
- * @cpu: if @cpu == n, run @fn() on cpu n
- *       if @cpu == NR_CPUS, run @fn() on any cpu
- *       if @cpu == ALL_CPUS, run @fn() on every online CPU.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
  * Description: This causes a thread to be scheduled on every cpu,
  * each of which disables interrupts.  The result is that no one is
@@ -26,22 +26,22 @@
  *
  * This can be thought of as a very heavy write lock, equivalent to
  * grabbing every spinlock in the kernel. */
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 
 /**
- * __stop_machine_run: freeze the machine on all CPUs and run this function
+ * __stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn
- * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS).
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
  * Description: This is a special version of the above, which assumes cpus
  * won't come or go while it's being called.  Used by hotplug cpu.
  */
-int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 
 #else
 
-static inline int stop_machine_run(int (*fn)(void *), void *data,
-				   unsigned int cpu)
+static inline int stop_machine(int (*fn)(void *), void *data,
+			       const cpumask_t *cpus)
 {
 	int ret;
 	local_irq_disable();
@@ -50,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
 	return ret;
 }
 #endif /* CONFIG_SMP */
+
+static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
+						unsigned int cpu)
+{
+	/* If they don't care which cpu fn runs on, just pick one. */
+	if (cpu == NR_CPUS)
+		return stop_machine(fn, data, NULL);
+	else if (cpu == ~0U)
+		return stop_machine(fn, data, &cpu_possible_map);
+	else {
+		cpumask_t cpus = cpumask_of_cpu(cpu);
+		return stop_machine(fn, data, &cpus);
+	}
+}
 #endif /* _LINUX_STOP_MACHINE */
kernel/cpu.c
@@ -248,8 +248,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
+	tmp = cpumask_of_cpu(cpu);
 
-	err = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
+	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int chill(void *unused)
 	return 0;
 }
 
-int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
 	int i, err;
 	struct stop_machine_data active, idle;
@@ -112,10 +112,6 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 	idle.fn = chill;
 	idle.data = NULL;
 
-	/* If they don't care which cpu fn runs on, just pick one. */
-	if (cpu == NR_CPUS)
-		cpu = any_online_cpu(cpu_online_map);
-
 	/* This could be too big for stack on large machines. */
 	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
 	if (!threads)
@@ -128,13 +124,16 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
 	set_state(STOPMACHINE_PREPARE);
 	for_each_online_cpu(i) {
-		struct stop_machine_data *smdata;
+		struct stop_machine_data *smdata = &idle;
 		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
-		if (cpu == ALL_CPUS || i == cpu)
-			smdata = &active;
-		else
-			smdata = &idle;
+		if (!cpus) {
+			if (i == first_cpu(cpu_online_map))
+				smdata = &active;
+		} else {
+			if (cpu_isset(i, *cpus))
+				smdata = &active;
+		}
 
 		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
 					    i);
@@ -154,7 +153,7 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
 	/* We've created all the threads.  Wake them all: hold this CPU so one
 	 * doesn't hit this CPU until we're ready. */
-	cpu = get_cpu();
+	get_cpu();
 	for_each_online_cpu(i)
 		wake_up_process(threads[i]);
@@ -177,15 +176,15 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 	return err;
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
 	int ret;
 
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
-	ret = __stop_machine_run(fn, data, cpu);
+	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
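
For completeness, a minimal caller sketch for the new interface (hypothetical names, not part of this commit): the callback runs on one online CPU while every other online CPU spins in chill() with interrupts disabled, so it must be short and must not sleep.

	/* Hypothetical callback: the whole machine is frozen while it runs. */
	static int my_update(void *data)
	{
		return 0;	/* becomes the return value of stop_machine() */
	}

	err = stop_machine(my_update, NULL, NULL);	/* NULL mask = any online cpu */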