Commit 65c01184 authored by Mike Travis, committed by Ingo Molnar

cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr

  * This patch replaces the dangerous lvalue version of cpumask_of_cpu
    with new cpumask_of_cpu_ptr macros.  These are patterned after the
    node_to_cpumask_ptr macros.

    In general terms, if there is a cpumask_of_cpu_map[] then a pointer to
    the cpumask_of_cpu_map[cpu] entry is used.  The cpumask_of_cpu_map
    is provided when there is a large NR_CPUS count, reducing
    greatly the amount of code generated and stack space used for
    cpumask_of_cpu().  The pointer to the cpumask_t value is needed for
    calling set_cpus_allowed_ptr() to reduce the amount of stack space
    needed to pass the cpumask_t value.

    If there isn't a cpumask_of_cpu_map[], then a temporary variable is
    declared and filled in with value from cpumask_of_cpu(cpu) as well as
    a pointer variable pointing to this temporary variable.  Afterwards,
    the pointer is used to reference the cpumask value.  The compiler
    will optimize out the extra dereference through the pointer as well
    as the stack space used for the pointer, resulting in identical code.

    A good example of the orthogonal usages is in net/sunrpc/svc.c:

	case SVC_POOL_PERCPU:
	{
		unsigned int cpu = m->pool_to[pidx];
		cpumask_of_cpu_ptr(cpumask, cpu);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, cpumask);
		return 1;
	}
	case SVC_POOL_PERNODE:
	{
		unsigned int node = m->pool_to[pidx];
		node_to_cpumask_ptr(nodecpumask, node);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, nodecpumask);
		return 1;
	}
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: bb2c018b
...@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, ...@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct cpuinfo_x86 *c = &cpu_data(cpu); struct cpuinfo_x86 *c = &cpu_data(cpu);
cpumask_t saved_mask; cpumask_t saved_mask;
cpumask_of_cpu_ptr(new_mask, cpu);
int retval; int retval;
unsigned int eax, ebx, ecx, edx; unsigned int eax, ebx, ecx, edx;
unsigned int edx_part; unsigned int edx_part;
...@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, ...@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
/* Make sure we are running on right CPU */ /* Make sure we are running on right CPU */
saved_mask = current->cpus_allowed; saved_mask = current->cpus_allowed;
retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); retval = set_cpus_allowed_ptr(current, new_mask);
if (retval) if (retval)
return -1; return -1;
......
...@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd) ...@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
static void drv_write(struct drv_cmd *cmd) static void drv_write(struct drv_cmd *cmd)
{ {
cpumask_t saved_mask = current->cpus_allowed; cpumask_t saved_mask = current->cpus_allowed;
cpumask_of_cpu_ptr_declare(cpu_mask);
unsigned int i; unsigned int i;
for_each_cpu_mask_nr(i, cmd->mask) { for_each_cpu_mask_nr(i, cmd->mask) {
set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); cpumask_of_cpu_ptr_next(cpu_mask, i);
set_cpus_allowed_ptr(current, cpu_mask);
do_drv_write(cmd); do_drv_write(cmd);
} }
...@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu) ...@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
} aperf_cur, mperf_cur; } aperf_cur, mperf_cur;
cpumask_t saved_mask; cpumask_t saved_mask;
cpumask_of_cpu_ptr(cpu_mask, cpu);
unsigned int perf_percent; unsigned int perf_percent;
unsigned int retval; unsigned int retval;
saved_mask = current->cpus_allowed; saved_mask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, cpu_mask);
if (get_cpu() != cpu) { if (get_cpu() != cpu) {
/* We were not able to run on requested processor */ /* We were not able to run on requested processor */
put_cpu(); put_cpu();
...@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu) ...@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
static unsigned int get_cur_freq_on_cpu(unsigned int cpu) static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{ {
cpumask_of_cpu_ptr(cpu_mask, cpu);
struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
unsigned int freq; unsigned int freq;
unsigned int cached_freq; unsigned int cached_freq;
...@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) ...@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
} }
cached_freq = data->freq_table[data->acpi_data->state].frequency; cached_freq = data->freq_table[data->acpi_data->state].frequency;
freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); freq = extract_freq(get_cur_val(cpu_mask), data);
if (freq != cached_freq) { if (freq != cached_freq) {
/* /*
* The dreaded BIOS frequency change behind our back. * The dreaded BIOS frequency change behind our back.
......
...@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi ...@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
static int check_supported_cpu(unsigned int cpu) static int check_supported_cpu(unsigned int cpu)
{ {
cpumask_t oldmask; cpumask_t oldmask;
cpumask_of_cpu_ptr(cpu_mask, cpu);
u32 eax, ebx, ecx, edx; u32 eax, ebx, ecx, edx;
unsigned int rc = 0; unsigned int rc = 0;
oldmask = current->cpus_allowed; oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != cpu) { if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
...@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i ...@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
{ {
cpumask_t oldmask; cpumask_t oldmask;
cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid; u32 checkfid;
u32 checkvid; u32 checkvid;
...@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi ...@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
/* only run on specific CPU from here on */ /* only run on specific CPU from here on */
oldmask = current->cpus_allowed; oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != pol->cpu) { if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
...@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{ {
struct powernow_k8_data *data; struct powernow_k8_data *data;
cpumask_t oldmask; cpumask_t oldmask;
cpumask_of_cpu_ptr_declare(newmask);
int rc; int rc;
if (!cpu_online(pol->cpu)) if (!cpu_online(pol->cpu))
...@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
/* only run on specific CPU from here on */ /* only run on specific CPU from here on */
oldmask = current->cpus_allowed; oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); cpumask_of_cpu_ptr_next(newmask, pol->cpu);
set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != pol->cpu) { if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
...@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
set_cpus_allowed_ptr(current, &oldmask); set_cpus_allowed_ptr(current, &oldmask);
if (cpu_family == CPU_HW_PSTATE) if (cpu_family == CPU_HW_PSTATE)
pol->cpus = cpumask_of_cpu(pol->cpu); pol->cpus = *newmask;
else else
pol->cpus = per_cpu(cpu_core_map, pol->cpu); pol->cpus = per_cpu(cpu_core_map, pol->cpu);
data->available_cores = &(pol->cpus); data->available_cores = &(pol->cpus);
...@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu) ...@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
{ {
struct powernow_k8_data *data; struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed; cpumask_t oldmask = current->cpus_allowed;
cpumask_of_cpu_ptr(newmask, cpu);
unsigned int khz = 0; unsigned int khz = 0;
unsigned int first; unsigned int first;
...@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu) ...@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
if (!data) if (!data)
return -EINVAL; return -EINVAL;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != cpu) { if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX printk(KERN_ERR PFX
"limiting to CPU %d failed in powernowk8_get\n", cpu); "limiting to CPU %d failed in powernowk8_get\n", cpu);
......
...@@ -313,9 +313,10 @@ static unsigned int get_cur_freq(unsigned int cpu) ...@@ -313,9 +313,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
unsigned l, h; unsigned l, h;
unsigned clock_freq; unsigned clock_freq;
cpumask_t saved_mask; cpumask_t saved_mask;
cpumask_of_cpu_ptr(new_mask, cpu);
saved_mask = current->cpus_allowed; saved_mask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != cpu) if (smp_processor_id() != cpu)
return 0; return 0;
...@@ -554,9 +555,11 @@ static int centrino_target (struct cpufreq_policy *policy, ...@@ -554,9 +555,11 @@ static int centrino_target (struct cpufreq_policy *policy,
*/ */
if (!cpus_empty(covered_cpus)) { if (!cpus_empty(covered_cpus)) {
cpumask_of_cpu_ptr_declare(new_mask);
for_each_cpu_mask_nr(j, covered_cpus) { for_each_cpu_mask_nr(j, covered_cpus) {
set_cpus_allowed_ptr(current, cpumask_of_cpu_ptr_next(new_mask, j);
&cpumask_of_cpu(j)); set_cpus_allowed_ptr(current, new_mask);
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
} }
} }
......
...@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) ...@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
static unsigned int speedstep_get(unsigned int cpu) static unsigned int speedstep_get(unsigned int cpu)
{ {
return _speedstep_get(&cpumask_of_cpu(cpu)); cpumask_of_cpu_ptr(newmask, cpu);
return _speedstep_get(newmask);
} }
/** /**
......
...@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) ...@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
unsigned long j; unsigned long j;
int retval; int retval;
cpumask_t oldmask; cpumask_t oldmask;
cpumask_of_cpu_ptr(newmask, cpu);
if (num_cache_leaves == 0) if (num_cache_leaves == 0)
return -ENOENT; return -ENOENT;
...@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) ...@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
return -ENOMEM; return -ENOMEM;
oldmask = current->cpus_allowed; oldmask = current->cpus_allowed;
retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); retval = set_cpus_allowed_ptr(current, newmask);
if (retval) if (retval)
goto out; goto out;
......
...@@ -388,6 +388,7 @@ static int do_microcode_update (void) ...@@ -388,6 +388,7 @@ static int do_microcode_update (void)
void *new_mc = NULL; void *new_mc = NULL;
int cpu; int cpu;
cpumask_t old; cpumask_t old;
cpumask_of_cpu_ptr_declare(newmask);
old = current->cpus_allowed; old = current->cpus_allowed;
...@@ -404,7 +405,8 @@ static int do_microcode_update (void) ...@@ -404,7 +405,8 @@ static int do_microcode_update (void)
if (!uci->valid) if (!uci->valid)
continue; continue;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); cpumask_of_cpu_ptr_next(newmask, cpu);
set_cpus_allowed_ptr(current, newmask);
error = get_maching_microcode(new_mc, cpu); error = get_maching_microcode(new_mc, cpu);
if (error < 0) if (error < 0)
goto out; goto out;
...@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu) ...@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
struct cpuinfo_x86 *c = &cpu_data(cpu); struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpumask_t old; cpumask_t old;
cpumask_of_cpu_ptr(newmask, cpu);
unsigned int val[2]; unsigned int val[2];
int err = 0; int err = 0;
...@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu) ...@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
return 0; return 0;
old = current->cpus_allowed; old = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, newmask);
/* Check if the microcode we have in memory matches the CPU */ /* Check if the microcode we have in memory matches the CPU */
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
...@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu) ...@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
static void microcode_init_cpu(int cpu, int resume) static void microcode_init_cpu(int cpu, int resume)
{ {
cpumask_t old; cpumask_t old;
cpumask_of_cpu_ptr(newmask, cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
old = current->cpus_allowed; old = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, newmask);
mutex_lock(&microcode_mutex); mutex_lock(&microcode_mutex);
collect_cpu_info(cpu); collect_cpu_info(cpu);
if (uci->valid && system_state == SYSTEM_RUNNING && !resume) if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
...@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) ...@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
return -EINVAL; return -EINVAL;
if (val == 1) { if (val == 1) {
cpumask_t old; cpumask_t old;
cpumask_of_cpu_ptr(newmask, cpu);
old = current->cpus_allowed; old = current->cpus_allowed;
get_online_cpus(); get_online_cpus();
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, newmask);
mutex_lock(&microcode_mutex); mutex_lock(&microcode_mutex);
if (uci->valid) if (uci->valid)
......
...@@ -403,24 +403,28 @@ void native_machine_shutdown(void) ...@@ -403,24 +403,28 @@ void native_machine_shutdown(void)
{ {
/* Stop the cpus and apics */ /* Stop the cpus and apics */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int reboot_cpu_id;
/* The boot cpu is always logical cpu 0 */ /* The boot cpu is always logical cpu 0 */
reboot_cpu_id = 0; int reboot_cpu_id = 0;
cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* See if there has been given a command line override */ /* See if there has been given a command line override */
if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
cpu_online(reboot_cpu)) cpu_online(reboot_cpu)) {
reboot_cpu_id = reboot_cpu; reboot_cpu_id = reboot_cpu;
cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
}
#endif #endif
/* Make certain the cpu I'm about to reboot on is online */ /* Make certain the cpu I'm about to reboot on is online */
if (!cpu_online(reboot_cpu_id)) if (!cpu_online(reboot_cpu_id)) {
reboot_cpu_id = smp_processor_id(); reboot_cpu_id = smp_processor_id();
cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
}
/* Make certain I only run on the appropriate processor */ /* Make certain I only run on the appropriate processor */
set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); set_cpus_allowed_ptr(current, newmask);
/* O.K Now that I'm on the appropriate processor, /* O.K Now that I'm on the appropriate processor,
* stop all of the others. * stop all of the others.
......
...@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ...@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr) static int acpi_processor_get_throttling(struct acpi_processor *pr)
{ {
cpumask_t saved_mask; cpumask_t saved_mask;
cpumask_of_cpu_ptr_declare(new_mask);
int ret; int ret;
if (!pr) if (!pr)
...@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) ...@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
* Migrate task to the cpu pointed by pr. * Migrate task to the cpu pointed by pr.
*/ */
saved_mask = current->cpus_allowed; saved_mask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); cpumask_of_cpu_ptr_next(new_mask, pr->id);
set_cpus_allowed_ptr(current, new_mask);
ret = pr->throttling.acpi_processor_get_throttling(pr); ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */ /* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask); set_cpus_allowed_ptr(current, &saved_mask);
...@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, ...@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state) int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{ {
cpumask_t saved_mask; cpumask_t saved_mask;
cpumask_of_cpu_ptr_declare(new_mask);
int ret = 0; int ret = 0;
unsigned int i; unsigned int i;
struct acpi_processor *match_pr; struct acpi_processor *match_pr;
...@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr. * it can be called only for the cpu pointed by pr.
*/ */
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); cpumask_of_cpu_ptr_next(new_mask, pr->id);
set_cpus_allowed_ptr(current, new_mask);
ret = p_throttling->acpi_processor_set_throttling(pr, ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state); t_state.target_state);
} else { } else {
...@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue; continue;
} }
t_state.cpu = i; t_state.cpu = i;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); cpumask_of_cpu_ptr_next(new_mask, i);
set_cpus_allowed_ptr(current, new_mask);
ret = match_pr->throttling. ret = match_pr->throttling.
acpi_processor_set_throttling( acpi_processor_set_throttling(
match_pr, t_state.target_state); match_pr, t_state.target_state);
......
...@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, ...@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
static int smi_request(struct smi_cmd *smi_cmd) static int smi_request(struct smi_cmd *smi_cmd)
{ {
cpumask_t old_mask; cpumask_t old_mask;
cpumask_of_cpu_ptr(new_mask, 0);
int ret = 0; int ret = 0;
if (smi_cmd->magic != SMI_CMD_MAGIC) { if (smi_cmd->magic != SMI_CMD_MAGIC) {
...@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd) ...@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
/* SMI requires CPU 0 */ /* SMI requires CPU 0 */
old_mask = current->cpus_allowed; old_mask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != 0) { if (smp_processor_id() != 0) {
dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
__func__); __func__);
......
...@@ -62,6 +62,15 @@ ...@@ -62,6 +62,15 @@
* int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
* *
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
*ifdef CONFIG_HAS_CPUMASK_OF_CPU
* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
* cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
*else
* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
* cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
*endif
* CPU_MASK_ALL Initializer - all bits set * CPU_MASK_ALL Initializer - all bits set
* CPU_MASK_NONE Initializer - no bits set * CPU_MASK_NONE Initializer - no bits set
* unsigned long *cpus_addr(mask) Array of unsigned long's in mask * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
...@@ -236,11 +245,16 @@ static inline void __cpus_shift_left(cpumask_t *dstp, ...@@ -236,11 +245,16 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
extern cpumask_t *cpumask_of_cpu_map; extern cpumask_t *cpumask_of_cpu_map;
#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) #define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
#define cpumask_of_cpu_ptr(v, cpu) \
const cpumask_t *v = &cpumask_of_cpu(cpu)
#define cpumask_of_cpu_ptr_declare(v) \
const cpumask_t *v
#define cpumask_of_cpu_ptr_next(v, cpu) \
v = &cpumask_of_cpu(cpu)
#else #else
#define cpumask_of_cpu(cpu) \ #define cpumask_of_cpu(cpu) \
(*({ \ ({ \
typeof(_unused_cpumask_arg_) m; \ typeof(_unused_cpumask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \ if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(cpu); \ m.bits[0] = 1UL<<(cpu); \
...@@ -248,8 +262,16 @@ extern cpumask_t *cpumask_of_cpu_map; ...@@ -248,8 +262,16 @@ extern cpumask_t *cpumask_of_cpu_map;
cpus_clear(m); \ cpus_clear(m); \
cpu_set((cpu), m); \ cpu_set((cpu), m); \
} \ } \
&m; \ m; \
})) })
#define cpumask_of_cpu_ptr(v, cpu) \
cpumask_t _##v = cpumask_of_cpu(cpu); \
const cpumask_t *v = &_##v
#define cpumask_of_cpu_ptr_declare(v) \
cpumask_t _##v; \
const cpumask_t *v = &_##v
#define cpumask_of_cpu_ptr_next(v, cpu) \
_##v = cpumask_of_cpu(cpu)
#endif #endif
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
......
...@@ -33,8 +33,9 @@ static int stopmachine(void *cpu) ...@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
{ {
int irqs_disabled = 0; int irqs_disabled = 0;
int prepared = 0; int prepared = 0;
cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); set_cpus_allowed_ptr(current, cpumask);
/* Ack: we are alive */ /* Ack: we are alive */
smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
......
...@@ -213,7 +213,9 @@ static void start_stack_timers(void) ...@@ -213,7 +213,9 @@ static void start_stack_timers(void)
int cpu; int cpu;
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); cpumask_of_cpu_ptr(new_mask, cpu);
set_cpus_allowed_ptr(current, new_mask);
start_stack_timer(cpu); start_stack_timer(cpu);
} }
set_cpus_allowed_ptr(current, &saved_mask); set_cpus_allowed_ptr(current, &saved_mask);
......
...@@ -314,9 +314,10 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) ...@@ -314,9 +314,10 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
case SVC_POOL_PERCPU: case SVC_POOL_PERCPU:
{ {
unsigned int cpu = m->pool_to[pidx]; unsigned int cpu = m->pool_to[pidx];
cpumask_of_cpu_ptr(cpumask, cpu);
*oldmask = current->cpus_allowed; *oldmask = current->cpus_allowed;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); set_cpus_allowed_ptr(current, cpumask);
return 1; return 1;
} }
case SVC_POOL_PERNODE: case SVC_POOL_PERNODE:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册