Commit 79f55997 authored by Li Zefan, committed by Rusty Russell

cpumask: use zalloc_cpumask_var() where possible

Remove open-coded zalloc_cpumask_var() and zalloc_cpumask_var_node().
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent a724eada
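
Background (not part of the commit): zalloc_cpumask_var() and zalloc_cpumask_var_node() behave like alloc_cpumask_var()/alloc_cpumask_var_node() but hand back an already-zeroed mask, so the explicit cpumask_clear() after a successful allocation is redundant. A minimal sketch of the before/after pattern follows; the example_mask variable and the example_init_*() functions are hypothetical names used only for illustration.

#include <linux/cpumask.h>
#include <linux/gfp.h>

static cpumask_var_t example_mask;	/* hypothetical mask, illustration only */

/* Before: allocate, then zero the mask by hand. */
static int example_init_before(void)
{
	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(example_mask);	/* open-coded zeroing */
	return 0;
}

/* After: zalloc_cpumask_var() returns the mask already zeroed. */
static int example_init_after(void)
{
	if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

Either way the mask is still released with free_cpumask_var(), as the error path in the first hunk below shows.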
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 					  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
...
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
...
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
...
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_lock(&performance_mutex);
...
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
 	 * Now that we have _PSD data from all CPUs, lets setup P-state
 	 * domain info.
 	 */
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
...
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	/*
...
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
...
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
 	int count;
 	int cpu;
 
-	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
 	}
 
-	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
 		if (!cpumask_test_cpu(cpu, core_mask)) {
...
@@ -154,9 +154,8 @@ int sync_start(void)
 {
 	int err;
 
-	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_clear(marked_cpus);
 
 	start_cpu_work();
...
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
...
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
...
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
...
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	bool called = true;
 	struct kvm_vcpu *vcpu;
 
-	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		cpumask_clear(cpus);
+	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
 	spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
...