提交 645fcc9d 编写于 作者: Li Zefan 提交者: Linus Torvalds

cpuset: don't allocate trial cpuset on stack

Impact: cleanups, reduce stack usage

This patch prepares for the next patch.  When we convert
cpuset.cpus_allowed to cpumask_var_t, (trialcs = *cs) no longer works.

Another result of this patch is reducing stack usage of trialcs.
sizeof(*cs) can be as large as 148 bytes on x86_64, so it's really not
good to have it on stack.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 2341d1b6
...@@ -415,6 +415,24 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) ...@@ -415,6 +415,24 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
is_mem_exclusive(p) <= is_mem_exclusive(q); is_mem_exclusive(p) <= is_mem_exclusive(q);
} }
/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 *
 * Returns a heap copy of @cs for speculative modification, or NULL if
 * the allocation fails.  The caller owns the copy and must release it
 * with free_trial_cpuset().
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	return trial;
}
/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 *
 * Counterpart of alloc_trial_cpuset().  @trial may be NULL, since
 * kfree(NULL) is a no-op.
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	kfree(trial);
}
/* /*
* validate_change() - Used to validate that any proposed cpuset change * validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets. * follows the structural rules for cpusets.
...@@ -880,10 +898,10 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) ...@@ -880,10 +898,10 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
* @cs: the cpuset to consider * @cs: the cpuset to consider
* @buf: buffer of cpu numbers written to this cpuset * @buf: buffer of cpu numbers written to this cpuset
*/ */
static int update_cpumask(struct cpuset *cs, const char *buf) static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{ {
struct ptr_heap heap; struct ptr_heap heap;
struct cpuset trialcs;
int retval; int retval;
int is_load_balanced; int is_load_balanced;
...@@ -891,8 +909,6 @@ static int update_cpumask(struct cpuset *cs, const char *buf) ...@@ -891,8 +909,6 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
if (cs == &top_cpuset) if (cs == &top_cpuset)
return -EACCES; return -EACCES;
trialcs = *cs;
/* /*
* An empty cpus_allowed is ok only if the cpuset has no tasks. * An empty cpus_allowed is ok only if the cpuset has no tasks.
* Since cpulist_parse() fails on an empty mask, we special case * Since cpulist_parse() fails on an empty mask, we special case
...@@ -900,31 +916,31 @@ static int update_cpumask(struct cpuset *cs, const char *buf) ...@@ -900,31 +916,31 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
* with tasks have cpus. * with tasks have cpus.
*/ */
if (!*buf) { if (!*buf) {
cpus_clear(trialcs.cpus_allowed); cpus_clear(trialcs->cpus_allowed);
} else { } else {
retval = cpulist_parse(buf, &trialcs.cpus_allowed); retval = cpulist_parse(buf, &trialcs->cpus_allowed);
if (retval < 0) if (retval < 0)
return retval; return retval;
if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map)) if (!cpus_subset(trialcs->cpus_allowed, cpu_online_map))
return -EINVAL; return -EINVAL;
} }
retval = validate_change(cs, &trialcs); retval = validate_change(cs, trialcs);
if (retval < 0) if (retval < 0)
return retval; return retval;
/* Nothing to do if the cpus didn't change */ /* Nothing to do if the cpus didn't change */
if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) if (cpus_equal(cs->cpus_allowed, trialcs->cpus_allowed))
return 0; return 0;
retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
if (retval) if (retval)
return retval; return retval;
is_load_balanced = is_sched_load_balance(&trialcs); is_load_balanced = is_sched_load_balance(trialcs);
mutex_lock(&callback_mutex); mutex_lock(&callback_mutex);
cs->cpus_allowed = trialcs.cpus_allowed; cs->cpus_allowed = trialcs->cpus_allowed;
mutex_unlock(&callback_mutex); mutex_unlock(&callback_mutex);
/* /*
...@@ -1099,9 +1115,9 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem) ...@@ -1099,9 +1115,9 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
* lock each such tasks mm->mmap_sem, scan its vma's and rebind * lock each such tasks mm->mmap_sem, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed. * their mempolicies to the cpusets new mems_allowed.
*/ */
static int update_nodemask(struct cpuset *cs, const char *buf) static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{ {
struct cpuset trialcs;
nodemask_t oldmem; nodemask_t oldmem;
int retval; int retval;
...@@ -1112,8 +1128,6 @@ static int update_nodemask(struct cpuset *cs, const char *buf) ...@@ -1112,8 +1128,6 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
if (cs == &top_cpuset) if (cs == &top_cpuset)
return -EACCES; return -EACCES;
trialcs = *cs;
/* /*
* An empty mems_allowed is ok iff there are no tasks in the cpuset. * An empty mems_allowed is ok iff there are no tasks in the cpuset.
* Since nodelist_parse() fails on an empty mask, we special case * Since nodelist_parse() fails on an empty mask, we special case
...@@ -1121,27 +1135,27 @@ static int update_nodemask(struct cpuset *cs, const char *buf) ...@@ -1121,27 +1135,27 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
* with tasks have memory. * with tasks have memory.
*/ */
if (!*buf) { if (!*buf) {
nodes_clear(trialcs.mems_allowed); nodes_clear(trialcs->mems_allowed);
} else { } else {
retval = nodelist_parse(buf, trialcs.mems_allowed); retval = nodelist_parse(buf, trialcs->mems_allowed);
if (retval < 0) if (retval < 0)
goto done; goto done;
if (!nodes_subset(trialcs.mems_allowed, if (!nodes_subset(trialcs->mems_allowed,
node_states[N_HIGH_MEMORY])) node_states[N_HIGH_MEMORY]))
return -EINVAL; return -EINVAL;
} }
oldmem = cs->mems_allowed; oldmem = cs->mems_allowed;
if (nodes_equal(oldmem, trialcs.mems_allowed)) { if (nodes_equal(oldmem, trialcs->mems_allowed)) {
retval = 0; /* Too easy - nothing to do */ retval = 0; /* Too easy - nothing to do */
goto done; goto done;
} }
retval = validate_change(cs, &trialcs); retval = validate_change(cs, trialcs);
if (retval < 0) if (retval < 0)
goto done; goto done;
mutex_lock(&callback_mutex); mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs.mems_allowed; cs->mems_allowed = trialcs->mems_allowed;
cs->mems_generation = cpuset_mems_generation++; cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex); mutex_unlock(&callback_mutex);
...@@ -1181,31 +1195,36 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) ...@@ -1181,31 +1195,36 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit: the bit to update (see cpuset_flagbits_t)
 * cs: the cpuset to update
 * turning_on: whether the flag is being set or cleared
 *
 * Works on a heap-allocated trial copy of @cs (rather than a stack copy,
 * which is too large) so validate_change() can vet the new flags before
 * they are committed under callback_mutex.
 *
 * Call with cgroup_mutex held.
 *
 * Returns 0 on success, -ENOMEM if the trial copy cannot be allocated,
 * or the error from validate_change().
 */
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
		       int turning_on)
{
	struct cpuset *trialcs;
	int err;
	int balance_flag_changed;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs)
		return -ENOMEM;

	if (turning_on)
		set_bit(bit, &trialcs->flags);
	else
		clear_bit(bit, &trialcs->flags);

	err = validate_change(cs, trialcs);
	if (err < 0)
		goto out;

	/* Detect a load-balance transition before committing the flags. */
	balance_flag_changed = (is_sched_load_balance(cs) !=
				is_sched_load_balance(trialcs));

	mutex_lock(&callback_mutex);
	cs->flags = trialcs->flags;
	mutex_unlock(&callback_mutex);

	if (!cpus_empty(trialcs->cpus_allowed) && balance_flag_changed)
		async_rebuild_sched_domains();

out:
	/* err is 0 here on the success path. */
	free_trial_cpuset(trialcs);
	return err;
}
/* /*
...@@ -1453,21 +1472,29 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, ...@@ -1453,21 +1472,29 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
const char *buf) const char *buf)
{ {
int retval = 0; int retval = 0;
struct cpuset *cs = cgroup_cs(cgrp);
struct cpuset *trialcs;
if (!cgroup_lock_live_group(cgrp)) if (!cgroup_lock_live_group(cgrp))
return -ENODEV; return -ENODEV;
trialcs = alloc_trial_cpuset(cs);
if (!trialcs)
return -ENOMEM;
switch (cft->private) { switch (cft->private) {
case FILE_CPULIST: case FILE_CPULIST:
retval = update_cpumask(cgroup_cs(cgrp), buf); retval = update_cpumask(cs, trialcs, buf);
break; break;
case FILE_MEMLIST: case FILE_MEMLIST:
retval = update_nodemask(cgroup_cs(cgrp), buf); retval = update_nodemask(cs, trialcs, buf);
break; break;
default: default:
retval = -EINVAL; retval = -EINVAL;
break; break;
} }
free_trial_cpuset(trialcs);
cgroup_unlock(); cgroup_unlock();
return retval; return retval;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册