Commit 700fe1ab authored by Paul Menage, committed by Linus Torvalds

CGroup API files: update cpusets to use cgroup structured file API

Many of the cpusets control files are simple integer values, which don't
require the overhead of memory allocations for reads and writes.

Move the handlers for these control files into cpuset_read_u64() and
cpuset_write_u64().

[akpm@linux-foundation.org: add missing `break']
Signed-off-by: Paul Menage <menage@google.com>
Cc: "Li Zefan" <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "YAMAMOTO Takashi" <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent b7269dfc
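The cgroup "structured file" API referred to above lets a control file whose value is a single integer register typed read_u64()/write_u64() callbacks in its struct cftype, so the cgroup core does the buffer allocation, parsing and formatting once, instead of every subsystem reimplementing it in string-based .read/.write handlers. Below is a minimal userspace sketch of that shape (illustrative only, not kernel code; the struct layout, the core_read()/core_write() helpers and the single memory_pressure_enabled flag are simplified stand-ins for what the diff does with cpuset_read_u64()/cpuset_write_u64()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct cftype: a named control
 * file exposing typed integer callbacks instead of string handlers. */
struct cftype {
	const char *name;
	int private;                          /* which file this is */
	uint64_t (*read_u64)(int private);
	int (*write_u64)(int private, uint64_t val);
};

enum { FILE_MEMORY_PRESSURE_ENABLED };

static int memory_pressure_enabled;          /* the integer the file exposes */

static uint64_t demo_read_u64(int private)
{
	return memory_pressure_enabled;       /* no buffer, no sprintf in the handler */
}

static int demo_write_u64(int private, uint64_t val)
{
	memory_pressure_enabled = !!val;      /* clamp to 0/1, as the patch does */
	return 0;
}

/* The "core" parses and formats the value once, for every such file. */
static void core_write(const struct cftype *cft, const char *buf)
{
	cft->write_u64(cft->private, strtoull(buf, NULL, 10));
}

static void core_read(const struct cftype *cft)
{
	printf("%s = %llu\n", cft->name,
	       (unsigned long long)cft->read_u64(cft->private));
}

int main(void)
{
	struct cftype cft = {
		.name = "memory_pressure_enabled",
		.private = FILE_MEMORY_PRESSURE_ENABLED,
		.read_u64 = demo_read_u64,
		.write_u64 = demo_write_u64,
	};

	core_write(&cft, "1");
	core_read(&cft);                      /* prints: memory_pressure_enabled = 1 */
	return 0;
}

The point of the conversion is visible in the diff: update_memory_pressure_enabled() and the simple_strtoul() parsing inside update_flag() disappear, because the value now arrives already parsed as a u64.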
@@ -1023,19 +1023,6 @@ int current_cpuset_is_being_rebound(void)
 	return task_cs(current) == cpuset_being_rebound;
 }
 
-/*
- * Call with cgroup_mutex held.
- */
-
-static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
-{
-	if (simple_strtoul(buf, NULL, 10) != 0)
-		cpuset_memory_pressure_enabled = 1;
-	else
-		cpuset_memory_pressure_enabled = 0;
-	return 0;
-}
-
 static int update_relax_domain_level(struct cpuset *cs, char *buf)
 {
 	int val = simple_strtol(buf, NULL, 10);
@@ -1063,15 +1050,13 @@ static int update_relax_domain_level(struct cpuset *cs, char *buf)
  * Call with cgroup_mutex held.
  */
 
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
+static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+		       int turning_on)
 {
-	int turning_on;
 	struct cpuset trialcs;
 	int err;
 	int cpus_nonempty, balance_flag_changed;
 
-	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
-
 	trialcs = *cs;
 	if (turning_on)
 		set_bit(bit, &trialcs.flags);
@@ -1289,46 +1274,68 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	case FILE_MEMLIST:
 		retval = update_nodemask(cs, buffer);
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		retval = update_relax_domain_level(cs, buffer);
+		break;
+	default:
+		retval = -EINVAL;
+		goto out2;
+	}
+
+	if (retval == 0)
+		retval = nbytes;
+out2:
+	cgroup_unlock();
+out1:
+	kfree(buffer);
+	return retval;
+}
+
+static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	int retval = 0;
+	struct cpuset *cs = cgroup_cs(cgrp);
+	cpuset_filetype_t type = cft->private;
+
+	cgroup_lock();
+
+	if (cgroup_is_removed(cgrp)) {
+		cgroup_unlock();
+		return -ENODEV;
+	}
+
+	switch (type) {
 	case FILE_CPU_EXCLUSIVE:
-		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
+		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
 		break;
 	case FILE_MEM_EXCLUSIVE:
-		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
+		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
 		break;
 	case FILE_SCHED_LOAD_BALANCE:
-		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
-		break;
-	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
-		retval = update_relax_domain_level(cs, buffer);
+		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
 		break;
 	case FILE_MEMORY_MIGRATE:
-		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
+		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
 		break;
 	case FILE_MEMORY_PRESSURE_ENABLED:
-		retval = update_memory_pressure_enabled(cs, buffer);
+		cpuset_memory_pressure_enabled = !!val;
 		break;
 	case FILE_MEMORY_PRESSURE:
 		retval = -EACCES;
 		break;
 	case FILE_SPREAD_PAGE:
-		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
+		retval = update_flag(CS_SPREAD_PAGE, cs, val);
 		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	case FILE_SPREAD_SLAB:
-		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
+		retval = update_flag(CS_SPREAD_SLAB, cs, val);
 		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	default:
 		retval = -EINVAL;
-		goto out2;
+		break;
 	}
-
-	if (retval == 0)
-		retval = nbytes;
-out2:
 	cgroup_unlock();
-out1:
-	kfree(buffer);
 	return retval;
 }
@@ -1390,33 +1397,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	case FILE_MEMLIST:
 		s += cpuset_sprintf_memlist(s, cs);
 		break;
-	case FILE_CPU_EXCLUSIVE:
-		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_MEM_EXCLUSIVE:
-		*s++ = is_mem_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_SCHED_LOAD_BALANCE:
-		*s++ = is_sched_load_balance(cs) ? '1' : '0';
-		break;
 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 		s += sprintf(s, "%d", cs->relax_domain_level);
 		break;
-	case FILE_MEMORY_MIGRATE:
-		*s++ = is_memory_migrate(cs) ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE_ENABLED:
-		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE:
-		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
-		break;
-	case FILE_SPREAD_PAGE:
-		*s++ = is_spread_page(cs) ? '1' : '0';
-		break;
-	case FILE_SPREAD_SLAB:
-		*s++ = is_spread_slab(cs) ? '1' : '0';
-		break;
 	default:
 		retval = -EINVAL;
 		goto out;
@@ -1429,8 +1412,31 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	return retval;
 }
 
+static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
+{
+	struct cpuset *cs = cgroup_cs(cont);
+	cpuset_filetype_t type = cft->private;
+	switch (type) {
+	case FILE_CPU_EXCLUSIVE:
+		return is_cpu_exclusive(cs);
+	case FILE_MEM_EXCLUSIVE:
+		return is_mem_exclusive(cs);
+	case FILE_SCHED_LOAD_BALANCE:
+		return is_sched_load_balance(cs);
+	case FILE_MEMORY_MIGRATE:
+		return is_memory_migrate(cs);
+	case FILE_MEMORY_PRESSURE_ENABLED:
+		return cpuset_memory_pressure_enabled;
+	case FILE_MEMORY_PRESSURE:
+		return fmeter_getrate(&cs->fmeter);
+	case FILE_SPREAD_PAGE:
+		return is_spread_page(cs);
+	case FILE_SPREAD_SLAB:
+		return is_spread_slab(cs);
+	default:
+		BUG();
+	}
+}
+
 /*
@@ -1453,22 +1459,22 @@ static struct cftype cft_mems = {
 static struct cftype cft_cpu_exclusive = {
 	.name = "cpu_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_CPU_EXCLUSIVE,
 };
 
 static struct cftype cft_mem_exclusive = {
 	.name = "mem_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEM_EXCLUSIVE,
 };
 
 static struct cftype cft_sched_load_balance = {
 	.name = "sched_load_balance",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SCHED_LOAD_BALANCE,
 };
@@ -1481,36 +1487,36 @@ static struct cftype cft_sched_relax_domain_level = {
 static struct cftype cft_memory_migrate = {
 	.name = "memory_migrate",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_MIGRATE,
 };
 
 static struct cftype cft_memory_pressure_enabled = {
 	.name = "memory_pressure_enabled",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 
 static struct cftype cft_memory_pressure = {
 	.name = "memory_pressure",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_PRESSURE,
 };
 
 static struct cftype cft_spread_page = {
 	.name = "memory_spread_page",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SPREAD_PAGE,
 };
 
 static struct cftype cft_spread_slab = {
 	.name = "memory_spread_slab",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SPREAD_SLAB,
 };
@@ -1643,7 +1649,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 	cpuset_update_task_memory_state();
 
 	if (is_sched_load_balance(cs))
-		update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	number_of_cpusets--;
 	kfree(cs);
......