Commit 0999821b authored by Glauber Costa, committed by Linus Torvalds

memcg: replace cgroup_lock with memcg specific memcg_lock

After the preparation work done in earlier patches, the cgroup_lock can
be trivially replaced with a memcg-specific lock.  At every site where the
values involved are only queried, the conversion is a mechanical one-for-one
substitution.

The sites where values are written, however, used to run naturally under
cgroup_lock; this is the case, for instance, in the css_online callback.
For those, the memcg lock now has to be taken explicitly.

With this, all the calls to cgroup_lock outside cgroup core are gone.
Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent b5f99b53
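The locking pattern the patch installs is easy to state outside the kernel: child creation takes one mutex, and every write path that must not race with a new child appearing takes the same mutex, checks whether children exist, and rejects the change if they do (this is what mem_cgroup_swappiness_write() and mem_cgroup_oom_control_write() do in the diff below). The following is a minimal userspace sketch of that pattern using pthreads; the struct, the nr_children counter, and the helper names create_child() and set_swappiness() are illustrative stand-ins, not the kernel implementation.

/* Minimal userspace analogue of the memcg_create_mutex pattern (illustrative only). */
#include <pthread.h>
#include <stdio.h>

/* Held whenever a new child is created, and by any writer that must not
 * race with a new child appearing (mirrors memcg_create_mutex). */
static pthread_mutex_t memcg_create_mutex = PTHREAD_MUTEX_INITIALIZER;

struct memcg {
        int nr_children;        /* hypothetical stand-in for the real child walk */
        int swappiness;
};

/* Creation path: the new child only becomes visible under the lock. */
static void create_child(struct memcg *parent)
{
        pthread_mutex_lock(&memcg_create_mutex);
        parent->nr_children++;
        pthread_mutex_unlock(&memcg_create_mutex);
}

/* Write path: the setting may only change while there are no children,
 * and the lock keeps that check stable against concurrent creation. */
static int set_swappiness(struct memcg *memcg, int val)
{
        pthread_mutex_lock(&memcg_create_mutex);
        if (memcg->nr_children) {
                pthread_mutex_unlock(&memcg_create_mutex);
                return -1;      /* -EINVAL in the kernel */
        }
        memcg->swappiness = val;
        pthread_mutex_unlock(&memcg_create_mutex);
        return 0;
}

int main(void)
{
        struct memcg m = { 0, 60 };

        printf("before children: %d\n", set_swappiness(&m, 10));  /* succeeds: 0 */
        create_child(&m);
        printf("after a child:   %d\n", set_swappiness(&m, 20));  /* rejected: -1 */
        return 0;
}

A dedicated creation mutex serializes these writers against cgroup creation without taking the global cgroup_lock, which is exactly the substitution the patch performs.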
@@ -488,6 +488,13 @@ enum res_type {
 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
 
+/*
+ * The memcg_create_mutex will be held whenever a new cgroup is created.
+ * As a consequence, any change that needs to protect against new child cgroups
+ * appearing has to hold it as well.
+ */
+static DEFINE_MUTEX(memcg_create_mutex);
+
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
 
@@ -4778,8 +4785,8 @@ static inline bool __memcg_has_children(struct mem_cgroup *memcg)
 }
 
 /*
- * Must be called with cgroup_lock held, unless the cgroup is guaranteed to be
- * already dead (in mem_cgroup_force_empty(), for instance). This is different
+ * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
+ * to be already dead (as in mem_cgroup_force_empty, for instance). This is
  * from mem_cgroup_count_children(), in the sense that we don't really care how
  * many children we have; we only need to know if we have any. It also counts
  * any memcg without hierarchy as infertile.
@@ -4859,7 +4866,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
        if (parent)
                parent_memcg = mem_cgroup_from_cont(parent);
 
-       cgroup_lock();
+       mutex_lock(&memcg_create_mutex);
 
        if (memcg->use_hierarchy == val)
                goto out;
@@ -4882,7 +4889,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
                retval = -EINVAL;
 
 out:
-       cgroup_unlock();
+       mutex_unlock(&memcg_create_mutex);
 
        return retval;
 }
@@ -4981,14 +4988,8 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
         *
         * After it first became limited, changes in the value of the limit are
         * of course permitted.
-        *
-        * Taking the cgroup_lock is really offensive, but it is so far the only
-        * way to guarantee that no children will appear. There are plenty of
-        * other offenders, and they should all go away. Fine grained locking
-        * is probably the way to go here. When we are fully hierarchical, we
-        * can also get rid of the use_hierarchy check.
         */
-       cgroup_lock();
+       mutex_lock(&memcg_create_mutex);
        mutex_lock(&set_limit_mutex);
        if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
                if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
@@ -5015,7 +5016,7 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
                ret = res_counter_set_limit(&memcg->kmem, val);
 out:
        mutex_unlock(&set_limit_mutex);
-       cgroup_unlock();
+       mutex_unlock(&memcg_create_mutex);
 
        /*
         * We are by now familiar with the fact that we can't inc the static
@@ -5396,17 +5397,17 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 
        parent = mem_cgroup_from_cont(cgrp->parent);
 
-       cgroup_lock();
+       mutex_lock(&memcg_create_mutex);
 
        /* If under hierarchy, only empty-root can set this value */
        if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
-               cgroup_unlock();
+               mutex_unlock(&memcg_create_mutex);
                return -EINVAL;
        }
 
        memcg->swappiness = val;
 
-       cgroup_unlock();
+       mutex_unlock(&memcg_create_mutex);
 
        return 0;
 }
@@ -5732,16 +5733,16 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
 
        parent = mem_cgroup_from_cont(cgrp->parent);
 
-       cgroup_lock();
+       mutex_lock(&memcg_create_mutex);
        /* oom-kill-disable is a flag for subhierarchy. */
        if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
-               cgroup_unlock();
+               mutex_unlock(&memcg_create_mutex);
                return -EINVAL;
        }
        memcg->oom_kill_disable = val;
        if (!val)
                memcg_oom_recover(memcg);
-       cgroup_unlock();
+       mutex_unlock(&memcg_create_mutex);
        return 0;
 }
 
@@ -6170,6 +6171,7 @@ mem_cgroup_css_online(struct cgroup *cont)
        if (!cont->parent)
                return 0;
 
+       mutex_lock(&memcg_create_mutex);
        memcg = mem_cgroup_from_cont(cont);
        parent = mem_cgroup_from_cont(cont->parent);
 
@@ -6203,6 +6205,7 @@ mem_cgroup_css_online(struct cgroup *cont)
        }
 
        error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+       mutex_unlock(&memcg_create_mutex);
        if (error) {
                /*
                 * We call put now because our (and parent's) refcnts