提交 cede86ac 编写于 作者: Li Zefan 提交者: Linus Torvalds

memcg: clean up checking of the disabled flag

Those checks are unnecessary, because when the subsystem is disabled
it can't be mounted, so those functions won't get called.

The check is needed in functions which will be called in other places
except cgroup.

[hugh@veritas.com: further checking of disabled flag]
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 accf163e
...@@ -354,6 +354,9 @@ void mem_cgroup_move_lists(struct page *page, bool active) ...@@ -354,6 +354,9 @@ void mem_cgroup_move_lists(struct page *page, bool active)
struct mem_cgroup_per_zone *mz; struct mem_cgroup_per_zone *mz;
unsigned long flags; unsigned long flags;
if (mem_cgroup_subsys.disabled)
return;
/* /*
* We cannot lock_page_cgroup while holding zone's lru_lock, * We cannot lock_page_cgroup while holding zone's lru_lock,
* because other holders of lock_page_cgroup can be interrupted * because other holders of lock_page_cgroup can be interrupted
...@@ -533,9 +536,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -533,9 +536,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES; unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup_per_zone *mz; struct mem_cgroup_per_zone *mz;
if (mem_cgroup_subsys.disabled)
return 0;
pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask); pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
if (unlikely(pc == NULL)) if (unlikely(pc == NULL))
goto err; goto err;
...@@ -620,6 +620,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -620,6 +620,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{ {
if (mem_cgroup_subsys.disabled)
return 0;
/* /*
* If already mapped, we don't have to account. * If already mapped, we don't have to account.
* If page cache, page->mapping has address_space. * If page cache, page->mapping has address_space.
...@@ -638,6 +641,9 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) ...@@ -638,6 +641,9 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
if (mem_cgroup_subsys.disabled)
return 0;
/* /*
* Corner case handling. This is called from add_to_page_cache() * Corner case handling. This is called from add_to_page_cache()
* in usual. But some FS (shmem) precharges this page before calling it * in usual. But some FS (shmem) precharges this page before calling it
...@@ -788,6 +794,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) ...@@ -788,6 +794,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
int progress = 0; int progress = 0;
int retry = MEM_CGROUP_RECLAIM_RETRIES; int retry = MEM_CGROUP_RECLAIM_RETRIES;
if (mem_cgroup_subsys.disabled)
return 0;
rcu_read_lock(); rcu_read_lock();
mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
css_get(&mem->css); css_get(&mem->css);
...@@ -857,9 +866,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem) ...@@ -857,9 +866,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
int ret = -EBUSY; int ret = -EBUSY;
int node, zid; int node, zid;
if (mem_cgroup_subsys.disabled)
return 0;
css_get(&mem->css); css_get(&mem->css);
/* /*
* page reclaim code (kswapd etc..) will move pages between * page reclaim code (kswapd etc..) will move pages between
...@@ -1103,8 +1109,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, ...@@ -1103,8 +1109,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
static int mem_cgroup_populate(struct cgroup_subsys *ss, static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont) struct cgroup *cont)
{ {
if (mem_cgroup_subsys.disabled)
return 0;
return cgroup_add_files(cont, ss, mem_cgroup_files, return cgroup_add_files(cont, ss, mem_cgroup_files,
ARRAY_SIZE(mem_cgroup_files)); ARRAY_SIZE(mem_cgroup_files));
} }
...@@ -1117,9 +1121,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, ...@@ -1117,9 +1121,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct mm_struct *mm; struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem; struct mem_cgroup *mem, *old_mem;
if (mem_cgroup_subsys.disabled)
return;
mm = get_task_mm(p); mm = get_task_mm(p);
if (mm == NULL) if (mm == NULL)
return; return;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册