Commit 18eca2e6 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: remove unnecessary PCG_MEMSW memory+swap charge flag

Now that mem_cgroup_swapout() fully uncharges the page, every page that is
still in use when reaching mem_cgroup_uncharge() is known to carry both
the memory and the memory+swap charge.  Simplify the uncharge path and
remove the PCG_MEMSW page flag accordingly.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7bdd143c
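The accounting invariant the patch relies on can be illustrated with a small standalone model (a sketch, not kernel code; the struct, the counter values, and the do_swap_account toggle below are made up for the example): once every used page is known to hold both the memory and the memory+swap charge, the uncharge path can drop both counters by the same page count instead of keeping separate nr_mem/nr_memsw tallies.

/* Standalone model of the simplified uncharge accounting (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct counters {
	unsigned long memory;	/* pages charged to memory */
	unsigned long memsw;	/* pages charged to memory+swap */
};

static const bool do_swap_account = true;	/* assume swap accounting enabled */

/* After the patch: a single batch size covers both counters. */
static void uncharge_batch(struct counters *c, unsigned long nr_anon,
			   unsigned long nr_file)
{
	unsigned long nr_pages = nr_anon + nr_file;

	c->memory -= nr_pages;
	if (do_swap_account)
		c->memsw -= nr_pages;
}

int main(void)
{
	/* 3 anon + 2 file pages charged: both counters move in lockstep. */
	struct counters c = { .memory = 5, .memsw = 5 };

	uncharge_batch(&c, 3, 2);
	printf("memory=%lu memsw=%lu\n", c.memory, c.memsw);	/* prints 0 0 */
	return 0;
}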
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -5,7 +5,6 @@ enum {
 	/* flags for mem_cgroup */
 	PCG_USED	= 0x01,	/* This page is charged to a memcg */
 	PCG_MEM		= 0x02,	/* This page holds a memory charge */
-	PCG_MEMSW	= 0x04,	/* This page holds a memory+swap charge */
 };
 
 struct pglist_data;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 	 * have the page locked
 	 */
 	pc->mem_cgroup = memcg;
-	pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? PCG_MEMSW : 0);
+	pc->flags = PCG_USED | PCG_MEM;
 
 	if (lrucare)
 		unlock_page_lru(page, isolated);
@@ -5793,7 +5793,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!PageCgroupUsed(pc))
 		return;
 
-	VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page);
 	memcg = pc->mem_cgroup;
 
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
@@ -5989,17 +5988,16 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
 }
 
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
-			   unsigned long nr_mem, unsigned long nr_memsw,
 			   unsigned long nr_anon, unsigned long nr_file,
 			   unsigned long nr_huge, struct page *dummy_page)
 {
+	unsigned long nr_pages = nr_anon + nr_file;
 	unsigned long flags;
 
 	if (!mem_cgroup_is_root(memcg)) {
-		if (nr_mem)
-			page_counter_uncharge(&memcg->memory, nr_mem);
-		if (nr_memsw)
-			page_counter_uncharge(&memcg->memsw, nr_memsw);
+		page_counter_uncharge(&memcg->memory, nr_pages);
+		if (do_swap_account)
+			page_counter_uncharge(&memcg->memsw, nr_pages);
 		memcg_oom_recover(memcg);
 	}
 
@@ -6008,23 +6006,21 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
-	__this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
+	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
 	local_irq_restore(flags);
 
 	if (!mem_cgroup_is_root(memcg))
-		css_put_many(&memcg->css, max(nr_mem, nr_memsw));
+		css_put_many(&memcg->css, nr_pages);
 }
 
 static void uncharge_list(struct list_head *page_list)
 {
 	struct mem_cgroup *memcg = NULL;
-	unsigned long nr_memsw = 0;
 	unsigned long nr_anon = 0;
 	unsigned long nr_file = 0;
 	unsigned long nr_huge = 0;
 	unsigned long pgpgout = 0;
-	unsigned long nr_mem = 0;
 	struct list_head *next;
 	struct page *page;
 
@@ -6051,10 +6047,9 @@ static void uncharge_list(struct list_head *page_list)
 		 */
 		if (memcg != pc->mem_cgroup) {
 			if (memcg) {
-				uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-					       nr_anon, nr_file, nr_huge, page);
-				pgpgout = nr_mem = nr_memsw = 0;
-				nr_anon = nr_file = nr_huge = 0;
+				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
+					       nr_huge, page);
+				pgpgout = nr_anon = nr_file = nr_huge = 0;
 			}
 			memcg = pc->mem_cgroup;
 		}
@@ -6070,18 +6065,14 @@ static void uncharge_list(struct list_head *page_list)
 		else
 			nr_file += nr_pages;
 
-		if (pc->flags & PCG_MEM)
-			nr_mem += nr_pages;
-		if (pc->flags & PCG_MEMSW)
-			nr_memsw += nr_pages;
 		pc->flags = 0;
 
 		pgpgout++;
 	} while (next != page_list);
 
 	if (memcg)
-		uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-			       nr_anon, nr_file, nr_huge, page);
+		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
+			       nr_huge, page);
 }
 
 /**
@@ -6166,7 +6157,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 		return;
 
 	VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage);
-	VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage);
 
 	if (lrucare)
 		lock_page_lru(oldpage, &isolated);