提交 97a6c37b 编写于 作者: J Johannes Weiner 提交者: Linus Torvalds

memcg: change page_cgroup_zoneinfo signature

Instead of passing a whole struct page_cgroup to this function, let it
take only what it really needs from it: the struct mem_cgroup and the
page.

This has the advantage that reading pc->mem_cgroup is now done at the same
place where the ordering rules for this pointer are enforced and
explained.

It is also in preparation for removing the pc->page backpointer.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 ad324e94
@@ -85,16 +85,6 @@ SETPCGFLAG(Migration, MIGRATION)
 CLEARPCGFLAG(Migration, MIGRATION)
 TESTPCGFLAG(Migration, MIGRATION)
 
-static inline int page_cgroup_nid(struct page_cgroup *pc)
-{
-	return page_to_nid(pc->page);
-}
-
-static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
-{
-	return page_zonenum(pc->page);
-}
-
 static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
 	/*
......
@@ -364,11 +364,10 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
 }
 
 static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
+page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
 {
-	struct mem_cgroup *mem = pc->mem_cgroup;
-	int nid = page_cgroup_nid(pc);
-	int zid = page_cgroup_zid(pc);
+	int nid = page_to_nid(page);
+	int zid = page_zonenum(page);
 
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
@@ -800,7 +799,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
...@@ -836,7 +835,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page) ...@@ -836,7 +835,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
smp_rmb(); smp_rmb();
if (mem_cgroup_is_root(pc->mem_cgroup)) if (mem_cgroup_is_root(pc->mem_cgroup))
return; return;
mz = page_cgroup_zoneinfo(pc); mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
list_move_tail(&pc->lru, &mz->lists[lru]); list_move_tail(&pc->lru, &mz->lists[lru]);
} }
@@ -856,7 +855,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
 }
 
@@ -873,7 +872,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
@@ -1043,7 +1042,7 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	if (!mz)
		return NULL;
 
@@ -2192,7 +2191,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
		 * We hold lru_lock, then, reduce counter directly.
		 */
		lru = page_lru(head);
-		mz = page_cgroup_zoneinfo(head_pc);
+		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	}
	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册