commit 1eb49272
Author:    Hugh Dickins <hughd@google.com>
Committer: Linus Torvalds <torvalds@linux-foundation.org>

memcg: lru_size instead of MEM_CGROUP_ZSTAT

I never understood why we need a MEM_CGROUP_ZSTAT(mz, idx) macro to
obscure the LRU counts.  For easier searching? So call it lru_size
rather than bare count (lru_length sounds better, but would be wrong,
since each huge page raises lru_size hugely).
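
The name matters because lru_size counts base pages, not list entries: a compound page on an LRU list contributes 1 << compound_order(page), exactly as mem_cgroup_lru_add_list does in the diff below. A minimal standalone sketch of that arithmetic (the lru_length counter and the page orders are illustrative assumptions, not part of the patch):

/* Sketch only: why "lru_size" (pages) is not "lru_length" (entries). */
#include <stdio.h>

int main(void)
{
	unsigned long lru_size = 0;	/* what the patch tracks: base pages */
	unsigned long lru_length = 0;	/* hypothetical count of list entries */

	/* A normal 4K page: order 0, one list entry, 1 << 0 = 1 page. */
	lru_size += 1UL << 0;
	lru_length += 1;

	/* A 2MB transparent huge page: order 9, still one entry, 512 pages. */
	lru_size += 1UL << 9;
	lru_length += 1;

	printf("lru_size   = %lu pages\n", lru_size);	/* 513 */
	printf("lru_length = %lu entries\n", lru_length);	/* 2 */
	return 0;
}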
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: d79154bb
@@ -135,7 +135,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_zone {
 	struct lruvec		lruvec;
-	unsigned long		count[NR_LRU_LISTS];
+	unsigned long		lru_size[NR_LRU_LISTS];

 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -147,8 +147,6 @@ struct mem_cgroup_per_zone {
 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 						/* use container_of	   */
 };
-/* Macro for accessing counter */
-#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

 struct mem_cgroup_per_node {
 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
@@ -728,7 +726,7 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 	for_each_lru(l) {
 		if (BIT(l) & lru_mask)
-			ret += MEM_CGROUP_ZSTAT(mz, l);
+			ret += mz->lru_size[l];
 	}
 	return ret;
 }
@@ -1077,7 +1075,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
-	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
+	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
@@ -1105,8 +1103,8 @@ void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 	VM_BUG_ON(!memcg);
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(MEM_CGROUP_ZSTAT(mz, lru) < (1 << compound_order(page)));
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
+	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
+	mz->lru_size[lru] -= 1 << compound_order(page);
 }

 void mem_cgroup_lru_del(struct page *page)
@@ -3629,7 +3627,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
 	list = &mz->lruvec.lists[lru];

-	loop = MEM_CGROUP_ZSTAT(mz, lru);
+	loop = mz->lru_size[lru];
 	/* give some margin against EBUSY etc...*/
 	loop += 256;
 	busy = NULL;