Commit 47caf84f authored by Shakeel Butt, committed by Yang Yingliang

mm/memcg: optimize memory.numa_stat like memory.stat

mainline inclusion
from mainline-v5.8-rc1
commit dd8657b6
CVE: NA

--------------------------------

Currently, reading memory.numa_stat traverses the underlying memcg tree
multiple times to accumulate the stats and present the hierarchical view
of the memcg tree.  However, the kernel already maintains the hierarchical
view of the stats and uses it for memory.stat.  Just use the same
mechanism for memory.numa_stat as well.
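
To make the mechanism concrete, below is a minimal user-space sketch of
the idea, not kernel code; struct node, charge() and nr_pages() are
illustrative names.  The hierarchical counter is pushed up the tree once
on the charge path, so the read path never has to traverse the subtree:

#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long local;	/* pages charged to this node only */
	unsigned long tree;	/* this node plus all descendants */
};

/* Charge path: propagate the update up the hierarchy once, at charge time. */
static void charge(struct node *n, unsigned long pages)
{
	n->local += pages;
	for (; n; n = n->parent)
		n->tree += pages;
}

/* Read path: O(1), just pick the local or the pre-aggregated counter. */
static unsigned long nr_pages(struct node *n, int tree)
{
	return tree ? n->tree : n->local;
}

int main(void)
{
	struct node root = { 0 }, child = { .parent = &root };

	charge(&root, 3);
	charge(&child, 5);
	printf("root local=%lu hierarchical=%lu\n",
	       nr_pages(&root, 0), nr_pages(&root, 1));	/* 3 and 8 */
	return 0;
}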

I ran a simple benchmark which reads root_mem_cgroup's memory.numa_stat
file in the presence of 10000 memcgs.  The results are:

Without the patch:
$ time cat /dev/cgroup/memory/memory.numa_stat > /dev/null

real    0m0.700s
user    0m0.001s
sys     0m0.697s

With the patch:
$ time cat /dev/cgroup/memory/memory.numa_stat > /dev/null

real    0m0.001s
user    0m0.001s
sys     0m0.000s

[akpm@linux-foundation.org: avoid forcing out-of-line code generation]
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Michal Hocko <mhocko@kernel.org>
Link: http://lkml.kernel.org/r/20200304022058.248270-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Conflicts:
	mm/memcontrol.c
Signed-off-by: Jing Xiangfeng <jingxiangfeng@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 1a874243
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3742,7 +3742,7 @@ int sysctl_memcg_qos_handler(struct ctl_table *table, int write,
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-					   int nid, unsigned int lru_mask)
+				int nid, unsigned int lru_mask, bool tree)
 {
 	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
 	unsigned long nr = 0;
@@ -3753,13 +3753,17 @@ static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 	for_each_lru(lru) {
 		if (!(BIT(lru) & lru_mask))
 			continue;
-		nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
+		if (tree)
+			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
+		else
+			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
 	}
 	return nr;
 }
 
 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
-					     unsigned int lru_mask)
+					     unsigned int lru_mask,
+					     bool tree)
 {
 	unsigned long nr = 0;
 	enum lru_list lru;
@@ -3767,7 +3771,10 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 	for_each_lru(lru) {
 		if (!(BIT(lru) & lru_mask))
 			continue;
-		nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
+		if (tree)
+			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
+		else
+			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
 	}
 	return nr;
 }
@@ -3787,34 +3794,28 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 	};
 	const struct numa_stat *stat;
 	int nid;
-	unsigned long nr;
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
-		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
-		seq_printf(m, "%s=%lu", stat->name, nr);
-		for_each_node_state(nid, N_MEMORY) {
-			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-							  stat->lru_mask);
-			seq_printf(m, " N%d=%lu", nid, nr);
-		}
+		seq_printf(m, "%s=%lu", stat->name,
+			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
+						   false));
+		for_each_node_state(nid, N_MEMORY)
+			seq_printf(m, " N%d=%lu", nid,
+				   mem_cgroup_node_nr_lru_pages(memcg, nid,
+							stat->lru_mask, false));
 		seq_putc(m, '\n');
 	}
 
 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
-		struct mem_cgroup *iter;
-
-		nr = 0;
-		for_each_mem_cgroup_tree(iter, memcg)
-			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
-		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
-		for_each_node_state(nid, N_MEMORY) {
-			nr = 0;
-			for_each_mem_cgroup_tree(iter, memcg)
-				nr += mem_cgroup_node_nr_lru_pages(
-					iter, nid, stat->lru_mask);
-			seq_printf(m, " N%d=%lu", nid, nr);
-		}
+		seq_printf(m, "hierarchical_%s=%lu", stat->name,
+			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
+						   true));
+		for_each_node_state(nid, N_MEMORY)
+			seq_printf(m, " N%d=%lu", nid,
+				   mem_cgroup_node_nr_lru_pages(memcg, nid,
+							stat->lru_mask, true));
 		seq_putc(m, '\n');
 	}
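
For reference, the file format produced by memcg_numa_stat_show() is
unchanged by this patch; only the cost of producing it drops.  A read of
memory.numa_stat still looks like the following (a two-node machine is
assumed and the page counts are purely illustrative):

total=49152 N0=32768 N1=16384
file=40960 N0=28672 N1=12288
anon=8192 N0=4096 N1=4096
unevictable=0 N0=0 N1=0
hierarchical_total=2097152 N0=1048576 N1=1048576
hierarchical_file=1835008 N0=917504 N1=917504
hierarchical_anon=262144 N0=131072 N1=131072
hierarchical_unevictable=0 N0=0 N1=0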