提交 b047501c 编写于 作者: Vladimir Davydov 提交者: Linus Torvalds

memcg: use generic slab iterators for showing slabinfo

Let's use generic slab_start/next/stop for showing memcg caches info.  In
contrast to the current implementation, this will work even if all memcg
caches' info doesn't fit into a seq buffer (a page), plus it simply looks
neater.

Actually, the main reason I do this isn't mere cleanup.  I'm going to zap
the memcg_slab_caches list, because I find it useless provided we have the
slab_caches list, and this patch is a step in this direction.

It should be noted that before this patch an attempt to read
memory.kmem.slabinfo of a cgroup that doesn't have kmem limit set resulted
in -EIO, while after this patch it will silently show nothing except the
header, but I don't think it will frustrate anyone.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 4ef461e8
...@@ -513,10 +513,6 @@ struct memcg_cache_params { ...@@ -513,10 +513,6 @@ struct memcg_cache_params {
int memcg_update_all_caches(int num_memcgs); int memcg_update_all_caches(int num_memcgs);
struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
/** /**
* kmalloc_array - allocate memory for an array. * kmalloc_array - allocate memory for an array.
* @n: number of elements. * @n: number of elements.
......
...@@ -2547,26 +2547,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) ...@@ -2547,26 +2547,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg)); return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
} }
#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
struct memcg_cache_params *params;
if (!memcg_kmem_is_active(memcg))
return -EIO;
print_slabinfo_header(m);
mutex_lock(&memcg_slab_mutex);
list_for_each_entry(params, &memcg->memcg_slab_caches, list)
cache_show(memcg_params_to_cache(params), m);
mutex_unlock(&memcg_slab_mutex);
return 0;
}
#endif
static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
unsigned long nr_pages) unsigned long nr_pages)
{ {
...@@ -4708,7 +4688,10 @@ static struct cftype mem_cgroup_files[] = { ...@@ -4708,7 +4688,10 @@ static struct cftype mem_cgroup_files[] = {
#ifdef CONFIG_SLABINFO #ifdef CONFIG_SLABINFO
{ {
.name = "kmem.slabinfo", .name = "kmem.slabinfo",
.seq_show = mem_cgroup_slabinfo_read, .seq_start = slab_start,
.seq_next = slab_next,
.seq_stop = slab_stop,
.seq_show = memcg_slab_show,
}, },
#endif #endif
#endif #endif
......
...@@ -360,5 +360,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) ...@@ -360,5 +360,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
void *slab_start(struct seq_file *m, loff_t *pos); void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos); void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p); void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);
#endif /* MM_SLAB_H */ #endif /* MM_SLAB_H */
...@@ -811,7 +811,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); ...@@ -811,7 +811,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
#define SLABINFO_RIGHTS S_IRUSR #define SLABINFO_RIGHTS S_IRUSR
#endif #endif
void print_slabinfo_header(struct seq_file *m) static void print_slabinfo_header(struct seq_file *m)
{ {
/* /*
* Output format version, so at least we can change it * Output format version, so at least we can change it
...@@ -876,7 +876,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) ...@@ -876,7 +876,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
} }
} }
int cache_show(struct kmem_cache *s, struct seq_file *m) static void cache_show(struct kmem_cache *s, struct seq_file *m)
{ {
struct slabinfo sinfo; struct slabinfo sinfo;
...@@ -895,7 +895,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m) ...@@ -895,7 +895,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m)
sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
slabinfo_show_stats(m, s); slabinfo_show_stats(m, s);
seq_putc(m, '\n'); seq_putc(m, '\n');
return 0;
} }
static int slab_show(struct seq_file *m, void *p) static int slab_show(struct seq_file *m, void *p)
...@@ -904,10 +903,24 @@ static int slab_show(struct seq_file *m, void *p) ...@@ -904,10 +903,24 @@ static int slab_show(struct seq_file *m, void *p)
if (p == slab_caches.next) if (p == slab_caches.next)
print_slabinfo_header(m); print_slabinfo_header(m);
if (!is_root_cache(s)) if (is_root_cache(s))
return 0; cache_show(s, m);
return cache_show(s, m); return 0;
}
#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
if (p == slab_caches.next)
print_slabinfo_header(m);
if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
cache_show(s, m);
return 0;
} }
#endif
/* /*
* slabinfo_op - iterator that generates /proc/slabinfo * slabinfo_op - iterator that generates /proc/slabinfo
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册