Commit c67a8a68 authored by Vladimir Davydov, committed by Linus Torvalds

memcg, slab: merge memcg_{bind,release}_pages to memcg_{un}charge_slab

Currently we have two pairs of kmemcg-related functions that are called on
slab alloc/free.  The first is memcg_{bind,release}_pages that count the
total number of pages allocated on a kmem cache.  The second is
memcg_{un}charge_slab that {un}charge slab pages to kmemcg resource
counter.  Let's just merge them to keep the code clean.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 1e32e77f
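In effect, this commit folds the page accounting that memcg_{bind,release}_pages used to do into the charge/uncharge path itself. As a quick orientation before the diff, here is the merged charge helper exactly as it is added below, with explanatory comments added here for illustration only:

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
{
	int res;

	/* Charge PAGE_SIZE << order bytes to the cache's memcg... */
	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
				PAGE_SIZE << order);
	if (!res)
		/* ...and, only on success, count the pages on the cache. */
		atomic_add(1 << order, &cachep->memcg_params->nr_pages);
	return res;
}

__memcg_uncharge_slab() mirrors this, uncharging PAGE_SIZE << order and subtracting 1 << order from nr_pages, so the slab allocators no longer need separate memcg_bind_pages()/memcg_release_pages() calls.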
@@ -506,8 +506,8 @@ void memcg_update_array_size(int num_groups);
 struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
 
 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
......
@@ -2954,7 +2954,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
 }
 #endif
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
+static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
 	int ret = 0;
@@ -2992,7 +2992,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	return ret;
 }
 
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
+static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 {
 	res_counter_uncharge(&memcg->res, size);
 	if (do_swap_account)
@@ -3390,6 +3390,24 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 	__memcg_create_cache_enqueue(memcg, cachep);
 	memcg_resume_kmem_account();
 }
 
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
+{
+	int res;
+
+	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
+				PAGE_SIZE << order);
+	if (!res)
+		atomic_add(1 << order, &cachep->memcg_params->nr_pages);
+	return res;
+}
+
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
+{
+	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
+	atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
+}
+
 /*
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
......
@@ -1712,7 +1712,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
-	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1748,7 +1747,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page_mapcount_reset(page);
 	page->mapping = NULL;
 
-	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	__free_pages(page, cachep->gfporder);
......
@@ -121,18 +121,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return !s->memcg_params || s->memcg_params->is_root_cache;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_sub(1 << order, &s->memcg_params->nr_pages);
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
@@ -198,8 +186,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -208,7 +195,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, order);
 }
 
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
@@ -216,14 +203,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return true;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
......
@@ -1422,7 +1422,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	order = compound_order(page);
 	inc_slabs_node(s, page_to_nid(page), page->objects);
-	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1473,7 +1472,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
-	memcg_release_pages(s, order);
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
......