Commit f3ccb2c4 authored by Vladimir Davydov, committed by Linus Torvalds

memcg: unify slab and other kmem pages charging

We have memcg_kmem_charge and memcg_kmem_uncharge methods for charging and
uncharging kmem pages to memcg, but currently they are not used for
charging slab pages (i.e. they are only used for charging pages allocated
with alloc_kmem_pages).  The only reason the slab subsystem uses special
helpers, memcg_charge_slab and memcg_uncharge_slab, is that it needs to
charge to the memcg of the kmem cache, while memcg_charge_kmem charges to
the memcg that the current task belongs to.
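
For illustration, the two pre-patch charge paths looked roughly like this
(a condensed sketch of the code touched by the hunks below; error handling
and gfp-flag setup simplified):

	/* slab (mm/slab.c): charge the cache's memcg up front, roll
	 * back if the page allocation then fails */
	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;
	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page) {
		memcg_uncharge_slab(cachep, cachep->gfporder);
		return NULL;
	}

	/* other kmem pages (alloc_kmem_pages): allocate first, then
	 * charge the current task's memcg */
	page = alloc_pages(gfp_mask, order);
	if (page && memcg_kmem_charge(page, gfp_mask, order)) {
		__free_pages(page, order);
		page = NULL;
	}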

To remove this diversity, this patch adds an extra argument to
__memcg_kmem_charge that can be a pointer to a memcg or NULL.  If it is
not NULL, the function tries to charge to the memcg it points to;
otherwise it charges to the current context.  Next, it makes the slab
subsystem use this function to charge slab pages.
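
With the outer function, both users funnel into one implementation.
Condensed from the hunks below (declarations merged for brevity), the
resulting shape is:

	/* charge an explicitly given memcg (slab passes the cache's memcg) */
	int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
				      struct mem_cgroup *memcg);

	/* charge the current task's memcg: look it up, then reuse the above */
	int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
	{
		struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
		int ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);

		css_put(&memcg->css);
		return ret;
	}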

Since memcg_charge_kmem and memcg_uncharge_kmem helpers are now used only
in __memcg_kmem_charge and __memcg_kmem_uncharge, they are inlined.  Since
__memcg_kmem_charge stores a pointer to the memcg in the page struct, we
don't need memcg_uncharge_slab anymore and can use free_kmem_pages.
Besides, one can now detect which memcg a slab page belongs to by reading
/proc/kpagecgroup.
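
As a quick check, here is a minimal userspace sketch (an illustrative
program, not part of this patch) that reads the memcg inode number for a
given PFN from /proc/kpagecgroup, which exports one 64-bit inode number
per PFN and requires root:

	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		uint64_t pfn, ino;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
			return 1;
		}
		pfn = strtoull(argv[1], NULL, 0);

		fd = open("/proc/kpagecgroup", O_RDONLY);
		if (fd < 0) {
			perror("open /proc/kpagecgroup");
			return 1;
		}
		/* the file is an array of u64s indexed by PFN */
		if (pread(fd, &ino, sizeof(ino), pfn * sizeof(ino)) != sizeof(ino)) {
			perror("pread");
			return 1;
		}
		close(fd);

		/* 0 means the page is not charged to any cgroup */
		printf("pfn %" PRIu64 " -> memcg inode %" PRIu64 "\n", pfn, ino);
		return 0;
	}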

Note, this patch switches slab to charge-after-alloc design.  Since this
design is already used for all other memcg charges, it should not make any
difference.
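
Condensed from the mm/slab.c hunk below, the charge-after-alloc sequence
is:

	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page)
		return NULL;
	/* allocation succeeded; if the charge fails, give the page back */
	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

The only observable difference is transient: the page exists for a moment
before it is charged, which is already the case for every other
memcg-accounted allocation.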

[hannes@cmpxchg.org: better to have an outer function than a magic parameter for the memcg lookup]
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent d05e83a6
include/linux/memcontrol.h:
@@ -752,6 +752,8 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
  * conditions, but because they are pretty simple, they are expected to be
  * fast.
  */
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			      struct mem_cgroup *memcg);
 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge(struct page *page, int order);
@@ -770,10 +772,6 @@ void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
-		      unsigned long nr_pages);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
-
 static inline bool __memcg_kmem_bypass(gfp_t gfp)
 {
 	if (!memcg_kmem_enabled())
mm/memcontrol.c:
@@ -2215,34 +2215,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
-		      unsigned long nr_pages)
-{
-	struct page_counter *counter;
-	int ret = 0;
-
-	ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
-	if (ret < 0)
-		return ret;
-
-	ret = try_charge(memcg, gfp, nr_pages);
-	if (ret)
-		page_counter_uncharge(&memcg->kmem, nr_pages);
-
-	return ret;
-}
-
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
-{
-	page_counter_uncharge(&memcg->memory, nr_pages);
-	if (do_swap_account)
-		page_counter_uncharge(&memcg->memsw, nr_pages);
-
-	page_counter_uncharge(&memcg->kmem, nr_pages);
-
-	css_put_many(&memcg->css, nr_pages);
-}
-
 static int memcg_alloc_cache_id(void)
 {
 	int id, size;
@@ -2404,36 +2376,59 @@ void __memcg_kmem_put_cache(struct kmem_cache *cachep)
 	css_put(&cachep->memcg_params.memcg->css);
 }
 
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			      struct mem_cgroup *memcg)
 {
-	struct mem_cgroup *memcg;
-	int ret;
-
-	memcg = get_mem_cgroup_from_mm(current->mm);
+	unsigned int nr_pages = 1 << order;
+	struct page_counter *counter;
+	int ret = 0;
 
-	if (!memcg_kmem_is_active(memcg)) {
-		css_put(&memcg->css);
+	if (!memcg_kmem_is_active(memcg))
 		return 0;
-	}
 
-	ret = memcg_charge_kmem(memcg, gfp, 1 << order);
+	ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
+	if (ret)
+		return ret;
+
+	ret = try_charge(memcg, gfp, nr_pages);
+	if (ret) {
+		page_counter_uncharge(&memcg->kmem, nr_pages);
+		return ret;
+	}
+
+	page->mem_cgroup = memcg;
+
+	return 0;
+}
+
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	memcg = get_mem_cgroup_from_mm(current->mm);
+	ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
 	css_put(&memcg->css);
-	page->mem_cgroup = memcg;
 	return ret;
 }
 
 void __memcg_kmem_uncharge(struct page *page, int order)
 {
 	struct mem_cgroup *memcg = page->mem_cgroup;
+	unsigned int nr_pages = 1 << order;
 
 	if (!memcg)
 		return;
 
 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
-	memcg_uncharge_kmem(memcg, 1 << order);
+
+	page_counter_uncharge(&memcg->kmem, nr_pages);
+	page_counter_uncharge(&memcg->memory, nr_pages);
+	if (do_swap_account)
+		page_counter_uncharge(&memcg->memsw, nr_pages);
+
 	page->mem_cgroup = NULL;
+	css_put_many(&memcg->css, nr_pages);
 }
 
 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
mm/slab.c:
@@ -1593,16 +1593,17 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
-		return NULL;
-
 	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
-		memcg_uncharge_slab(cachep, cachep->gfporder);
 		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
 	}
 
+	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+		__free_pages(page, cachep->gfporder);
+		return NULL;
+	}
+
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (page_is_pfmemalloc(page))
 		pfmemalloc_active = true;
@@ -1654,8 +1655,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	__free_pages(page, cachep->gfporder);
-	memcg_uncharge_slab(cachep, cachep->gfporder);
+	__free_kmem_pages(page, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
mm/slab.h:
@@ -236,23 +236,16 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s->memcg_params.root_cache;
 }
 
-static __always_inline int memcg_charge_slab(struct kmem_cache *s,
-					     gfp_t gfp, int order)
+static __always_inline int memcg_charge_slab(struct page *page,
+					     gfp_t gfp, int order,
+					     struct kmem_cache *s)
 {
 	if (!memcg_kmem_enabled())
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
-}
-
-static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
-{
-	if (!memcg_kmem_enabled())
-		return;
-	if (is_root_cache(s))
-		return;
-	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
+	return __memcg_kmem_charge_memcg(page, gfp, order,
+					 s->memcg_params.memcg);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
@@ -289,15 +282,12 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s;
 }
 
-static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
+static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
+				    struct kmem_cache *s)
 {
 	return 0;
 }
 
-static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
-{
-}
-
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
mm/slub.c:
@@ -1328,16 +1328,15 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 
 	flags |= __GFP_NOTRACK;
 
-	if (memcg_charge_slab(s, flags, order))
-		return NULL;
-
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (!page)
-		memcg_uncharge_slab(s, order);
+	if (page && memcg_charge_slab(page, flags, order, s)) {
+		__free_pages(page, order);
+		page = NULL;
+	}
 
 	return page;
 }
@@ -1476,8 +1475,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_pages(page, order);
-	memcg_uncharge_slab(s, order);
+	__free_kmem_pages(page, order);
 }
 
 #define need_reserve_slab_rcu \