Commit cbc1215f authored by Roman Gushchin, committed by Yang Yingliang

mm: memcg/slab: unify SLAB and SLUB page accounting

mainline inclusion
from mainline-5.3-rc1
commit 6cea1d56
category: bugfix
bugzilla: 34611
CVE: NA

-------------------------------------------------

Currently the page accounting code is duplicated in the SLAB and SLUB
internals.  Let's move it into new charge_slab_page() and
uncharge_slab_page() helpers in mm/slab.h.  These helpers are
responsible for statistics (global and memcg-aware) and for memcg
charging, so they replace the direct memcg_(un)charge_slab() calls.
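
For illustration only, the contract of the new helpers can be modeled
in standalone C.  This is a sketch, not kernel code: the struct, the
vmstat array, and the memcg_charge_ok toggle are stand-ins for
struct kmem_cache, mod_lruvec_page_state() and memcg_charge_slab();
the real helpers appear in the mm/slab.h hunk below.

	#include <stdio.h>

	#define SLAB_RECLAIM_ACCOUNT 0x1U

	enum { NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_ITEMS };

	/* Stand-ins for kernel state, for this sketch only. */
	struct kmem_cache { unsigned int flags; };
	static long vmstat[NR_ITEMS];
	static int memcg_charge_ok = 1;	/* toggle to simulate a failed memcg charge */

	static int cache_vmstat_idx(struct kmem_cache *s)
	{
		return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
	}

	/* Models charge_slab_page(): the counter moves only if the charge succeeds. */
	static int charge_slab_page(int order, struct kmem_cache *s)
	{
		if (!memcg_charge_ok)
			return -1;		/* memcg_charge_slab() failed */
		vmstat[cache_vmstat_idx(s)] += 1L << order;
		return 0;
	}

	/* Models uncharge_slab_page(): statistics first, then the memcg uncharge. */
	static void uncharge_slab_page(int order, struct kmem_cache *s)
	{
		vmstat[cache_vmstat_idx(s)] -= 1L << order;
	}

	int main(void)
	{
		struct kmem_cache c = { .flags = SLAB_RECLAIM_ACCOUNT };

		charge_slab_page(2, &c);	/* a 4-page slab */
		printf("reclaimable: %ld\n", vmstat[NR_SLAB_RECLAIMABLE]);	/* 4 */

		memcg_charge_ok = 0;		/* a failed charge leaves counters alone */
		charge_slab_page(3, &c);
		printf("reclaimable: %ld\n", vmstat[NR_SLAB_RECLAIMABLE]);	/* still 4 */

		uncharge_slab_page(2, &c);
		printf("reclaimable: %ld\n", vmstat[NR_SLAB_RECLAIMABLE]);	/* 0 */
		return 0;
	}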

Link: http://lkml.kernel.org/r/20190611231813.3148843-6-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 6cea1d56)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

 Conflicts:
	mm/slab.h
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 6e28d7f7
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1405,7 +1405,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 								int nodeid)
 {
 	struct page *page;
-	int nr_pages;
 
 	flags |= cachep->allocflags;
 
@@ -1415,17 +1414,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
 		__free_pages(page, cachep->gfporder);
 		return NULL;
 	}
 
-	nr_pages = (1 << cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1440,12 +1433,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	int order = cachep->gfporder;
-	unsigned long nr_freed = (1 << order);
-
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
 
 	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
@@ -1454,8 +1441,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page->mapping = NULL;
 
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += nr_freed;
-	memcg_uncharge_slab(page, order, cachep);
+		current->reclaim_state->reclaimed_slab += 1 << order;
+	uncharge_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 
 /* List of all root caches. */
@@ -350,6 +356,25 @@ static inline void memcg_link_cache(struct kmem_cache *s,
 
 #endif /* CONFIG_MEMCG_KMEM */
 
+static __always_inline int charge_slab_page(struct page *page,
+					    gfp_t gfp, int order,
+					    struct kmem_cache *s)
+{
+	int ret = memcg_charge_slab(page, gfp, order, s);
+
+	if (!ret)
+		mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+	return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+					       struct kmem_cache *s)
+{
+	mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+	memcg_uncharge_slab(page, order, s);
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1457,7 +1457,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (page && memcg_charge_slab(page, flags, order, s)) {
+	if (page && charge_slab_page(page, flags, order, s)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -1649,11 +1649,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		return NULL;
 
-	mod_lruvec_page_state(page,
-			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			1 << oo_order(oo));
-
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 
 	return page;
@@ -1687,18 +1682,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
-	mod_lruvec_page_state(page,
-			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			-pages);
-
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	memcg_uncharge_slab(page, order, s);
+	uncharge_slab_page(page, order, s);
 	__free_pages(page, order);
 }