Commit c0768f5d authored by Vlastimil Babka, committed by Yang Yingliang

mm, sl[ou]b: improve memory accounting

mainline inclusion
from mainline-5.4-rc3
commit 6a486c0a
category: feature
bugzilla: 51349
CVE: NA

-------------------------------------------------

Patch series "guarantee natural alignment for kmalloc()", v2.

This patch (of 2):

SLOB currently doesn't account its pages at all, so in /proc/meminfo the
Slab field shows zero.  Modifying a counter on page allocation and
freeing should be acceptable even for the small system scenarios SLOB is
intended for.  Since reclaimable caches are not separated in SLOB,
account everything as unreclaimable.
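
In concrete terms, the accounting is a pair of mod_node_page_state() updates to the node's NR_SLAB_UNRECLAIMABLE counter, issued where SLOB gets pages from and returns pages to the page allocator. A minimal sketch of that pattern, condensed from the mm/slob.c hunks below (the *_sketch function names are illustrative; reclaim_state bookkeeping and SLOB's freelist handling are omitted):

/* Sketch of the counter updates this patch adds around SLOB's page helpers. */
static void *slob_page_alloc_sketch(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	/* Make the pages visible as unreclaimable slab in /proc/meminfo. */
	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
			    1 << order);
	return page_address(page);
}

static void slob_page_free_sketch(void *b, int order)
{
	struct page *sp = virt_to_page(b);

	/* Drop the accounting before handing the pages back. */
	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
			    -(1 << order));
	__free_pages(sp, order);
}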

SLUB currently doesn't account kmalloc() and kmalloc_node() allocations
larger than order-1 page, that are passed directly to the page
allocator.  As they also don't appear in /proc/slabinfo, it might look
like a memory leak.  For consistency, account them as well.  (SLAB
doesn't actually use page allocator directly, so no change there).
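
For context, SLUB takes this page-allocator path only when a request exceeds its largest kmalloc slab cache, KMALLOC_MAX_CACHE_SIZE (two pages for SLUB, matching the "order-1 page" threshold above). A hedged sketch of that dispatch together with the accounting this patch adds; kmalloc_oversized_sketch() is an illustrative name, not the kernel's actual entry point:

/* Sketch: oversized kmalloc() requests in SLUB bypass the slab caches and go
 * straight to the page allocator; the patch adds the accounting shown here.
 */
static void *kmalloc_oversized_sketch(size_t size, gfp_t flags)
{
	unsigned int order;
	struct page *page;

	if (size <= KMALLOC_MAX_CACHE_SIZE)
		return NULL;	/* served from a kmalloc slab cache instead */

	order = get_order(size);
	page = alloc_pages(flags | __GFP_COMP, order);
	if (!page)
		return NULL;

	/* Account the pages so they appear as unreclaimable slab. */
	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
			    1 << order);
	return page_address(page);
}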

Ideally SLOB and SLUB would be handled in separate patches, but due to
the shared kmalloc_order() function and different kfree()
implementations, it's easier to patch both at once to prevent
inconsistencies.

Link: http://lkml.kernel.org/r/20190826111627.7505-2-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 6a486c0a)
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent: e75e9f0b
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1216,12 +1216,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret;
+	void *ret = NULL;
 	struct page *page;
 
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
-	ret = page ? page_address(page) : NULL;
+	if (likely(page)) {
+		ret = page_address(page);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    1 << order);
+	}
 	kmemleak_alloc(ret, size, 1, flags);
 	kasan_kmalloc_large(ret, size, flags);
 	return ret;
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-	void *page;
+	struct page *page;
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	if (!page)
 		return NULL;
 
+	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+			    1 << order);
 	return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+	struct page *sp = virt_to_page(b);
+
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	free_pages((unsigned long)b, order);
+
+	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+			    -(1 << order));
+	__free_pages(sp, order);
 }
 
 /*
@@ -517,8 +524,13 @@ void kfree(const void *block)
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
-	} else
-		__free_pages(sp, compound_order(sp));
+	} else {
+		unsigned int order = compound_order(sp);
+
+		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+				    -(1 << order));
+		__free_pages(sp, order);
+	}
 }
 EXPORT_SYMBOL(kfree);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3803,11 +3803,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
 	void *ptr = NULL;
+	unsigned int order = get_order(size);
 
 	flags |= __GFP_COMP;
-	page = alloc_pages_node(node, flags, get_order(size));
-	if (page)
+	page = alloc_pages_node(node, flags, order);
+	if (page) {
 		ptr = page_address(page);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    1 << order);
+	}
 
 	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
@@ -3942,9 +3946,13 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
+		unsigned int order = compound_order(page);
+
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		__free_pages(page, compound_order(page));
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    -(1 << order));
+		__free_pages(page, order);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);