Commit d5465c2d authored by Yang Yingliang

Revert "mm, sl[ou]b: improve memory accounting"

hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA

-------------------------------------------------

This patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1-vbabka@suse.cz/
causes a performance regression, so revert it and use another way to fix
the warning introduced by the fix for CVE-2021-27365.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent cf060db6
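For context, the reverted series charged page-allocator-backed (large) kmalloc allocations to the per-node NR_SLAB_UNRECLAIMABLE counter at allocation time and subtracted the same amount on free. Below is a minimal sketch of that pattern, condensed from the kmalloc_order() hunk that follows; the function name is invented for illustration and this is not the exact upstream code.

/*
 * Sketch of the accounting this revert removes, condensed from the
 * kmalloc_order() hunk below; not the exact upstream code.
 */
static void *large_kmalloc_sketch(gfp_t flags, unsigned int order)
{
        struct page *page = alloc_pages(flags | __GFP_COMP, order);

        if (!page)
                return NULL;
        /* charge 2^order pages to the node's unreclaimable-slab counter */
        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                            1 << order);
        return page_address(page);
}

The corresponding free paths (slob_free_pages() and both kfree() hunks) call mod_node_page_state() with -(1 << order) before __free_pages(); the revert drops both sides of that bookkeeping.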
@@ -1216,16 +1216,12 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-        void *ret = NULL;
+        void *ret;
         struct page *page;

         flags |= __GFP_COMP;
         page = alloc_pages(flags, order);
-        if (likely(page)) {
-                ret = page_address(page);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    1 << order);
-        }
+        ret = page ? page_address(page) : NULL;
         kmemleak_alloc(ret, size, 1, flags);
         kasan_kmalloc_large(ret, size, flags);
         return ret;
...
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)

 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-        struct page *page;
+        void *page;

 #ifdef CONFIG_NUMA
         if (node != NUMA_NO_NODE)
@@ -202,21 +202,14 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
         if (!page)
                 return NULL;

-        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                            1 << order);
         return page_address(page);
 }

 static void slob_free_pages(void *b, int order)
 {
-        struct page *sp = virt_to_page(b);
-
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += 1 << order;
-
-        mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-                            -(1 << order));
-        __free_pages(sp, order);
+        free_pages((unsigned long)b, order);
 }

 /*
@@ -524,13 +517,8 @@ void kfree(const void *block)
                 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                 unsigned int *m = (unsigned int *)(block - align);
                 slob_free(m, *m + align);
-        } else {
-                unsigned int order = compound_order(sp);
-
-                mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-                                    -(1 << order));
-                __free_pages(sp, order);
-        }
+        } else
+                __free_pages(sp, compound_order(sp));
 }

 EXPORT_SYMBOL(kfree);
...
@@ -3803,15 +3803,11 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
         struct page *page;
         void *ptr = NULL;
-        unsigned int order = get_order(size);

         flags |= __GFP_COMP;
-        page = alloc_pages_node(node, flags, order);
-        if (page) {
+        page = alloc_pages_node(node, flags, get_order(size));
+        if (page)
                 ptr = page_address(page);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    1 << order);
-        }

         kmalloc_large_node_hook(ptr, size, flags);
         return ptr;
@@ -3946,13 +3942,9 @@ void kfree(const void *x)

         page = virt_to_head_page(x);
         if (unlikely(!PageSlab(page))) {
-                unsigned int order = compound_order(page);
-
                 BUG_ON(!PageCompound(page));
                 kfree_hook(object);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    -(1 << order));
-                __free_pages(page, order);
+                __free_pages(page, compound_order(page));
                 return;
         }
         slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
...
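As a hypothetical way to observe the effect of the revert from userspace (not part of this patch): the counter the series updated is exported as nr_slab_unreclaimable in /proc/vmstat, so after the revert, pages from large kmalloc calls no longer appear there.

/* Hypothetical userspace check, not part of this patch: print the
 * vmstat counter that the reverted series was updating. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/vmstat", "r");
        char line[256];

        if (!f) {
                perror("/proc/vmstat");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "nr_slab_unreclaimable", 21))
                        fputs(line, stdout); /* value is in pages */
        fclose(f);
        return 0;
}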