Commit d5465c2d authored by Yang Yingliang

Revert "mm, sl[ou]b: improve memory accounting"

hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA

-------------------------------------------------

This patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1-vbabka@suse.cz/
causes a performance regression, so revert it and fix the warning introduced
by the fix for CVE-2021-27365 in another way.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent cf060db6
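Every hunk below removes the same accounting pattern: a per-node vmstat
update on each page-order ("large") slab allocation and free. As a rough
illustration (the helper names here are invented for this note; the
mod_node_page_state() calls are quoted verbatim from the hunks below):

        /* charge 2^order pages to the node's unreclaimable-slab counter */
        static inline void large_kmalloc_charge(struct page *page,
                                                unsigned int order)
        {
                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                                    1 << order);
        }

        /* and undo the charge when the pages are freed */
        static inline void large_kmalloc_uncharge(struct page *page,
                                                  unsigned int order)
        {
                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                                    -(1 << order));
        }

Paying this cost on every large kmalloc()/kfree() pair is the per-allocation
overhead the commit message cites as the performance regression.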
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1216,16 +1216,12 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-        void *ret = NULL;
+        void *ret;
         struct page *page;
 
         flags |= __GFP_COMP;
         page = alloc_pages(flags, order);
-        if (likely(page)) {
-                ret = page_address(page);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    1 << order);
-        }
+        ret = page ? page_address(page) : NULL;
         kmemleak_alloc(ret, size, 1, flags);
         kasan_kmalloc_large(ret, size, flags);
         return ret;
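Pieced together from the hunk above, kmalloc_order() after the revert reads
as follows (a reconstruction for readability, not part of the diff):

        void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        {
                void *ret;
                struct page *page;

                /* large kmalloc()s are served by a compound page */
                flags |= __GFP_COMP;
                page = alloc_pages(flags, order);
                /* the NR_SLAB_UNRECLAIMABLE update is gone from this path */
                ret = page ? page_address(page) : NULL;
                kmemleak_alloc(ret, size, 1, flags);
                kasan_kmalloc_large(ret, size, flags);
                return ret;
        }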
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-        struct page *page;
+        void *page;
 
 #ifdef CONFIG_NUMA
         if (node != NUMA_NO_NODE)
@@ -202,21 +202,14 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
         if (!page)
                 return NULL;
 
-        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                            1 << order);
         return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
-        struct page *sp = virt_to_page(b);
-
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += 1 << order;
-
-        mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-                            -(1 << order));
-        __free_pages(sp, order);
+        free_pages((unsigned long)b, order);
 }
 
 /*
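Reconstructed from the hunk above, slob_free_pages() after the revert is
reduced to:

        static void slob_free_pages(void *b, int order)
        {
                if (current->reclaim_state)
                        current->reclaim_state->reclaimed_slab += 1 << order;
                /*
                 * free_pages() takes the virtual address directly, so the
                 * former virt_to_page() + __free_pages() pair is unneeded.
                 */
                free_pages((unsigned long)b, order);
        }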
@@ -524,13 +517,8 @@ void kfree(const void *block)
                 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                 unsigned int *m = (unsigned int *)(block - align);
                 slob_free(m, *m + align);
-        } else {
-                unsigned int order = compound_order(sp);
-
-                mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-                                    -(1 << order));
-                __free_pages(sp, order);
-        }
+        } else
+                __free_pages(sp, compound_order(sp));
 }
 EXPORT_SYMBOL(kfree);
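After the revert, the non-slab branch of slob's kfree() hands the compound
page straight back to the page allocator. Sketch of the function tail (the
enclosing PageSlab() test is inferred from the surrounding slob code; it is
not visible in the hunk):

        if (PageSlab(sp)) {
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
        } else
                /* no vmstat adjustment before freeing the pages */
                __free_pages(sp, compound_order(sp));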
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3803,15 +3803,11 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
         struct page *page;
         void *ptr = NULL;
-        unsigned int order = get_order(size);
 
         flags |= __GFP_COMP;
-        page = alloc_pages_node(node, flags, order);
-        if (page) {
+        page = alloc_pages_node(node, flags, get_order(size));
+        if (page)
                 ptr = page_address(page);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    1 << order);
-        }
 
         kmalloc_large_node_hook(ptr, size, flags);
         return ptr;
@@ -3946,13 +3942,9 @@ void kfree(const void *x)
 
         page = virt_to_head_page(x);
         if (unlikely(!PageSlab(page))) {
-                unsigned int order = compound_order(page);
-
                 BUG_ON(!PageCompound(page));
                 kfree_hook(object);
-                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                    -(1 << order));
-                __free_pages(page, order);
+                __free_pages(page, compound_order(page));
                 return;
         }
         slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
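Likewise for slub, kmalloc_large_node() after the revert (reconstructed from
the hunk above):

        static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        {
                struct page *page;
                void *ptr = NULL;

                flags |= __GFP_COMP;
                page = alloc_pages_node(node, flags, get_order(size));
                if (page)
                        ptr = page_address(page);

                kmalloc_large_node_hook(ptr, size, flags);
                return ptr;
        }

The order is recomputed inline via get_order(size) because the local order
variable existed only to feed the removed accounting calls.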