Commit 9ba54aa4 authored by: Yang Yingliang

Revert "mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two)"

hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA

-------------------------------------------------

This patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1-vbabka@suse.cz/
causes a performance regression, so revert it and use a different approach to fix the
warning introduced by the fix for CVE-2021-27365.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent d5465c2d
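
Illustrative note (not part of the patch): the reverted series made kmalloc() return naturally aligned memory for power-of-two sizes; with this revert, kmalloc() again guarantees only ARCH_KMALLOC_MINALIGN alignment. A caller that needs a larger alignment can request it explicitly through its own cache, as in the minimal sketch below; the module, cache, and symbol names are made up for the example.

/*
 * Illustrative sketch only: request 512-byte alignment explicitly instead
 * of relying on kmalloc(512) being naturally aligned. All names below are
 * hypothetical.
 */
#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int __init example_align_init(void)
{
        void *buf;

        /* Ask the slab allocator for 512-byte alignment up front. */
        example_cache = kmem_cache_create("example_512", 512, 512, 0, NULL);
        if (!example_cache)
                return -ENOMEM;

        buf = kmem_cache_alloc(example_cache, GFP_KERNEL);
        if (!buf) {
                kmem_cache_destroy(example_cache);
                return -ENOMEM;
        }
        kmem_cache_free(example_cache, buf);
        return 0;
}

static void __exit example_align_exit(void)
{
        kmem_cache_destroy(example_cache);
}

module_init(example_align_init);
module_exit(example_align_exit);
MODULE_LICENSE("GPL");

kmem_cache_create() honours an explicit align argument independently of the reverted behaviour, so this pattern works on both sides of the revert.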
include/linux/slab.h
@@ -457,10 +457,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
- * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
- * bytes. For @size of power of two bytes, the alignment is also guaranteed
- * to be at least to the size.
- *
  * The @flags argument may be one of:
  *
  * %GFP_USER - Allocate memory on behalf of user. May sleep.
mm/slab_common.c
@@ -970,19 +970,10 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
                 unsigned int useroffset, unsigned int usersize)
 {
         int err;
-        unsigned int align = ARCH_KMALLOC_MINALIGN;
 
         s->name = name;
         s->size = s->object_size = size;
-
-        /*
-         * For power of two sizes, guarantee natural alignment for kmalloc
-         * caches, regardless of SL*B debugging options.
-         */
-        if (is_power_of_2(size))
-                align = max(align, size);
-        s->align = calculate_alignment(flags, align, size);
+        s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
         s->useroffset = useroffset;
         s->usersize = usersize;
mm/slob.c
@@ -215,7 +215,7 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_offset)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
         slob_t *prev, *cur, *aligned = NULL;
         int delta = 0, units = SLOB_UNITS(size);
@@ -223,17 +223,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_
         for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
                 slobidx_t avail = slob_units(cur);
 
-                /*
-                 * 'aligned' will hold the address of the slob block so that the
-                 * address 'aligned'+'align_offset' is aligned according to the
-                 * 'align' parameter. This is for kmalloc() which prepends the
-                 * allocated block with its size, so that the block itself is
-                 * aligned when needed.
-                 */
                 if (align) {
-                        aligned = (slob_t *)
-                                (ALIGN((unsigned long)cur + align_offset, align)
-                                 - align_offset);
+                        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                         delta = aligned - cur;
                 }
                 if (avail >= units + delta) { /* room enough? */
@@ -275,8 +266,7 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
-                                                        int align_offset)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
         struct page *sp;
         struct list_head *prev;
@@ -308,7 +298,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
                 /* Attempt to alloc */
                 prev = sp->lru.prev;
-                b = slob_page_alloc(sp, size, align, align_offset);
+                b = slob_page_alloc(sp, size, align);
                 if (!b)
                         continue;
@@ -336,7 +326,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
                 INIT_LIST_HEAD(&sp->lru);
                 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                 set_slob_page_free(sp, slob_list);
-                b = slob_page_alloc(sp, size, align, align_offset);
+                b = slob_page_alloc(sp, size, align);
                 BUG_ON(!b);
                 spin_unlock_irqrestore(&slob_lock, flags);
         }
@@ -438,7 +428,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
         unsigned int *m;
-        int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+        int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
         void *ret;
 
         gfp &= gfp_allowed_mask;
@@ -446,28 +436,19 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
         fs_reclaim_acquire(gfp);
         fs_reclaim_release(gfp);
 
-        if (size < PAGE_SIZE - minalign) {
-                int align = minalign;
-
-                /*
-                 * For power of two sizes, guarantee natural alignment for
-                 * kmalloc()'d objects.
-                 */
-                if (is_power_of_2(size))
-                        align = max(minalign, (int) size);
-
+        if (size < PAGE_SIZE - align) {
                 if (!size)
                         return ZERO_SIZE_PTR;
 
-                m = slob_alloc(size + minalign, gfp, align, node, minalign);
+                m = slob_alloc(size + align, gfp, align, node);
 
                 if (!m)
                         return NULL;
                 *m = size;
-                ret = (void *)m + minalign;
+                ret = (void *)m + align;
 
                 trace_kmalloc_node(caller, ret,
-                                   size, size + minalign, gfp, node);
+                                   size, size + align, gfp, node);
         } else {
                 unsigned int order = get_order(size);
@@ -563,7 +544,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
         fs_reclaim_release(flags);
 
         if (c->size < PAGE_SIZE) {
-                b = slob_alloc(c->size, flags, c->align, node, 0);
+                b = slob_alloc(c->size, flags, c->align, node);
                 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                             SLOB_UNITS(c->size) * SLOB_UNIT,
                                             flags, node);
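
A side note on the __do_kmalloc_node() hunk above (illustrative userspace sketch, not kernel code): the restored SLOB path stores the requested size in a header of 'align' bytes placed directly in front of the returned object, which is how the size is later recovered. A rough model of that layout, with made-up names:

#include <stdio.h>
#include <stdlib.h>

#define MINALIGN 16u   /* stand-in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN) */

/* Allocate 'size' bytes preceded by a MINALIGN-sized header recording the size. */
static void *sketch_kmalloc(unsigned int size)
{
        unsigned int *m = malloc(size + MINALIGN);

        if (!m)
                return NULL;
        *m = size;                      /* record the size in the header */
        return (char *)m + MINALIGN;    /* object starts right after the header */
}

/* Recover the recorded size by stepping back over the header. */
static unsigned int sketch_ksize(const void *obj)
{
        return *(const unsigned int *)((const char *)obj - MINALIGN);
}

int main(void)
{
        void *p = sketch_kmalloc(100);

        if (!p)
                return 1;
        printf("recorded size: %u\n", sketch_ksize(p));  /* prints 100 */
        free((char *)p - MINALIGN);
        return 0;
}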