Commit 249247b6 authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: remove object status buffer for DEBUG_SLAB_LEAK

Now we don't use the object status buffer in any setup. Remove it.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent d31676df
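For context, the buffer being removed was one status char per object stored immediately after the freelist index array of a slab page; the removed set_obj_status() located it by skipping past the index array, which is why every sizing path below had to account for an extra sizeof(char) per object. The following is an illustrative user-space sketch of that old layout, not kernel code; NUM_OBJS and the 1-byte index type are assumptions made for the example.

/*
 * Illustrative user-space model only (not the kernel code): before this
 * patch, a slab page's freelist was an array of freelist_idx_t indices,
 * and CONFIG_DEBUG_SLAB_LEAK appended one status byte per object right
 * after that array.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char freelist_idx_t;	/* small-slab index type, for illustration */

#define OBJECT_FREE	(0)
#define OBJECT_ACTIVE	(1)
#define NUM_OBJS	8		/* hypothetical objects per slab */

int main(void)
{
	/* Old layout: [index array][one status char per object]. */
	size_t freelist_size = NUM_OBJS * sizeof(freelist_idx_t);
	char *freelist = malloc(freelist_size + NUM_OBJS * sizeof(char));
	char *status = freelist + freelist_size;	/* what set_obj_status() computed */

	memset(status, OBJECT_FREE, NUM_OBJS);
	status[3] = OBJECT_ACTIVE;	/* e.g. object 3 handed out */

	printf("per-object freelist overhead removed by this patch: %zu byte\n",
	       sizeof(char));
	free(freelist);
	return 0;
}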
@@ -380,22 +380,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#define OBJECT_FREE (0)
-#define OBJECT_ACTIVE (1)
-
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 
-static void set_obj_status(struct page *page, int idx, int val)
-{
-	int freelist_size;
-	char *status;
-	struct kmem_cache *cachep = page->slab_cache;
-
-	freelist_size = cachep->num * sizeof(freelist_idx_t);
-	status = (char *)page->freelist + freelist_size;
-	status[idx] = val;
-}
-
 static inline bool is_store_user_clean(struct kmem_cache *cachep)
 {
 	return atomic_read(&cachep->store_user_clean) == 1;
@@ -413,7 +399,6 @@ static inline void set_store_user_dirty(struct kmem_cache *cachep)
 }
 
 #else
-static inline void set_obj_status(struct page *page, int idx, int val) {}
 static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
 
 #endif
@@ -476,9 +461,6 @@ static size_t calculate_freelist_size(int nr_objs, size_t align)
 	size_t freelist_size;
 
 	freelist_size = nr_objs * sizeof(freelist_idx_t);
-	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
-		freelist_size += nr_objs * sizeof(char);
-
 	if (align)
 		freelist_size = ALIGN(freelist_size, align);
 
@@ -491,10 +473,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
 	int nr_objs;
 	size_t remained_size;
 	size_t freelist_size;
-	int extra_space = 0;
 
-	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
-		extra_space = sizeof(char);
 	/*
 	 * Ignore padding for the initial guess. The padding
 	 * is at most @align-1 bytes, and @buffer_size is at
@@ -503,7 +482,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
 	 * into the memory allocation when taking the padding
 	 * into account.
 	 */
-	nr_objs = slab_size / (buffer_size + idx_size + extra_space);
+	nr_objs = slab_size / (buffer_size + idx_size);
 
 	/*
 	 * This calculated number will be either the right
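To illustrate the effect on the initial guess (hypothetical numbers, not taken from the patch): with a 4096-byte slab, 62-byte objects, and a 1-byte idx_size, the old divisor was 62 + 1 + 1 = 64, giving 4096 / 64 = 64 objects, while the new divisor is 62 + 1 = 63, giving 4096 / 63 = 65 objects before padding is reconsidered.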
@@ -1961,16 +1940,13 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			break;
 
 		if (flags & CFLGS_OFF_SLAB) {
-			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
 			/*
 			 * Max number of objs-per-slab for caches which
 			 * use off-slab slabs. Needed to avoid a possible
 			 * looping condition in cache_grow().
 			 */
-			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
-				freelist_size_per_obj += sizeof(char);
 			offslab_limit = size;
-			offslab_limit /= freelist_size_per_obj;
+			offslab_limit /= sizeof(freelist_idx_t);
 
 			if (num > offslab_limit)
 				break;
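Similarly for off-slab freelists (hypothetical figures): with DEBUG_SLAB_LEAK the per-object freelist cost was sizeof(freelist_idx_t) + sizeof(char), so for a 1-byte index type the divisor was 2; after this change it is just sizeof(freelist_idx_t) = 1, doubling the computed offslab_limit.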
@@ -2533,7 +2509,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		if (cachep->ctor)
 			cachep->ctor(objp);
 #endif
-		set_obj_status(page, i, OBJECT_FREE);
 		set_free_obj(page, i, i);
 	}
 }
@@ -2745,7 +2720,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, page, objnr));
 
-	set_obj_status(page, objnr, OBJECT_FREE);
 	if (cachep->flags & SLAB_POISON) {
 		poison_obj(cachep, objp, POISON_FREE);
 		slab_kernel_map(cachep, objp, 0, caller);
@@ -2878,8 +2852,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 				gfp_t flags, void *objp, unsigned long caller)
 {
-	struct page *page;
-
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
@@ -2904,8 +2876,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
 
-	page = virt_to_head_page(objp);
-	set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
 		cachep->ctor(objp);
...