commit f7cb1933 authored by Christoph Lameter, committed by Pekka Enberg

SLUB: Pass active and inactive redzone flags instead of boolean to debug functions

Pass the actual byte values used for inactive and active redzoning to the
functions that check the objects. This avoids a lot of the ? : conditionals
used to look the values up inside the functions.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 7340cc84
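
The change is mechanical: the debug helpers used to take a boolean active flag and derive the redzone byte themselves; after the patch the caller passes the byte value directly. A minimal standalone sketch of the before/after pattern — the fill_redzone_* names are illustrative, not kernel functions, while the marker values match include/linux/poison.h:

#include <stdio.h>
#include <string.h>

/* Marker values as defined in include/linux/poison.h */
#define SLUB_RED_INACTIVE 0xbb	/* redzone byte while the object is free */
#define SLUB_RED_ACTIVE   0xcc	/* redzone byte while the object is allocated */

/* Before: each helper re-derived the byte from a boolean flag. */
static void fill_redzone_old(unsigned char *rz, size_t len, int active)
{
	memset(rz, active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, len);
}

/* After: the caller passes the byte value itself; no ? : needed. */
static void fill_redzone_new(unsigned char *rz, size_t len, unsigned char val)
{
	memset(rz, val, len);
}

int main(void)
{
	unsigned char rz[4];

	fill_redzone_old(rz, sizeof(rz), 1);
	printf("old, active:   0x%02x\n", rz[0]);	/* prints 0xcc */

	fill_redzone_new(rz, sizeof(rz), SLUB_RED_INACTIVE);
	printf("new, inactive: 0x%02x\n", rz[0]);	/* prints 0xbb */
	return 0;
}
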
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -490,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
 	dump_stack();
 }
 
-static void init_object(struct kmem_cache *s, void *object, int active)
+static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
@@ -500,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->objsize,
-			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
-			s->inuse - s->objsize);
+		memset(p + s->objsize, val, s->inuse - s->objsize);
 }
 
 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
@@ -637,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 }
 
 static int check_object(struct kmem_cache *s, struct page *page,
-					void *object, int active)
+					void *object, u8 val)
 {
 	u8 *p = object;
 	u8 *endobject = object + s->objsize;
 
 	if (s->flags & SLAB_RED_ZONE) {
-		unsigned int red =
-			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
-
 		if (!check_bytes_and_report(s, page, object, "Redzone",
-			endobject, red, s->inuse - s->objsize))
+			endobject, val, s->inuse - s->objsize))
 			return 0;
 	} else {
 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
@@ -657,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	}
 
 	if (s->flags & SLAB_POISON) {
-		if (!active && (s->flags & __OBJECT_POISON) &&
+		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 			(!check_bytes_and_report(s, page, p, "Poison", p,
 					POISON_FREE, s->objsize - 1) ||
 			!check_bytes_and_report(s, page, p, "Poison",
@@ -669,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		check_pad_bytes(s, page, p);
 	}
 
-	if (!s->offset && active)
+	if (!s->offset && val == SLUB_RED_ACTIVE)
 		/*
 		 * Object and freepointer overlap. Cannot check
 		 * freepointer while object is allocated.
@@ -887,7 +882,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
 		return;
 
-	init_object(s, object, 0);
+	init_object(s, object, SLUB_RED_INACTIVE);
 	init_tracking(s, object);
 }
@@ -907,14 +902,14 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 		goto bad;
 	}
 
-	if (!check_object(s, page, object, 0))
+	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
 		goto bad;
 
 	/* Success perform special debug activities for allocs */
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_ALLOC, addr);
 	trace(s, page, object, 1);
-	init_object(s, object, 1);
+	init_object(s, object, SLUB_RED_ACTIVE);
 	return 1;
 
 bad:
@@ -947,7 +942,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 		goto fail;
 	}
 
-	if (!check_object(s, page, object, 1))
+	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
 		return 0;
 
 	if (unlikely(s != page->slab)) {
@@ -971,7 +966,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
-	init_object(s, object, 0);
+	init_object(s, object, SLUB_RED_INACTIVE);
 	return 1;
 
 fail:
@@ -1075,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s,
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
-			void *object, int active) { return 1; }
+			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
@@ -1235,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
-			check_object(s, page, p, 0);
+			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -2143,7 +2138,7 @@ static void early_kmem_cache_node_alloc(int node)
 	page->inuse++;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
-	init_object(kmem_cache_node, n, 1);
+	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
 	init_kmem_cache_node(n, kmem_cache_node);
...
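
One consequence worth noting: because val is the exact byte expected in the redzone, check_object() can use the same parameter both as the pattern for check_bytes_and_report() and as the allocation-state test (val == SLUB_RED_ACTIVE replaces the old boolean). A self-contained sketch of that detection idea, assuming a scan_redzone() helper that mimics what check_bytes() does in slub.c (the helper name and the simulated overwrite are illustrative only):

#include <stdio.h>

#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE   0xcc

/* Illustrative stand-in for check_bytes(): return a pointer to the first
 * byte that differs from the expected redzone value, or NULL if all match. */
static unsigned char *scan_redzone(unsigned char *start, unsigned char value,
				   unsigned int bytes)
{
	while (bytes--) {
		if (*start != value)
			return start;
		start++;
	}
	return NULL;
}

int main(void)
{
	unsigned char redzone[8];
	unsigned int i;

	/* As init_object() does on free: fill with the inactive marker. */
	for (i = 0; i < sizeof(redzone); i++)
		redzone[i] = SLUB_RED_INACTIVE;

	redzone[3] = 0x00;	/* simulate an out-of-bounds write */

	unsigned char *bad = scan_redzone(redzone, SLUB_RED_INACTIVE,
					  sizeof(redzone));
	if (bad)
		printf("Redzone overwritten at offset %ld: 0x%02x\n",
		       (long)(bad - redzone), *bad);
	return 0;
}
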