diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 34023c65d4669bc4c4894d392e9d915db3710034..bfee0bd1d43545742504225e0ab0754a315e7aa9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -64,7 +64,10 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;
 #endif
-	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+	    struct {
+		struct kmem_cache *slab; /* SLUB: Pointer to slab */
+		void *end;		 /* SLUB: end marker */
+	    };
	    struct page *first_page;	/* Compound tail pages */
	};
	union {
diff --git a/mm/slub.c b/mm/slub.c
index 2dacaf519c4dcfdde8c537ba9262258efaa9d418..5995626e0cf1720c646409111d0ce7aba43b8ffc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -280,15 +280,32 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
 
+/*
+ * The end pointer in a slab is special. It points to the first object in the
+ * slab but has bit 0 set to mark it.
+ *
+ * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
+ * in the mapping set.
+ */
+static inline int is_end(void *addr)
+{
+	return (unsigned long)addr & PAGE_MAPPING_ANON;
+}
+
+void *slab_address(struct page *page)
+{
+	return page->end - PAGE_MAPPING_ANON;
+}
+
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
 	void *base;
 
-	if (!object)
+	if (object == page->end)
 		return 1;
 
-	base = page_address(page);
+	base = slab_address(page);
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
@@ -321,7 +338,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
+		__p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -473,7 +491,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = page_address(page);
+	u8 *addr = slab_address(page);
 
 	print_tracking(s, p);
 
@@ -651,7 +669,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
+	start = slab_address(page);
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -718,7 +736,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, page->end);
 		return 0;
 	}
 	return 1;
@@ -752,18 +770,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	void *fp = page->freelist;
 	void *object = NULL;
 
-	while (fp && nr <= s->objects) {
+	while (fp != page->end && nr <= s->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object, page->end);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = NULL;
+				page->freelist = page->end;
 				page->inuse = s->objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -869,7 +887,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 		 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
-		page->freelist = NULL;
+		page->freelist = page->end;
 	}
 	return 0;
 }
@@ -910,7 +928,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!SlabFrozen(page) && page->freelist == page->end)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1102,6 +1120,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		SetSlabDebug(page);
 
 	start = page_address(page);
+	page->end = start + 1;
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1113,7 +1132,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, page->end);
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1129,7 +1148,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, slab_address(page))
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1139,6 +1158,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 
@@ -1341,7 +1361,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist)
+		if (page->freelist != page->end)
 			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
@@ -1377,8 +1397,12 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
+	 *
+	 * We need to use is_end here because deactivate_slab may
+	 * be called for a debug slab. Then c->freelist may contain
+	 * a dummy pointer.
 	 */
-	while (unlikely(c->freelist)) {
+	while (unlikely(!is_end(c->freelist))) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
@@ -1478,7 +1502,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(object == c->page->end))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
@@ -1486,7 +1510,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 	object = c->page->freelist;
 	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
-	c->page->freelist = NULL;
+	c->page->freelist = c->page->end;
 	c->node = page_to_nid(c->page);
 	slab_unlock(c->page);
 	return object;
@@ -1550,7 +1574,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1614,7 +1638,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior))
+	if (unlikely(prior == page->end))
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
@@ -1622,7 +1646,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	return;
 
 slab_empty:
-	if (prior)
+	if (prior != page->end)
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1842,7 +1866,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	c->freelist = (void *)PAGE_MAPPING_ANON;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
@@ -3105,7 +3129,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
 	void *p;
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -3385,7 +3409,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
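
The new helpers encode the end marker as the slab's own base address with bit 0 (PAGE_MAPPING_ANON) set, so every slab page gets a distinct, non-NULL sentinel while page_mapping() still treats the field as anonymous and returns NULL. The standalone userspace sketch below illustrates only that encoding; it is not kernel code. struct fake_page, the 64-byte buffer, and main() are illustrative stand-ins, and PAGE_MAPPING_ANON is assumed to be bit 0 as in the kernel headers.

#include <assert.h>
#include <stdalign.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON 1		/* bit 0, as the patch assumes */

struct fake_page {
	void *end;			/* stand-in for page->end */
};

/* Mirrors the patch's is_end(): the marker is any pointer with bit 0 set. */
static int is_end(void *addr)
{
	return (unsigned long)addr & PAGE_MAPPING_ANON;
}

/* Mirrors the patch's slab_address(): strip bit 0 to recover the base. */
static void *slab_address(struct fake_page *page)
{
	return (char *)page->end - PAGE_MAPPING_ANON;
}

int main(void)
{
	/* A real slab base is page aligned; aligning keeps bit 0 clear here. */
	alignas(64) char slab[64];
	struct fake_page page;

	page.end = slab + 1;		/* base + 1: bit 0 set, unique per slab */

	assert(is_end(page.end));	/* the sentinel is recognizable */
	assert(!is_end(slab));		/* ordinary object pointers are not */
	assert(slab_address(&page) == (void *)slab);

	/* Freelist walks in the patch terminate on page->end instead of NULL. */
	void *fp = page.end;		/* an empty freelist */
	printf("freelist empty: %s\n", fp == page.end ? "yes" : "no");
	return 0;
}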