Commit 434e245d, authored by Christoph Lameter, committed by Linus Torvalds

SLUB: Do not allocate object bit array on stack

The number of objects per slab increases with the current patches in mm, since we allow up to order-3 allocations by default.  Further patches in mm allow the use of 2M or larger slabs.  Slab validation needs a per-object bitmap in order to check a slab.  With up to 64k objects per slab, this means a potential requirement of 8K of stack space (65536 bits = 8192 bytes; see the sketch below).  That does not look good.

Allocate the bit arrays via kmalloc.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 94f6030c
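To make the sizing concrete, here is a minimal userspace sketch of the arithmetic and of the heap-allocation pattern the patch adopts. BITS_TO_LONGS is re-derived locally and calloc stands in for kmalloc(..., GFP_KERNEL), so this is an illustration under those assumptions, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Userspace stand-ins for the kernel's BITS_PER_LONG and BITS_TO_LONGS():
 * how many unsigned longs are needed to hold nr bits, rounding up. */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned long objects = 64 * 1024;      /* worst case cited above */
        size_t bytes = BITS_TO_LONGS(objects) * sizeof(unsigned long);

        /* DECLARE_BITMAP(map, objects) would have put an array of this
         * size on the stack: 65536 bits -> 8192 bytes. */
        printf("bitmap for %lu objects: %zu bytes\n", objects, bytes);

        /* The patch takes the bitmap from the heap instead; in the kernel
         * this is kmalloc(BITS_TO_LONGS(s->objects) * sizeof(unsigned long),
         * GFP_KERNEL), with calloc standing in here. */
        unsigned long *map = calloc(BITS_TO_LONGS(objects),
                                    sizeof(unsigned long));
        if (!map)
                return 1;       /* the kernel path returns -ENOMEM */

        /* ... validation would set and test per-object bits in map ... */
        free(map);
        return 0;
}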
mm/slub.c
@@ -2764,11 +2764,11 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+                                               unsigned long *map)
 {
         void *p;
         void *addr = page_address(page);
-        DECLARE_BITMAP(map, s->objects);
 
         if (!check_slab(s, page) ||
                         !on_freelist(s, page, NULL))
@@ -2790,10 +2790,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
         return 1;
 }
 
-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+                                               unsigned long *map)
 {
         if (slab_trylock(page)) {
-                validate_slab(s, page);
+                validate_slab(s, page, map);
                 slab_unlock(page);
         } else
                 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2810,7 +2811,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
         }
 }
 
-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+                struct kmem_cache_node *n, unsigned long *map)
 {
         unsigned long count = 0;
         struct page *page;
@@ -2819,7 +2821,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 
         spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry(page, &n->partial, lru) {
-                validate_slab_slab(s, page);
+                validate_slab_slab(s, page, map);
                 count++;
         }
         if (count != n->nr_partial)
@@ -2830,7 +2832,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
                 goto out;
 
         list_for_each_entry(page, &n->full, lru) {
-                validate_slab_slab(s, page);
+                validate_slab_slab(s, page, map);
                 count++;
         }
         if (count != atomic_long_read(&n->nr_slabs))
@@ -2843,17 +2845,23 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
         return count;
 }
 
-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
 {
         int node;
         unsigned long count = 0;
+        unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+                                sizeof(unsigned long), GFP_KERNEL);
+
+        if (!map)
+                return -ENOMEM;
 
         flush_all(s);
         for_each_online_node(node) {
                 struct kmem_cache_node *n = get_node(s, node);
 
-                count += validate_slab_node(s, n);
+                count += validate_slab_node(s, n, map);
         }
+        kfree(map);
         return count;
 }
 
@@ -3467,11 +3475,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
 static ssize_t validate_store(struct kmem_cache *s,
                                 const char *buf, size_t length)
 {
-        if (buf[0] == '1')
-                validate_slab_cache(s);
-        else
-                return -EINVAL;
-        return length;
+        int ret = -EINVAL;
+
+        if (buf[0] == '1') {
+                ret = validate_slab_cache(s);
+                if (ret >= 0)
+                        ret = length;
+        }
+        return ret;
 }
 
 SLAB_ATTR(validate);
...
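As a usage note: the validation pass is triggered from userspace by writing '1' to the cache's sysfs validate attribute created by SLAB_ATTR(validate) above (in kernels of this vintage the path was /sys/slab/<cache>/validate, later /sys/kernel/slab/<cache>/validate; the exact path is an assumption here and varies by kernel version). The return-type change from unsigned long to long lets validate_slab_cache() report -ENOMEM when the bitmap allocation fails, and validate_store() now propagates that error to the writer instead of unconditionally reporting success.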