commit 2ed3a4ef  Author: Christoph Lameter  Committer: Linus Torvalds

[PATCH] slab: do not panic when alloc_kmemlist fails and slab is up

It is fairly easy to get a system to oops by simply sizing a cache via
/proc in such a way that one of the caches (shared is easiest) becomes
bigger than the maximum allowed slab allocation size.  This occurs because
enable_cpucache() fails if it cannot reallocate some caches.

However, enable_cpucache() is used for multiple purposes: resizing caches,
cache creation and bootstrap.

If the slab is already up then we already have working caches.  The resize
can fail without a problem.  We just need to return the proper error code.
F.e.  after this patch:

# echo "size-64 10000 50 1000" >/proc/slabinfo
-bash: echo: write error: Cannot allocate memory

notice no OOPS.
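
The numbers in that echo make the failure concrete. The arithmetic below is illustrative only (the exact header size and kmalloc limit depend on the configuration; a 64-bit box is assumed):

/*
 * Worked example (illustrative, not part of the patch):
 *
 *   echo "size-64 10000 50 1000"  ->  limit = 10000, batchcount = 50,
 *                                     shared = 1000
 *
 * alloc_kmemlist() then wants a shared array cache of roughly
 *
 *   shared * batchcount * sizeof(void *) = 1000 * 50 * 8 = 400000 bytes
 *
 * plus the struct array_cache header.  That is well past the largest
 * general-purpose kmalloc cache (128 KB on a typical configuration of
 * this era), so the allocation fails -- and before this patch the
 * failure path ended in BUG().
 */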

If we are doing a kmem_cache_create() then we also should not panic but
return -ENOMEM.
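
Note that kmem_cache_create() itself reports the failure to its callers by returning NULL (and still honors SLAB_PANIC); the -ENOMEM originates in setup_cpu_cache() and triggers the cleanup path. A minimal, hypothetical caller -- the name foo and its struct are made up for illustration:

/* Hypothetical caller; "foo" and struct foo are illustrative only. */
struct foo { int x; };
static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
        foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!foo_cache)         /* e.g. setup_cpu_cache() ran out of memory */
                return -ENOMEM;
        return 0;
}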

If on the other hand we do not have a fully bootstrapped slab allocator yet
then we should indeed panic since we are unable to bring up the slab to its
full functionality.
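
All three cases meet in setup_cpu_cache(), which the patch below converts to return an error code. Condensed, the resulting policy looks like this (a sketch, not a verbatim copy of mm/slab.c):

/* Sketch of the policy after this patch (condensed, not verbatim). */
static int setup_cpu_cache(struct kmem_cache *cachep)
{
        if (g_cpucache_up == FULL)              /* slab fully up: resizing */
                return enable_cpucache(cachep); /* may fail; pass it up */

        /* Bootstrap paths use static/early storage and cannot fail here;
         * kmem_cache_init() still BUG()s if the final resize fails. */
        return 0;
}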

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 117f6eb1
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static void enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
 
 /*
@@ -1490,7 +1490,8 @@ void __init kmem_cache_init(void)
 	{
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			enable_cpucache(cachep);
+			if (enable_cpucache(cachep))
+				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -1924,12 +1925,11 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static void setup_cpu_cache(struct kmem_cache *cachep)
+static int setup_cpu_cache(struct kmem_cache *cachep)
 {
-	if (g_cpucache_up == FULL) {
-		enable_cpucache(cachep);
-		return;
-	}
+	if (g_cpucache_up == FULL)
+		return enable_cpucache(cachep);
+
 	if (g_cpucache_up == NONE) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
@@ -1976,6 +1976,7 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
 	cpu_cache_get(cachep)->touched = 0;
 	cachep->batchcount = 1;
 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
+	return 0;
 }
 
 /**
@@ -2242,8 +2243,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->dtor = dtor;
 	cachep->name = name;
 
-
-	setup_cpu_cache(cachep);
+	if (setup_cpu_cache(cachep)) {
+		__kmem_cache_destroy(cachep);
+		cachep = NULL;
+		goto oops;
+	}
 
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
@@ -3693,7 +3697,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared)
 {
 	struct ccupdate_struct new;
-	int i, err;
+	int i;
 
 	memset(&new.new, 0, sizeof(new.new));
 	for_each_online_cpu(i) {
@@ -3724,17 +3728,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 
-	err = alloc_kmemlist(cachep);
-	if (err) {
-		printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
-		       cachep->name, -err);
-		BUG();
-	}
-	return 0;
+	return alloc_kmemlist(cachep);
 }
 
 /* Called with cache_chain_mutex held always */
-static void enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep)
 {
 	int err;
 	int limit, shared;
@@ -3786,6 +3784,7 @@ static void enable_cpucache(struct kmem_cache *cachep)
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
+	return err;
 }
 
 /*