Commit 8a13a4cc authored by Christoph Lameter, committed by Pekka Enberg

mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists

Do the initial settings of the fields in common code. This will allow us
to push more processing into common code later and improve readability.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent 278b1bb1
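To make the shape of the refactoring concrete before the diff, here is a minimal, self-contained userspace sketch of the pattern this commit introduces. It is not code from the patch; my_kmem_cache_create and my_cache_create are invented stand-ins for the common-code entry point and the per-allocator hook, and the struct is a simplified model of struct kmem_cache. The point it illustrates: common code does the initial field settings once, so the allocator-specific creation function shrinks to taking only the flags.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct kmem_cache; field names mirror the patch. */
struct kmem_cache {
	const char *name;
	size_t size;
	size_t object_size;
	size_t align;
	unsigned long flags;
};

/*
 * Stand-in for the per-allocator hook (__kmem_cache_create after the
 * patch): it takes only the flags and reads name/size/align from the
 * struct that common code already filled in.
 */
static int my_cache_create(struct kmem_cache *s, unsigned long flags)
{
	s->flags = flags;
	printf("creating cache %s: size=%zu align=%zu\n",
	       s->name, s->size, s->align);
	return 0;
}

/*
 * Stand-in for the common layer (kmem_cache_create in mm/slab_common.c):
 * do the initial settings of the fields in one place, then call the hook.
 */
static struct kmem_cache *my_kmem_cache_create(const char *name, size_t size,
					       size_t align, unsigned long flags)
{
	struct kmem_cache *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->object_size = s->size = size;
	s->align = align;
	s->name = strdup(name);
	if (!s->name || my_cache_create(s, flags)) {
		free((void *)s->name);
		free(s);
		return NULL;
	}
	return s;
}

int main(void)
{
	struct kmem_cache *s = my_kmem_cache_create("demo", 64, 8, 0);

	return s ? 0 : 1;
}

Once every allocator reads the same pre-initialized fields instead of its own parameter list, later commits can hoist more of this setup into common code, which is the readability gain the commit message refers to. The actual patch follows.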
mm/slab.c

@@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void)
 	 */
 	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);

 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);

 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
-			sizes[INDEX_L3].cs_size,
-			ARCH_KMALLOC_MINALIGN,
-			ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-			NULL);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}

@@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void)
 		 */
 		if (!sizes->cs_cachep) {
 			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			__kmem_cache_create(sizes->cs_cachep, names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
 		__kmem_cache_create(sizes->cs_dmacachep,
-			       names->name_dma,
-			       sizes->cs_size,
-			       ARCH_KMALLOC_MINALIGN,
-			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-					SLAB_PANIC,
-			       NULL);
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
 		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;

@@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 int
-__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
 	gfp_t gfp;
 	int err;
+	size_t size = cachep->size;

 #if DEBUG
 #if FORCED_DEBUG

@@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))

@@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;

 	if (slab_is_available())
 		gfp = GFP_KERNEL;

@@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 		gfp = GFP_NOWAIT;

 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;

 #if DEBUG
 	/*

@@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;

-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);

-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);

-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
+	if (!cachep->num)
 		return -E2BIG;
-	}

 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);

@@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;

@@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 	cachep->refcount = 1;

 	err = setup_cpu_cache(cachep, gfp);
mm/slab.h

@@ -33,8 +33,7 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;

 /* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, const char *name,
-	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
mm/slab_common.c

@@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
-	char *n;

 	get_online_cpus();
 	mutex_lock(&slab_mutex);

@@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		goto out_locked;

-	n = kstrdup(name, GFP_KERNEL);
-	if (!n) {
-		err = -ENOMEM;
-		goto out_locked;
-	}
-
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
 		goto out_locked;

 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
 		if (!err)
 			list_add(&s->list, &slab_caches);
 		else {
-			kfree(n);
+			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
 		}
-	} else {
-		kfree(n);
+	} else
 		err = -ENOMEM;
-	}

 out_locked:
 	mutex_unlock(&slab_mutex);
mm/slob.c

@@ -508,17 +508,15 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);

-int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	c->name = name;
-	c->size = size;
+	size_t align = c->size;
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	c->ctor = ctor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < ARCH_SLAB_MINALIGN)
mm/slub.c

@@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 }

-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;

 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))

@@ -3115,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
 	return -EINVAL;
 }

@@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;

 	list_add(&s->list, &slab_caches);

@@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;

-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+			sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);

 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);

@@ -3729,8 +3726,10 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;

 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);

@@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }

-int __kmem_cache_create(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 {
-	return kmem_cache_open(s, name, size, align, flags, ctor);
+	return kmem_cache_open(s, flags);
 }

 #ifdef CONFIG_SMP