Commit ce8eb6c4 authored by Christoph Lameter, committed by Pekka Enberg

slab: Rename list3/l3 to node

The list3 or l3 pointers point to per-node structures. Reflect
that in the names of the variables used.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent 2c59dd65
...@@ -306,13 +306,13 @@ struct kmem_cache_node { ...@@ -306,13 +306,13 @@ struct kmem_cache_node {
* Need this for bootstrapping a per node allocator. * Need this for bootstrapping a per node allocator.
*/ */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES) #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata initkmem_list3[NUM_INIT_LISTS]; static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define CACHE_CACHE 0 #define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES #define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES) #define SIZE_NODE (2 * MAX_NUMNODES)
static int drain_freelist(struct kmem_cache *cache, static int drain_freelist(struct kmem_cache *cache,
struct kmem_cache_node *l3, int tofree); struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len, static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node); int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
...@@ -321,9 +321,9 @@ static void cache_reap(struct work_struct *unused); ...@@ -321,9 +321,9 @@ static void cache_reap(struct work_struct *unused);
static int slab_early_init = 1; static int slab_early_init = 1;
#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_L3 kmalloc_index(sizeof(struct kmem_cache_node)) #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
static void kmem_list3_init(struct kmem_cache_node *parent) static void kmem_cache_node_init(struct kmem_cache_node *parent)
{ {
INIT_LIST_HEAD(&parent->slabs_full); INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial); INIT_LIST_HEAD(&parent->slabs_partial);
...@@ -538,15 +538,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep, ...@@ -538,15 +538,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
int q) int q)
{ {
struct array_cache **alc; struct array_cache **alc;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
int r; int r;
l3 = cachep->node[q]; n = cachep->node[q];
if (!l3) if (!n)
return; return;
lockdep_set_class(&l3->list_lock, l3_key); lockdep_set_class(&n->list_lock, l3_key);
alc = l3->alien; alc = n->alien;
/* /*
* FIXME: This check for BAD_ALIEN_MAGIC * FIXME: This check for BAD_ALIEN_MAGIC
* should go away when common slab code is taught to * should go away when common slab code is taught to
...@@ -583,14 +583,14 @@ static void init_node_lock_keys(int q) ...@@ -583,14 +583,14 @@ static void init_node_lock_keys(int q)
return; return;
for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) { for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
struct kmem_cache_node *l3; struct kmem_cache_node *n;
struct kmem_cache *cache = kmalloc_caches[i]; struct kmem_cache *cache = kmalloc_caches[i];
if (!cache) if (!cache)
continue; continue;
l3 = cache->node[q]; n = cache->node[q];
if (!l3 || OFF_SLAB(cache)) if (!n || OFF_SLAB(cache))
continue; continue;
slab_set_lock_classes(cache, &on_slab_l3_key, slab_set_lock_classes(cache, &on_slab_l3_key,
...@@ -857,29 +857,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp) ...@@ -857,29 +857,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
static void recheck_pfmemalloc_active(struct kmem_cache *cachep, static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac) struct array_cache *ac)
{ {
struct kmem_cache_node *l3 = cachep->node[numa_mem_id()]; struct kmem_cache_node *n = cachep->node[numa_mem_id()];
struct slab *slabp; struct slab *slabp;
unsigned long flags; unsigned long flags;
if (!pfmemalloc_active) if (!pfmemalloc_active)
return; return;
spin_lock_irqsave(&l3->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &l3->slabs_full, list) list_for_each_entry(slabp, &n->slabs_full, list)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(slabp))
goto out; goto out;
list_for_each_entry(slabp, &l3->slabs_partial, list) list_for_each_entry(slabp, &n->slabs_partial, list)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(slabp))
goto out; goto out;
list_for_each_entry(slabp, &l3->slabs_free, list) list_for_each_entry(slabp, &n->slabs_free, list)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(slabp))
goto out; goto out;
pfmemalloc_active = false; pfmemalloc_active = false;
out: out:
spin_unlock_irqrestore(&l3->list_lock, flags); spin_unlock_irqrestore(&n->list_lock, flags);
} }
static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
...@@ -890,7 +890,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, ...@@ -890,7 +890,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
if (unlikely(is_obj_pfmemalloc(objp))) { if (unlikely(is_obj_pfmemalloc(objp))) {
struct kmem_cache_node *l3; struct kmem_cache_node *n;
if (gfp_pfmemalloc_allowed(flags)) { if (gfp_pfmemalloc_allowed(flags)) {
clear_obj_pfmemalloc(&objp); clear_obj_pfmemalloc(&objp);
...@@ -912,8 +912,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, ...@@ -912,8 +912,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
* If there are empty slabs on the slabs_free list and we are * If there are empty slabs on the slabs_free list and we are
* being forced to refill the cache, mark this one !pfmemalloc. * being forced to refill the cache, mark this one !pfmemalloc.
*/ */
l3 = cachep->node[numa_mem_id()]; n = cachep->node[numa_mem_id()];
if (!list_empty(&l3->slabs_free) && force_refill) { if (!list_empty(&n->slabs_free) && force_refill) {
struct slab *slabp = virt_to_slab(objp); struct slab *slabp = virt_to_slab(objp);
ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
clear_obj_pfmemalloc(&objp); clear_obj_pfmemalloc(&objp);
...@@ -990,7 +990,7 @@ static int transfer_objects(struct array_cache *to, ...@@ -990,7 +990,7 @@ static int transfer_objects(struct array_cache *to,
#ifndef CONFIG_NUMA #ifndef CONFIG_NUMA
#define drain_alien_cache(cachep, alien) do { } while (0) #define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0) #define reap_alien(cachep, n) do { } while (0)
static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{ {
...@@ -1062,33 +1062,33 @@ static void free_alien_cache(struct array_cache **ac_ptr) ...@@ -1062,33 +1062,33 @@ static void free_alien_cache(struct array_cache **ac_ptr)
static void __drain_alien_cache(struct kmem_cache *cachep, static void __drain_alien_cache(struct kmem_cache *cachep,
struct array_cache *ac, int node) struct array_cache *ac, int node)
{ {
struct kmem_cache_node *rl3 = cachep->node[node]; struct kmem_cache_node *n = cachep->node[node];
if (ac->avail) { if (ac->avail) {
spin_lock(&rl3->list_lock); spin_lock(&n->list_lock);
/* /*
* Stuff objects into the remote nodes shared array first. * Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects * That way we could avoid the overhead of putting the objects
* into the free lists and getting them back later. * into the free lists and getting them back later.
*/ */
if (rl3->shared) if (n->shared)
transfer_objects(rl3->shared, ac, ac->limit); transfer_objects(n->shared, ac, ac->limit);
free_block(cachep, ac->entry, ac->avail, node); free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0; ac->avail = 0;
spin_unlock(&rl3->list_lock); spin_unlock(&n->list_lock);
} }
} }
/* /*
* Called from cache_reap() to regularly drain alien caches round robin. * Called from cache_reap() to regularly drain alien caches round robin.
*/ */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *l3) static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{ {
int node = __this_cpu_read(slab_reap_node); int node = __this_cpu_read(slab_reap_node);
if (l3->alien) { if (n->alien) {
struct array_cache *ac = l3->alien[node]; struct array_cache *ac = n->alien[node];
if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
__drain_alien_cache(cachep, ac, node); __drain_alien_cache(cachep, ac, node);
...@@ -1118,7 +1118,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) ...@@ -1118,7 +1118,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{ {
struct slab *slabp = virt_to_slab(objp); struct slab *slabp = virt_to_slab(objp);
int nodeid = slabp->nodeid; int nodeid = slabp->nodeid;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
struct array_cache *alien = NULL; struct array_cache *alien = NULL;
int node; int node;
...@@ -1131,10 +1131,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) ...@@ -1131,10 +1131,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
if (likely(slabp->nodeid == node)) if (likely(slabp->nodeid == node))
return 0; return 0;
l3 = cachep->node[node]; n = cachep->node[node];
STATS_INC_NODEFREES(cachep); STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) { if (n->alien && n->alien[nodeid]) {
alien = l3->alien[nodeid]; alien = n->alien[nodeid];
spin_lock(&alien->lock); spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit)) { if (unlikely(alien->avail == alien->limit)) {
STATS_INC_ACOVERFLOW(cachep); STATS_INC_ACOVERFLOW(cachep);
...@@ -1153,7 +1153,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) ...@@ -1153,7 +1153,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
/* /*
* Allocates and initializes node for a node on each slab cache, used for * Allocates and initializes node for a node on each slab cache, used for
* either memory or cpu hotplug. If memory is being hot-added, the kmem_list3 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
* will be allocated off-node since memory is not yet online for the new node. * will be allocated off-node since memory is not yet online for the new node.
* When hotplugging memory or a cpu, existing node are not replaced if * When hotplugging memory or a cpu, existing node are not replaced if
* already in use. * already in use.
...@@ -1163,7 +1163,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) ...@@ -1163,7 +1163,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
static int init_cache_node_node(int node) static int init_cache_node_node(int node)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
const int memsize = sizeof(struct kmem_cache_node); const int memsize = sizeof(struct kmem_cache_node);
list_for_each_entry(cachep, &slab_caches, list) { list_for_each_entry(cachep, &slab_caches, list) {
...@@ -1173,11 +1173,11 @@ static int init_cache_node_node(int node) ...@@ -1173,11 +1173,11 @@ static int init_cache_node_node(int node)
* node has not already allocated this * node has not already allocated this
*/ */
if (!cachep->node[node]) { if (!cachep->node[node]) {
l3 = kmalloc_node(memsize, GFP_KERNEL, node); n = kmalloc_node(memsize, GFP_KERNEL, node);
if (!l3) if (!n)
return -ENOMEM; return -ENOMEM;
kmem_list3_init(l3); kmem_cache_node_init(n);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3; ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
/* /*
...@@ -1185,7 +1185,7 @@ static int init_cache_node_node(int node) ...@@ -1185,7 +1185,7 @@ static int init_cache_node_node(int node)
* go. slab_mutex is sufficient * go. slab_mutex is sufficient
* protection here. * protection here.
*/ */
cachep->node[node] = l3; cachep->node[node] = n;
} }
spin_lock_irq(&cachep->node[node]->list_lock); spin_lock_irq(&cachep->node[node]->list_lock);
...@@ -1200,7 +1200,7 @@ static int init_cache_node_node(int node) ...@@ -1200,7 +1200,7 @@ static int init_cache_node_node(int node)
static void __cpuinit cpuup_canceled(long cpu) static void __cpuinit cpuup_canceled(long cpu)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
struct kmem_cache_node *l3 = NULL; struct kmem_cache_node *n = NULL;
int node = cpu_to_mem(cpu); int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node); const struct cpumask *mask = cpumask_of_node(node);
...@@ -1212,34 +1212,34 @@ static void __cpuinit cpuup_canceled(long cpu) ...@@ -1212,34 +1212,34 @@ static void __cpuinit cpuup_canceled(long cpu)
/* cpu is dead; no one can alloc from it. */ /* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu]; nc = cachep->array[cpu];
cachep->array[cpu] = NULL; cachep->array[cpu] = NULL;
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
goto free_array_cache; goto free_array_cache;
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_list3 */ /* Free limit for this kmem_cache_node */
l3->free_limit -= cachep->batchcount; n->free_limit -= cachep->batchcount;
if (nc) if (nc)
free_block(cachep, nc->entry, nc->avail, node); free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) { if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
goto free_array_cache; goto free_array_cache;
} }
shared = l3->shared; shared = n->shared;
if (shared) { if (shared) {
free_block(cachep, shared->entry, free_block(cachep, shared->entry,
shared->avail, node); shared->avail, node);
l3->shared = NULL; n->shared = NULL;
} }
alien = l3->alien; alien = n->alien;
l3->alien = NULL; n->alien = NULL;
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
kfree(shared); kfree(shared);
if (alien) { if (alien) {
...@@ -1255,17 +1255,17 @@ static void __cpuinit cpuup_canceled(long cpu) ...@@ -1255,17 +1255,17 @@ static void __cpuinit cpuup_canceled(long cpu)
* shrink each nodelist to its limit. * shrink each nodelist to its limit.
*/ */
list_for_each_entry(cachep, &slab_caches, list) { list_for_each_entry(cachep, &slab_caches, list) {
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
continue; continue;
drain_freelist(cachep, l3, l3->free_objects); drain_freelist(cachep, n, n->free_objects);
} }
} }
static int __cpuinit cpuup_prepare(long cpu) static int __cpuinit cpuup_prepare(long cpu)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
struct kmem_cache_node *l3 = NULL; struct kmem_cache_node *n = NULL;
int node = cpu_to_mem(cpu); int node = cpu_to_mem(cpu);
int err; int err;
...@@ -1273,7 +1273,7 @@ static int __cpuinit cpuup_prepare(long cpu) ...@@ -1273,7 +1273,7 @@ static int __cpuinit cpuup_prepare(long cpu)
* We need to do this right in the beginning since * We need to do this right in the beginning since
* alloc_arraycache's are going to use this list. * alloc_arraycache's are going to use this list.
* kmalloc_node allows us to add the slab to the right * kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3 * kmem_cache_node and not this cpu's kmem_cache_node
*/ */
err = init_cache_node_node(node); err = init_cache_node_node(node);
if (err < 0) if (err < 0)
...@@ -1310,25 +1310,25 @@ static int __cpuinit cpuup_prepare(long cpu) ...@@ -1310,25 +1310,25 @@ static int __cpuinit cpuup_prepare(long cpu)
} }
} }
cachep->array[cpu] = nc; cachep->array[cpu] = nc;
l3 = cachep->node[node]; n = cachep->node[node];
BUG_ON(!l3); BUG_ON(!n);
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
if (!l3->shared) { if (!n->shared) {
/* /*
* We are serialised from CPU_DEAD or * We are serialised from CPU_DEAD or
* CPU_UP_CANCELLED by the cpucontrol lock * CPU_UP_CANCELLED by the cpucontrol lock
*/ */
l3->shared = shared; n->shared = shared;
shared = NULL; shared = NULL;
} }
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
if (!l3->alien) { if (!n->alien) {
l3->alien = alien; n->alien = alien;
alien = NULL; alien = NULL;
} }
#endif #endif
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
kfree(shared); kfree(shared);
free_alien_cache(alien); free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS) if (cachep->flags & SLAB_DEBUG_OBJECTS)
...@@ -1383,9 +1383,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, ...@@ -1383,9 +1383,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
case CPU_DEAD_FROZEN: case CPU_DEAD_FROZEN:
/* /*
* Even if all the cpus of a node are down, we don't free the * Even if all the cpus of a node are down, we don't free the
* kmem_list3 of any cache. This to avoid a race between * kmem_cache_node of any cache. This to avoid a race between
* cpu_down, and a kmalloc allocation from another cpu for * cpu_down, and a kmalloc allocation from another cpu for
* memory from the node of the cpu going down. The list3 * memory from the node of the cpu going down. The node
* structure is usually allocated from kmem_cache_create() and * structure is usually allocated from kmem_cache_create() and
* gets destroyed at kmem_cache_destroy(). * gets destroyed at kmem_cache_destroy().
*/ */
...@@ -1419,16 +1419,16 @@ static int __meminit drain_cache_node_node(int node) ...@@ -1419,16 +1419,16 @@ static int __meminit drain_cache_node_node(int node)
int ret = 0; int ret = 0;
list_for_each_entry(cachep, &slab_caches, list) { list_for_each_entry(cachep, &slab_caches, list) {
struct kmem_cache_node *l3; struct kmem_cache_node *n;
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
continue; continue;
drain_freelist(cachep, l3, l3->free_objects); drain_freelist(cachep, n, n->free_objects);
if (!list_empty(&l3->slabs_full) || if (!list_empty(&n->slabs_full) ||
!list_empty(&l3->slabs_partial)) { !list_empty(&n->slabs_partial)) {
ret = -EBUSY; ret = -EBUSY;
break; break;
} }
...@@ -1470,7 +1470,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self, ...@@ -1470,7 +1470,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
/* /*
* swap the static kmem_list3 with kmalloced memory * swap the static kmem_cache_node with kmalloced memory
*/ */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
int nodeid) int nodeid)
...@@ -1491,15 +1491,15 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node * ...@@ -1491,15 +1491,15 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
} }
/* /*
* For setting up all the kmem_list3s for cache whose buffer_size is same as * For setting up all the kmem_cache_node for cache whose buffer_size is same as
* size of kmem_list3. * size of kmem_cache_node.
*/ */
static void __init set_up_list3s(struct kmem_cache *cachep, int index) static void __init set_up_node(struct kmem_cache *cachep, int index)
{ {
int node; int node;
for_each_online_node(node) { for_each_online_node(node) {
cachep->node[node] = &initkmem_list3[index + node]; cachep->node[node] = &init_kmem_cache_node[index + node];
cachep->node[node]->next_reap = jiffies + cachep->node[node]->next_reap = jiffies +
REAPTIMEOUT_LIST3 + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3; ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
...@@ -1530,9 +1530,9 @@ void __init kmem_cache_init(void) ...@@ -1530,9 +1530,9 @@ void __init kmem_cache_init(void)
use_alien_caches = 0; use_alien_caches = 0;
for (i = 0; i < NUM_INIT_LISTS; i++) for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_list3_init(&initkmem_list3[i]); kmem_cache_node_init(&init_kmem_cache_node[i]);
set_up_list3s(kmem_cache, CACHE_CACHE); set_up_node(kmem_cache, CACHE_CACHE);
/* /*
* Fragmentation resistance on low memory - only use bigger * Fragmentation resistance on low memory - only use bigger
...@@ -1548,7 +1548,7 @@ void __init kmem_cache_init(void) ...@@ -1548,7 +1548,7 @@ void __init kmem_cache_init(void)
* kmem_cache structures of all caches, except kmem_cache itself: * kmem_cache structures of all caches, except kmem_cache itself:
* kmem_cache is statically allocated. * kmem_cache is statically allocated.
* Initially an __init data area is used for the head array and the * Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated * kmem_cache_node structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap. * array at the end of the bootstrap.
* 2) Create the first kmalloc cache. * 2) Create the first kmalloc cache.
* The struct kmem_cache for the new cache is allocated normally. * The struct kmem_cache for the new cache is allocated normally.
...@@ -1557,7 +1557,7 @@ void __init kmem_cache_init(void) ...@@ -1557,7 +1557,7 @@ void __init kmem_cache_init(void)
* head arrays. * head arrays.
* 4) Replace the __init data head arrays for kmem_cache and the first * 4) Replace the __init data head arrays for kmem_cache and the first
* kmalloc cache with kmalloc allocated arrays. * kmalloc cache with kmalloc allocated arrays.
* 5) Replace the __init data for kmem_list3 for kmem_cache and * 5) Replace the __init data for kmem_cache_node for kmem_cache and
* the other cache's with kmalloc allocated memory. * the other cache's with kmalloc allocated memory.
* 6) Resize the head arrays of the kmalloc caches to their final sizes. * 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/ */
...@@ -1577,17 +1577,17 @@ void __init kmem_cache_init(void) ...@@ -1577,17 +1577,17 @@ void __init kmem_cache_init(void)
/* /*
* Initialize the caches that provide memory for the array cache and the * Initialize the caches that provide memory for the array cache and the
* kmem_list3 structures first. Without this, further allocations will * kmem_cache_node structures first. Without this, further allocations will
* bug. * bug.
*/ */
kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
if (INDEX_AC != INDEX_L3) if (INDEX_AC != INDEX_NODE)
kmalloc_caches[INDEX_L3] = kmalloc_caches[INDEX_NODE] =
create_kmalloc_cache("kmalloc-l3", create_kmalloc_cache("kmalloc-node",
kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS); kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
slab_early_init = 0; slab_early_init = 0;
...@@ -1619,19 +1619,19 @@ void __init kmem_cache_init(void) ...@@ -1619,19 +1619,19 @@ void __init kmem_cache_init(void)
kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
} }
/* 5) Replace the bootstrap kmem_list3's */ /* 5) Replace the bootstrap kmem_cache_node */
{ {
int nid; int nid;
for_each_online_node(nid) { for_each_online_node(nid) {
init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
init_list(kmalloc_caches[INDEX_AC], init_list(kmalloc_caches[INDEX_AC],
&initkmem_list3[SIZE_AC + nid], nid); &init_kmem_cache_node[SIZE_AC + nid], nid);
if (INDEX_AC != INDEX_L3) { if (INDEX_AC != INDEX_NODE) {
init_list(kmalloc_caches[INDEX_L3], init_list(kmalloc_caches[INDEX_NODE],
&initkmem_list3[SIZE_L3 + nid], nid); &init_kmem_cache_node[SIZE_NODE + nid], nid);
} }
} }
} }
...@@ -1697,7 +1697,7 @@ __initcall(cpucache_init); ...@@ -1697,7 +1697,7 @@ __initcall(cpucache_init);
static noinline void static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{ {
struct kmem_cache_node *l3; struct kmem_cache_node *n;
struct slab *slabp; struct slab *slabp;
unsigned long flags; unsigned long flags;
int node; int node;
...@@ -1712,24 +1712,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) ...@@ -1712,24 +1712,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
unsigned long active_objs = 0, num_objs = 0, free_objects = 0; unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
unsigned long active_slabs = 0, num_slabs = 0; unsigned long active_slabs = 0, num_slabs = 0;
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
continue; continue;
spin_lock_irqsave(&l3->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &l3->slabs_full, list) { list_for_each_entry(slabp, &n->slabs_full, list) {
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &l3->slabs_partial, list) { list_for_each_entry(slabp, &n->slabs_partial, list) {
active_objs += slabp->inuse; active_objs += slabp->inuse;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &l3->slabs_free, list) list_for_each_entry(slabp, &n->slabs_free, list)
num_slabs++; num_slabs++;
free_objects += l3->free_objects; free_objects += n->free_objects;
spin_unlock_irqrestore(&l3->list_lock, flags); spin_unlock_irqrestore(&n->list_lock, flags);
num_slabs += active_slabs; num_slabs += active_slabs;
num_objs = num_slabs * cachep->num; num_objs = num_slabs * cachep->num;
...@@ -2154,7 +2154,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -2154,7 +2154,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
if (slab_state == DOWN) { if (slab_state == DOWN) {
/* /*
* Note: Creation of first cache (kmem_cache). * Note: Creation of first cache (kmem_cache).
* The setup_list3s is taken care * The setup_node is taken care
* of by the caller of __kmem_cache_create * of by the caller of __kmem_cache_create
*/ */
cachep->array[smp_processor_id()] = &initarray_generic.cache; cachep->array[smp_processor_id()] = &initarray_generic.cache;
...@@ -2168,13 +2168,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -2168,13 +2168,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
cachep->array[smp_processor_id()] = &initarray_generic.cache; cachep->array[smp_processor_id()] = &initarray_generic.cache;
/* /*
* If the cache that's used by kmalloc(sizeof(kmem_list3)) is * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
* the second cache, then we need to set up all its list3s, * the second cache, then we need to set up all its node/,
* otherwise the creation of further caches will BUG(). * otherwise the creation of further caches will BUG().
*/ */
set_up_list3s(cachep, SIZE_AC); set_up_node(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3) if (INDEX_AC == INDEX_NODE)
slab_state = PARTIAL_L3; slab_state = PARTIAL_NODE;
else else
slab_state = PARTIAL_ARRAYCACHE; slab_state = PARTIAL_ARRAYCACHE;
} else { } else {
...@@ -2183,8 +2183,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -2183,8 +2183,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
kmalloc(sizeof(struct arraycache_init), gfp); kmalloc(sizeof(struct arraycache_init), gfp);
if (slab_state == PARTIAL_ARRAYCACHE) { if (slab_state == PARTIAL_ARRAYCACHE) {
set_up_list3s(cachep, SIZE_L3); set_up_node(cachep, SIZE_NODE);
slab_state = PARTIAL_L3; slab_state = PARTIAL_NODE;
} else { } else {
int node; int node;
for_each_online_node(node) { for_each_online_node(node) {
...@@ -2192,7 +2192,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -2192,7 +2192,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
kmalloc_node(sizeof(struct kmem_cache_node), kmalloc_node(sizeof(struct kmem_cache_node),
gfp, node); gfp, node);
BUG_ON(!cachep->node[node]); BUG_ON(!cachep->node[node]);
kmem_list3_init(cachep->node[node]); kmem_cache_node_init(cachep->node[node]);
} }
} }
} }
...@@ -2322,7 +2322,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) ...@@ -2322,7 +2322,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size += BYTES_PER_WORD; size += BYTES_PER_WORD;
} }
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= kmalloc_size(INDEX_L3 + 1) if (size >= kmalloc_size(INDEX_NODE + 1)
&& cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
size = PAGE_SIZE; size = PAGE_SIZE;
...@@ -2457,7 +2457,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) ...@@ -2457,7 +2457,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
#define check_spinlock_acquired_node(x, y) do { } while(0) #define check_spinlock_acquired_node(x, y) do { } while(0)
#endif #endif
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
struct array_cache *ac, struct array_cache *ac,
int force, int node); int force, int node);
...@@ -2477,21 +2477,21 @@ static void do_drain(void *arg) ...@@ -2477,21 +2477,21 @@ static void do_drain(void *arg)
static void drain_cpu_caches(struct kmem_cache *cachep) static void drain_cpu_caches(struct kmem_cache *cachep)
{ {
struct kmem_cache_node *l3; struct kmem_cache_node *n;
int node; int node;
on_each_cpu(do_drain, cachep, 1); on_each_cpu(do_drain, cachep, 1);
check_irq_on(); check_irq_on();
for_each_online_node(node) { for_each_online_node(node) {
l3 = cachep->node[node]; n = cachep->node[node];
if (l3 && l3->alien) if (n && n->alien)
drain_alien_cache(cachep, l3->alien); drain_alien_cache(cachep, n->alien);
} }
for_each_online_node(node) { for_each_online_node(node) {
l3 = cachep->node[node]; n = cachep->node[node];
if (l3) if (n)
drain_array(cachep, l3, l3->shared, 1, node); drain_array(cachep, n, n->shared, 1, node);
} }
} }
...@@ -2502,19 +2502,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep) ...@@ -2502,19 +2502,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
* Returns the actual number of slabs released. * Returns the actual number of slabs released.
*/ */
static int drain_freelist(struct kmem_cache *cache, static int drain_freelist(struct kmem_cache *cache,
struct kmem_cache_node *l3, int tofree) struct kmem_cache_node *n, int tofree)
{ {
struct list_head *p; struct list_head *p;
int nr_freed; int nr_freed;
struct slab *slabp; struct slab *slabp;
nr_freed = 0; nr_freed = 0;
while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
p = l3->slabs_free.prev; p = n->slabs_free.prev;
if (p == &l3->slabs_free) { if (p == &n->slabs_free) {
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
goto out; goto out;
} }
...@@ -2527,8 +2527,8 @@ static int drain_freelist(struct kmem_cache *cache, ...@@ -2527,8 +2527,8 @@ static int drain_freelist(struct kmem_cache *cache,
* Safe to drop the lock. The slab is no longer linked * Safe to drop the lock. The slab is no longer linked
* to the cache. * to the cache.
*/ */
l3->free_objects -= cache->num; n->free_objects -= cache->num;
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
slab_destroy(cache, slabp); slab_destroy(cache, slabp);
nr_freed++; nr_freed++;
} }
...@@ -2540,20 +2540,20 @@ static int drain_freelist(struct kmem_cache *cache, ...@@ -2540,20 +2540,20 @@ static int drain_freelist(struct kmem_cache *cache,
static int __cache_shrink(struct kmem_cache *cachep) static int __cache_shrink(struct kmem_cache *cachep)
{ {
int ret = 0, i = 0; int ret = 0, i = 0;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
drain_cpu_caches(cachep); drain_cpu_caches(cachep);
check_irq_on(); check_irq_on();
for_each_online_node(i) { for_each_online_node(i) {
l3 = cachep->node[i]; n = cachep->node[i];
if (!l3) if (!n)
continue; continue;
drain_freelist(cachep, l3, l3->free_objects); drain_freelist(cachep, n, n->free_objects);
ret += !list_empty(&l3->slabs_full) || ret += !list_empty(&n->slabs_full) ||
!list_empty(&l3->slabs_partial); !list_empty(&n->slabs_partial);
} }
return (ret ? 1 : 0); return (ret ? 1 : 0);
} }
...@@ -2582,7 +2582,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); ...@@ -2582,7 +2582,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
int __kmem_cache_shutdown(struct kmem_cache *cachep) int __kmem_cache_shutdown(struct kmem_cache *cachep)
{ {
int i; int i;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
int rc = __cache_shrink(cachep); int rc = __cache_shrink(cachep);
if (rc) if (rc)
...@@ -2591,13 +2591,13 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) ...@@ -2591,13 +2591,13 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
for_each_online_cpu(i) for_each_online_cpu(i)
kfree(cachep->array[i]); kfree(cachep->array[i]);
/* NUMA: free the list3 structures */ /* NUMA: free the node structures */
for_each_online_node(i) { for_each_online_node(i) {
l3 = cachep->node[i]; n = cachep->node[i];
if (l3) { if (n) {
kfree(l3->shared); kfree(n->shared);
free_alien_cache(l3->alien); free_alien_cache(n->alien);
kfree(l3); kfree(n);
} }
} }
return 0; return 0;
...@@ -2779,7 +2779,7 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2779,7 +2779,7 @@ static int cache_grow(struct kmem_cache *cachep,
struct slab *slabp; struct slab *slabp;
size_t offset; size_t offset;
gfp_t local_flags; gfp_t local_flags;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
/* /*
* Be lazy and only check for valid flags here, keeping it out of the * Be lazy and only check for valid flags here, keeping it out of the
...@@ -2788,17 +2788,17 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2788,17 +2788,17 @@ static int cache_grow(struct kmem_cache *cachep,
BUG_ON(flags & GFP_SLAB_BUG_MASK); BUG_ON(flags & GFP_SLAB_BUG_MASK);
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
/* Take the l3 list lock to change the colour_next on this node */ /* Take the node list lock to change the colour_next on this node */
check_irq_off(); check_irq_off();
l3 = cachep->node[nodeid]; n = cachep->node[nodeid];
spin_lock(&l3->list_lock); spin_lock(&n->list_lock);
/* Get colour for the slab, and cal the next value. */ /* Get colour for the slab, and cal the next value. */
offset = l3->colour_next; offset = n->colour_next;
l3->colour_next++; n->colour_next++;
if (l3->colour_next >= cachep->colour) if (n->colour_next >= cachep->colour)
l3->colour_next = 0; n->colour_next = 0;
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
offset *= cachep->colour_off; offset *= cachep->colour_off;
...@@ -2835,13 +2835,13 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2835,13 +2835,13 @@ static int cache_grow(struct kmem_cache *cachep,
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_disable(); local_irq_disable();
check_irq_off(); check_irq_off();
spin_lock(&l3->list_lock); spin_lock(&n->list_lock);
/* Make slab active. */ /* Make slab active. */
list_add_tail(&slabp->list, &(l3->slabs_free)); list_add_tail(&slabp->list, &(n->slabs_free));
STATS_INC_GROWN(cachep); STATS_INC_GROWN(cachep);
l3->free_objects += cachep->num; n->free_objects += cachep->num;
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
return 1; return 1;
opps1: opps1:
kmem_freepages(cachep, objp); kmem_freepages(cachep, objp);
...@@ -2969,7 +2969,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -2969,7 +2969,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
bool force_refill) bool force_refill)
{ {
int batchcount; int batchcount;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
struct array_cache *ac; struct array_cache *ac;
int node; int node;
...@@ -2988,14 +2988,14 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -2988,14 +2988,14 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
*/ */
batchcount = BATCHREFILL_LIMIT; batchcount = BATCHREFILL_LIMIT;
} }
l3 = cachep->node[node]; n = cachep->node[node];
BUG_ON(ac->avail > 0 || !l3); BUG_ON(ac->avail > 0 || !n);
spin_lock(&l3->list_lock); spin_lock(&n->list_lock);
/* See if we can refill from the shared array */ /* See if we can refill from the shared array */
if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) { if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
l3->shared->touched = 1; n->shared->touched = 1;
goto alloc_done; goto alloc_done;
} }
...@@ -3003,11 +3003,11 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -3003,11 +3003,11 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
struct list_head *entry; struct list_head *entry;
struct slab *slabp; struct slab *slabp;
/* Get slab alloc is to come from. */ /* Get slab alloc is to come from. */
entry = l3->slabs_partial.next; entry = n->slabs_partial.next;
if (entry == &l3->slabs_partial) { if (entry == &n->slabs_partial) {
l3->free_touched = 1; n->free_touched = 1;
entry = l3->slabs_free.next; entry = n->slabs_free.next;
if (entry == &l3->slabs_free) if (entry == &n->slabs_free)
goto must_grow; goto must_grow;
} }
...@@ -3035,15 +3035,15 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -3035,15 +3035,15 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&slabp->list); list_del(&slabp->list);
if (slabp->free == BUFCTL_END) if (slabp->free == BUFCTL_END)
list_add(&slabp->list, &l3->slabs_full); list_add(&slabp->list, &n->slabs_full);
else else
list_add(&slabp->list, &l3->slabs_partial); list_add(&slabp->list, &n->slabs_partial);
} }
must_grow: must_grow:
l3->free_objects -= ac->avail; n->free_objects -= ac->avail;
alloc_done: alloc_done:
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
if (unlikely(!ac->avail)) { if (unlikely(!ac->avail)) {
int x; int x;
...@@ -3301,21 +3301,21 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, ...@@ -3301,21 +3301,21 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
{ {
struct list_head *entry; struct list_head *entry;
struct slab *slabp; struct slab *slabp;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
void *obj; void *obj;
int x; int x;
l3 = cachep->node[nodeid]; n = cachep->node[nodeid];
BUG_ON(!l3); BUG_ON(!n);
retry: retry:
check_irq_off(); check_irq_off();
spin_lock(&l3->list_lock); spin_lock(&n->list_lock);
entry = l3->slabs_partial.next; entry = n->slabs_partial.next;
if (entry == &l3->slabs_partial) { if (entry == &n->slabs_partial) {
l3->free_touched = 1; n->free_touched = 1;
entry = l3->slabs_free.next; entry = n->slabs_free.next;
if (entry == &l3->slabs_free) if (entry == &n->slabs_free)
goto must_grow; goto must_grow;
} }
...@@ -3331,20 +3331,20 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, ...@@ -3331,20 +3331,20 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
obj = slab_get_obj(cachep, slabp, nodeid); obj = slab_get_obj(cachep, slabp, nodeid);
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
l3->free_objects--; n->free_objects--;
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&slabp->list); list_del(&slabp->list);
if (slabp->free == BUFCTL_END) if (slabp->free == BUFCTL_END)
list_add(&slabp->list, &l3->slabs_full); list_add(&slabp->list, &n->slabs_full);
else else
list_add(&slabp->list, &l3->slabs_partial); list_add(&slabp->list, &n->slabs_partial);
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
goto done; goto done;
must_grow: must_grow:
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
if (x) if (x)
goto retry; goto retry;
...@@ -3496,7 +3496,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3496,7 +3496,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
int node) int node)
{ {
int i; int i;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
for (i = 0; i < nr_objects; i++) { for (i = 0; i < nr_objects; i++) {
void *objp; void *objp;
...@@ -3506,19 +3506,19 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3506,19 +3506,19 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
objp = objpp[i]; objp = objpp[i];
slabp = virt_to_slab(objp); slabp = virt_to_slab(objp);
l3 = cachep->node[node]; n = cachep->node[node];
list_del(&slabp->list); list_del(&slabp->list);
check_spinlock_acquired_node(cachep, node); check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
slab_put_obj(cachep, slabp, objp, node); slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep); STATS_DEC_ACTIVE(cachep);
l3->free_objects++; n->free_objects++;
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
/* fixup slab chains */ /* fixup slab chains */
if (slabp->inuse == 0) { if (slabp->inuse == 0) {
if (l3->free_objects > l3->free_limit) { if (n->free_objects > n->free_limit) {
l3->free_objects -= cachep->num; n->free_objects -= cachep->num;
/* No need to drop any previously held /* No need to drop any previously held
* lock here, even if we have a off-slab slab * lock here, even if we have a off-slab slab
* descriptor it is guaranteed to come from * descriptor it is guaranteed to come from
...@@ -3527,14 +3527,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3527,14 +3527,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
*/ */
slab_destroy(cachep, slabp); slab_destroy(cachep, slabp);
} else { } else {
list_add(&slabp->list, &l3->slabs_free); list_add(&slabp->list, &n->slabs_free);
} }
} else { } else {
/* Unconditionally move a slab to the end of the /* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the * partial list on free - maximum time for the
* other objects to be freed, too. * other objects to be freed, too.
*/ */
list_add_tail(&slabp->list, &l3->slabs_partial); list_add_tail(&slabp->list, &n->slabs_partial);
} }
} }
} }
...@@ -3542,7 +3542,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3542,7 +3542,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{ {
int batchcount; int batchcount;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
int node = numa_mem_id(); int node = numa_mem_id();
batchcount = ac->batchcount; batchcount = ac->batchcount;
...@@ -3550,10 +3550,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) ...@@ -3550,10 +3550,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
BUG_ON(!batchcount || batchcount > ac->avail); BUG_ON(!batchcount || batchcount > ac->avail);
#endif #endif
check_irq_off(); check_irq_off();
l3 = cachep->node[node]; n = cachep->node[node];
spin_lock(&l3->list_lock); spin_lock(&n->list_lock);
if (l3->shared) { if (n->shared) {
struct array_cache *shared_array = l3->shared; struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail; int max = shared_array->limit - shared_array->avail;
if (max) { if (max) {
if (batchcount > max) if (batchcount > max)
...@@ -3572,8 +3572,8 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) ...@@ -3572,8 +3572,8 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
int i = 0; int i = 0;
struct list_head *p; struct list_head *p;
p = l3->slabs_free.next; p = n->slabs_free.next;
while (p != &(l3->slabs_free)) { while (p != &(n->slabs_free)) {
struct slab *slabp; struct slab *slabp;
slabp = list_entry(p, struct slab, list); slabp = list_entry(p, struct slab, list);
...@@ -3585,7 +3585,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) ...@@ -3585,7 +3585,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
STATS_SET_FREEABLE(cachep, i); STATS_SET_FREEABLE(cachep, i);
} }
#endif #endif
spin_unlock(&l3->list_lock); spin_unlock(&n->list_lock);
ac->avail -= batchcount; ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
} }
...@@ -3829,12 +3829,12 @@ void kfree(const void *objp) ...@@ -3829,12 +3829,12 @@ void kfree(const void *objp)
EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(kfree);
/* /*
* This initializes kmem_list3 or resizes various caches for all nodes. * This initializes kmem_cache_node or resizes various caches for all nodes.
*/ */
static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{ {
int node; int node;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
struct array_cache *new_shared; struct array_cache *new_shared;
struct array_cache **new_alien = NULL; struct array_cache **new_alien = NULL;
...@@ -3857,43 +3857,43 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) ...@@ -3857,43 +3857,43 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
} }
} }
l3 = cachep->node[node]; n = cachep->node[node];
if (l3) { if (n) {
struct array_cache *shared = l3->shared; struct array_cache *shared = n->shared;
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
if (shared) if (shared)
free_block(cachep, shared->entry, free_block(cachep, shared->entry,
shared->avail, node); shared->avail, node);
l3->shared = new_shared; n->shared = new_shared;
if (!l3->alien) { if (!n->alien) {
l3->alien = new_alien; n->alien = new_alien;
new_alien = NULL; new_alien = NULL;
} }
l3->free_limit = (1 + nr_cpus_node(node)) * n->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num; cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
kfree(shared); kfree(shared);
free_alien_cache(new_alien); free_alien_cache(new_alien);
continue; continue;
} }
l3 = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
if (!l3) { if (!n) {
free_alien_cache(new_alien); free_alien_cache(new_alien);
kfree(new_shared); kfree(new_shared);
goto fail; goto fail;
} }
kmem_list3_init(l3); kmem_cache_node_init(n);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3; ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new_shared; n->shared = new_shared;
l3->alien = new_alien; n->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node)) * n->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num; cachep->batchcount + cachep->num;
cachep->node[node] = l3; cachep->node[node] = n;
} }
return 0; return 0;
...@@ -3903,11 +3903,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) ...@@ -3903,11 +3903,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
node--; node--;
while (node >= 0) { while (node >= 0) {
if (cachep->node[node]) { if (cachep->node[node]) {
l3 = cachep->node[node]; n = cachep->node[node];
kfree(l3->shared); kfree(n->shared);
free_alien_cache(l3->alien); free_alien_cache(n->alien);
kfree(l3); kfree(n);
cachep->node[node] = NULL; cachep->node[node] = NULL;
} }
node--; node--;
...@@ -4071,11 +4071,11 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -4071,11 +4071,11 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
} }
/* /*
* Drain an array if it contains any elements taking the l3 lock only if * Drain an array if it contains any elements taking the node lock only if
* necessary. Note that the l3 listlock also protects the array_cache * necessary. Note that the node listlock also protects the array_cache
* if drain_array() is used on the shared array. * if drain_array() is used on the shared array.
*/ */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
struct array_cache *ac, int force, int node) struct array_cache *ac, int force, int node)
{ {
int tofree; int tofree;
...@@ -4085,7 +4085,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, ...@@ -4085,7 +4085,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
if (ac->touched && !force) { if (ac->touched && !force) {
ac->touched = 0; ac->touched = 0;
} else { } else {
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
if (ac->avail) { if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5; tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail) if (tofree > ac->avail)
...@@ -4095,7 +4095,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, ...@@ -4095,7 +4095,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
memmove(ac->entry, &(ac->entry[tofree]), memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void *) * ac->avail); sizeof(void *) * ac->avail);
} }
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
} }
} }
...@@ -4114,7 +4114,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, ...@@ -4114,7 +4114,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
static void cache_reap(struct work_struct *w) static void cache_reap(struct work_struct *w)
{ {
struct kmem_cache *searchp; struct kmem_cache *searchp;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
int node = numa_mem_id(); int node = numa_mem_id();
struct delayed_work *work = to_delayed_work(w); struct delayed_work *work = to_delayed_work(w);
...@@ -4126,33 +4126,33 @@ static void cache_reap(struct work_struct *w) ...@@ -4126,33 +4126,33 @@ static void cache_reap(struct work_struct *w)
check_irq_on(); check_irq_on();
/* /*
* We only take the l3 lock if absolutely necessary and we * We only take the node lock if absolutely necessary and we
* have established with reasonable certainty that * have established with reasonable certainty that
* we can do some work if the lock was obtained. * we can do some work if the lock was obtained.
*/ */
l3 = searchp->node[node]; n = searchp->node[node];
reap_alien(searchp, l3); reap_alien(searchp, n);
drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
/* /*
* These are racy checks but it does not matter * These are racy checks but it does not matter
* if we skip one check or scan twice. * if we skip one check or scan twice.
*/ */
if (time_after(l3->next_reap, jiffies)) if (time_after(n->next_reap, jiffies))
goto next; goto next;
l3->next_reap = jiffies + REAPTIMEOUT_LIST3; n->next_reap = jiffies + REAPTIMEOUT_LIST3;
drain_array(searchp, l3, l3->shared, 0, node); drain_array(searchp, n, n->shared, 0, node);
if (l3->free_touched) if (n->free_touched)
l3->free_touched = 0; n->free_touched = 0;
else { else {
int freed; int freed;
freed = drain_freelist(searchp, l3, (l3->free_limit + freed = drain_freelist(searchp, n, (n->free_limit +
5 * searchp->num - 1) / (5 * searchp->num)); 5 * searchp->num - 1) / (5 * searchp->num));
STATS_ADD_REAPED(searchp, freed); STATS_ADD_REAPED(searchp, freed);
} }
...@@ -4178,25 +4178,25 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) ...@@ -4178,25 +4178,25 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
const char *name; const char *name;
char *error = NULL; char *error = NULL;
int node; int node;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
active_objs = 0; active_objs = 0;
num_slabs = 0; num_slabs = 0;
for_each_online_node(node) { for_each_online_node(node) {
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
continue; continue;
check_irq_on(); check_irq_on();
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list) { list_for_each_entry(slabp, &n->slabs_full, list) {
if (slabp->inuse != cachep->num && !error) if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error"; error = "slabs_full accounting error";
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &l3->slabs_partial, list) { list_for_each_entry(slabp, &n->slabs_partial, list) {
if (slabp->inuse == cachep->num && !error) if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error"; error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error) if (!slabp->inuse && !error)
...@@ -4204,16 +4204,16 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) ...@@ -4204,16 +4204,16 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
active_objs += slabp->inuse; active_objs += slabp->inuse;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &l3->slabs_free, list) { list_for_each_entry(slabp, &n->slabs_free, list) {
if (slabp->inuse && !error) if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error"; error = "slabs_free/inuse accounting error";
num_slabs++; num_slabs++;
} }
free_objects += l3->free_objects; free_objects += n->free_objects;
if (l3->shared) if (n->shared)
shared_avail += l3->shared->avail; shared_avail += n->shared->avail;
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
} }
num_slabs += active_slabs; num_slabs += active_slabs;
num_objs = num_slabs * cachep->num; num_objs = num_slabs * cachep->num;
...@@ -4239,7 +4239,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) ...@@ -4239,7 +4239,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{ {
#if STATS #if STATS
{ /* list3 stats */ { /* node stats */
unsigned long high = cachep->high_mark; unsigned long high = cachep->high_mark;
unsigned long allocs = cachep->num_allocations; unsigned long allocs = cachep->num_allocations;
unsigned long grown = cachep->grown; unsigned long grown = cachep->grown;
...@@ -4392,7 +4392,7 @@ static int leaks_show(struct seq_file *m, void *p) ...@@ -4392,7 +4392,7 @@ static int leaks_show(struct seq_file *m, void *p)
{ {
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp; struct slab *slabp;
struct kmem_cache_node *l3; struct kmem_cache_node *n;
const char *name; const char *name;
unsigned long *n = m->private; unsigned long *n = m->private;
int node; int node;
...@@ -4408,18 +4408,18 @@ static int leaks_show(struct seq_file *m, void *p) ...@@ -4408,18 +4408,18 @@ static int leaks_show(struct seq_file *m, void *p)
n[1] = 0; n[1] = 0;
for_each_online_node(node) { for_each_online_node(node) {
l3 = cachep->node[node]; n = cachep->node[node];
if (!l3) if (!n)
continue; continue;
check_irq_on(); check_irq_on();
spin_lock_irq(&l3->list_lock); spin_lock_irq(&n->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list) list_for_each_entry(slabp, &n->slabs_full, list)
handle_slab(n, cachep, slabp); handle_slab(n, cachep, slabp);
list_for_each_entry(slabp, &l3->slabs_partial, list) list_for_each_entry(slabp, &n->slabs_partial, list)
handle_slab(n, cachep, slabp); handle_slab(n, cachep, slabp);
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&n->list_lock);
} }
name = cachep->name; name = cachep->name;
if (n[0] == n[1]) { if (n[0] == n[1]) {
...
...@@ -16,7 +16,7 @@ enum slab_state { ...@@ -16,7 +16,7 @@ enum slab_state {
DOWN, /* No slab functionality yet */ DOWN, /* No slab functionality yet */
PARTIAL, /* SLUB: kmem_cache_node available */ PARTIAL, /* SLUB: kmem_cache_node available */
PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */
PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */ PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
UP, /* Slab caches usable but not all extras yet */ UP, /* Slab caches usable but not all extras yet */
FULL /* Everything is working */ FULL /* Everything is working */
}; };
...