Commit ca34956b authored by Christoph Lameter, committed by Pekka Enberg

slab: Common definition for kmem_cache_node

Put the definitions for the kmem_cache_node structures together so that
we have one structure. That will allow us to create more common fields in
the future which could yield more opportunities to share code.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent ce8eb6c4
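The point of merging the two definitions is that code operating on per-node state can stop being duplicated per allocator. The standalone sketch below is not kernel code: kmem_cache_node_model, lock_node, and the MODEL_SLAB/MODEL_SLUB macros are illustrative stand-ins, and a pthread mutex takes the place of the kernel spinlock. It models how a field shared by both #ifdef branches (here list_lock) lets a single helper serve either build.

/*
 * Standalone sketch (not kernel code): models how one kmem_cache_node
 * definition with a common list_lock field lets both allocator builds
 * reuse the same locking helper. All names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct kmem_cache_node_model {
	pthread_mutex_t list_lock;	/* common field shared by both builds */
#ifdef MODEL_SLAB
	unsigned long free_objects;	/* SLAB-style bookkeeping only */
#endif
#ifdef MODEL_SLUB
	unsigned long nr_partial;	/* SLUB-style bookkeeping only */
#endif
};

/* Helpers like these can now live in one shared header. */
static void lock_node(struct kmem_cache_node_model *n)
{
	pthread_mutex_lock(&n->list_lock);
}

static void unlock_node(struct kmem_cache_node_model *n)
{
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct kmem_cache_node_model n = {
		.list_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	lock_node(&n);	/* same call regardless of which build is active */
	puts("node locked and unlocked via shared helper");
	unlock_node(&n);
	return 0;
}

Building with cc -pthread sketch.c -DMODEL_SLAB (or -DMODEL_SLUB) toggles the allocator-specific bookkeeping while lock_node() stays identical, which is the kind of sharing the commit message anticipates from growing the common-field section.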
...@@ -53,17 +53,6 @@ struct kmem_cache_cpu { ...@@ -53,17 +53,6 @@ struct kmem_cache_cpu {
#endif #endif
}; };
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
struct list_head full;
#endif
};
/* /*
* Word size structure that can be atomically updated or read and that * Word size structure that can be atomically updated or read and that
* contains both the order and the number of objects that a slab of the * contains both the order and the number of objects that a slab of the
......
...@@ -285,23 +285,6 @@ struct arraycache_init { ...@@ -285,23 +285,6 @@ struct arraycache_init {
void *entries[BOOT_CPUCACHE_ENTRIES]; void *entries[BOOT_CPUCACHE_ENTRIES];
}; };
/*
* The slab lists for all objects.
*/
struct kmem_cache_node {
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
unsigned long next_reap; /* updated without locking */
int free_touched; /* updated without locking */
};
/* /*
* Need this for bootstrapping a per node allocator. * Need this for bootstrapping a per node allocator.
*/ */
......
...@@ -239,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) ...@@ -239,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return s; return s;
} }
#endif #endif
/*
* The slab lists for all objects.
*/
struct kmem_cache_node {
spinlock_t list_lock;
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
unsigned long next_reap; /* updated without locking */
int free_touched; /* updated without locking */
#endif
#ifdef CONFIG_SLUB
unsigned long nr_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
struct list_head full;
#endif
#endif
};