Commit 065d41cb authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: convert cache to page mapping macros

This patch converts object cache <-> page mapping macros to static inline
functions to make them more explicit and readable.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 669ed175
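The benefit of the conversion is easiest to see side by side: the old macros cast through page->lru with no type checking, while the static inline functions give the compiler real parameter types at no runtime cost. Below is a minimal, self-contained sketch of the same pattern; the struct definitions are simplified stand-ins for illustration, not the kernel's.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; field layout only. */
struct list_head { struct list_head *next, *prev; };
struct page { struct list_head lru; };
struct kmem_cache { const char *name; };

/* Old style: the macro casts whatever it is given, so a wrong-typed
 * second argument is silently accepted. */
#define SET_PAGE_CACHE(pg, x) ((pg)->lru.next = (struct list_head *)(x))
#define GET_PAGE_CACHE(pg)    ((struct kmem_cache *)(pg)->lru.next)

/* New style: same generated code, but the parameter types are now
 * checked by the compiler. */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

int main(void)
{
	struct kmem_cache cache = { .name = "demo" };
	struct page page;

	page_set_cache(&page, &cache);
	printf("cache for page: %s\n", page_get_cache(&page)->name);

	/* SET_PAGE_CACHE(&page, "oops") compiles silently, whereas
	 * page_set_cache(&page, "oops") draws an
	 * incompatible-pointer-type diagnostic. */
	return 0;
}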
@@ -565,14 +565,29 @@ static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 #define BREAK_GFP_ORDER_LO 0
 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
 
-/* Macros for storing/retrieving the cachep and or slab from the
+/* Functions for storing/retrieving the cachep and or slab from the
  * global 'mem_map'. These are used to find the slab an obj belongs to.
  * With kfree(), these are used to find the cache which an obj belongs to.
  */
-#define SET_PAGE_CACHE(pg,x)  ((pg)->lru.next = (struct list_head *)(x))
-#define GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->lru.next)
-#define SET_PAGE_SLAB(pg,x)   ((pg)->lru.prev = (struct list_head *)(x))
-#define GET_PAGE_SLAB(pg)     ((struct slab *)(pg)->lru.prev)
+static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
+{
+	page->lru.next = (struct list_head *)cache;
+}
+
+static inline struct kmem_cache *page_get_cache(struct page *page)
+{
+	return (struct kmem_cache *)page->lru.next;
+}
+
+static inline void page_set_slab(struct page *page, struct slab *slab)
+{
+	page->lru.prev = (struct list_head *)slab;
+}
+
+static inline struct slab *page_get_slab(struct page *page)
+{
+	return (struct slab *)page->lru.prev;
+}
 
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
@@ -1368,7 +1383,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 	/* Print some data about the neighboring objects, if they
 	 * exist:
 	 */
-	struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
+	struct slab *slabp = page_get_slab(virt_to_page(objp));
 	int objnr;
 
 	objnr = (objp-slabp->s_mem)/cachep->objsize;
@@ -2138,8 +2153,8 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
 	do {
-		SET_PAGE_CACHE(page, cachep);
-		SET_PAGE_SLAB(page, slabp);
+		page_set_cache(page, cachep);
+		page_set_slab(page, slabp);
 		page++;
 	} while (--i);
 }
@@ -2269,14 +2284,14 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
-	if (GET_PAGE_CACHE(page) != cachep) {
+	if (page_get_cache(page) != cachep) {
 		printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
-				GET_PAGE_CACHE(page),cachep);
+				page_get_cache(page),cachep);
 		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-		printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name);
+		printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
 		WARN_ON(1);
 	}
-	slabp = GET_PAGE_SLAB(page);
+	slabp = page_get_slab(page);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
@@ -2628,7 +2643,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
 		struct slab *slabp;
 		unsigned int objnr;
 
-		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
@@ -2744,7 +2759,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+		slabp = page_get_slab(virt_to_page(objp));
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -2830,7 +2845,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 	page = virt_to_page(ptr);
 	if (unlikely(!PageSlab(page)))
 		goto out;
-	if (unlikely(GET_PAGE_CACHE(page) != cachep))
+	if (unlikely(page_get_cache(page) != cachep))
 		goto out;
 	return 1;
 out:
@@ -3026,7 +3041,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = GET_PAGE_CACHE(virt_to_page(objp));
+	c = page_get_cache(virt_to_page(objp));
 	__cache_free(c, (void*)objp);
 	local_irq_restore(flags);
 }
@@ -3596,7 +3611,7 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp)));
+	return obj_reallen(page_get_cache(virt_to_page(objp)));
 }
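For context on the do/while loop in set_slab_attr above: a slab of gfporder n spans 2^n contiguous pages, and kfree() can be handed a pointer into any of them. virt_to_page() resolves to whichever constituent page holds the object, so every page must carry the cache back-pointer. The following toy sketch simulates that lookup path with a plain array of pages; PAGE_SHIFT, the array, and virt_to_page_sim are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins; the kernel versions live in the mm headers. */
struct list_head { struct list_head *next, *prev; };
struct page { struct list_head lru; };
struct kmem_cache { const char *name; };

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define GFPORDER   2			/* order-2 slab: 4 pages */

static struct page pages[1 << GFPORDER];	/* the slab's page frames */

/* Simulated virt_to_page(): map an offset inside the slab to the
 * struct page of the frame that contains it. */
static struct page *virt_to_page_sim(size_t offset)
{
	return &pages[offset >> PAGE_SHIFT];
}

int main(void)
{
	struct kmem_cache cache = { .name = "demo-cache" };
	int i;

	/* Mirror of the set_slab_attr() loop: stamp every constituent page. */
	for (i = 0; i < (1 << GFPORDER); i++)
		pages[i].lru.next = (struct list_head *)&cache;

	/* An object living in the third page still finds its cache. */
	size_t obj_offset = 2 * (1 << PAGE_SHIFT) + 100;
	struct kmem_cache *c =
		(struct kmem_cache *)virt_to_page_sim(obj_offset)->lru.next;
	assert(c == &cache);
	printf("object at offset %zu belongs to %s\n", obj_offset, c->name);
	return 0;
}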