Commit 35026088 authored by Christoph Lameter, committed by Pekka Enberg

slab: Remove some accessors

Those are rather trivial now and it's better to see inline what is
really going on.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent e571b0ad
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -489,16 +489,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
- * these are used to find the cache which an obj belongs to.
- */
-static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
-{
-	page->slab_cache = cache;
-}
-
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
 	page = compound_head(page);
@@ -506,27 +496,18 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
 	return page->slab_cache;
 }
 
-static inline void page_set_slab(struct page *page, struct slab *slab)
-{
-	page->slab_page = slab;
-}
-
-static inline struct slab *page_get_slab(struct page *page)
-{
-	BUG_ON(!PageSlab(page));
-	return page->slab_page;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_cache(page);
+	return page->slab_cache;
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_slab(page);
+
+	VM_BUG_ON(!PageSlab(page));
+	return page->slab_page;
 }
 
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
@@ -2918,8 +2899,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 	nr_pages <<= cache->gfporder;
 
 	do {
-		page_set_cache(page, cache);
-		page_set_slab(page, slab);
+		page->slab_cache = cache;
+		page->slab_page = slab;
 		page++;
 	} while (--nr_pages);
 }
@@ -3057,7 +3038,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	kfree_debugcheck(objp);
 	page = virt_to_head_page(objp);
 
-	slabp = page_get_slab(page);
+	slabp = page->slab_page;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		verify_redzone_free(cachep, objp);
@@ -3261,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		struct slab *slabp;
 		unsigned objnr;
 
-		slabp = page_get_slab(virt_to_head_page(objp));
+		slabp = virt_to_head_page(objp)->slab_page;
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
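For readers skimming the diff: the pattern the commit settles on is plain
struct-field access. The following is a standalone userspace sketch of that
pattern, not kernel code; the type names mirror the kernel's (struct page,
slab_cache, slab_page) but the definitions are simplified stand-ins for
illustration only.

/*
 * Userspace sketch of the inlined pattern: the page's back-pointers to
 * its cache and slab are plain struct fields, read and written directly
 * at each call site instead of through one-line accessor functions.
 */
#include <assert.h>
#include <stdio.h>

struct kmem_cache { const char *name; };  /* simplified stand-in */
struct slab { int inuse; };               /* simplified stand-in */

struct page {
	struct kmem_cache *slab_cache;    /* which cache owns this page */
	struct slab *slab_page;           /* which slab manages this page */
};

int main(void)
{
	struct kmem_cache cache = { .name = "demo-cache" };
	struct slab slab = { .inuse = 0 };
	struct page page;

	/* Direct stores, as slab_map_pages() now does. */
	page.slab_cache = &cache;
	page.slab_page = &slab;

	/* Direct loads, as virt_to_cache()/virt_to_slab() now do. */
	assert(page.slab_page == &slab);
	printf("page belongs to cache '%s'\n", page.slab_cache->name);
	return 0;
}

One behavioral nuance carried into virt_to_slab(): the removed
page_get_slab() used BUG_ON(!PageSlab(page)), while the inlined version uses
VM_BUG_ON(). In kernels of this era that macro expands roughly as sketched
below (see include/linux/mmdebug.h for the real definition), so the sanity
check costs nothing unless CONFIG_DEBUG_VM is enabled.

/* Rough shape of VM_BUG_ON, shown for illustration only. */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { } while (0)
#endif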