提交 b49af68f 编写于 作者: C Christoph Lameter 提交者: Linus Torvalds

Add virt_to_head_page and consolidate code in slab and slub

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 6d777953
@@ -286,6 +286,12 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
/*
 * virt_to_head_page - resolve a kernel virtual address to its head page.
 * @x: kernel virtual address inside a (possibly compound) page.
 *
 * Looks up the struct page backing @x and, when that page is a tail of a
 * compound page, returns the compound head instead; for an ordinary page
 * the page itself is returned.
 */
static inline struct page *virt_to_head_page(const void *x)
{
	return compound_head(virt_to_page(x));
}
 /*
  * Setup the page count before being freed into the page allocator for
  * the first time (boot or memory hotplug)
...
@@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 static inline struct slab *page_get_slab(struct page *page)
 {
-	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_cache(page);
 }
 static inline struct slab *virt_to_slab(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_slab(page);
 }
@@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
-	page = virt_to_page(objp);
+	page = virt_to_head_page(objp);
 	slabp = page_get_slab(page);
@@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	struct slab *slabp;
 	unsigned objnr;
-	slabp = page_get_slab(virt_to_page(objp));
+	slabp = page_get_slab(virt_to_head_page(objp));
 	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 	slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 }
...
@@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	struct page * page;
-	page = virt_to_page(x);
-	page = compound_head(page);
+	page = virt_to_head_page(x);
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, TRACK_FREE);
@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = compound_head(virt_to_page(x));
+	struct page *page = virt_to_head_page(x);
 	if (!PageSlab(page))
 		return NULL;
@@ -2076,7 +2074,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
-	page = compound_head(virt_to_page(x));
+	page = virt_to_head_page(x);
 	s = page->slab;
@@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
-	page = compound_head(virt_to_page(p));
+	page = virt_to_head_page(p);
 	new_cache = get_slab(new_size, flags);
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册