Commit c175eea4 authored by Pekka Enberg, committed by Vegard Nossum

slab: add hooks for kmemcheck

We now have SLAB support for kmemcheck! This means that kmemcheck now works
regardless of whether one chooses SLAB or SLUB, or indeed whether Linus
chooses to chuck SLAB or SLUB... ;-)

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Parent 5a896d9e
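
In outline, the patch hooks SLAB at two levels: the page level, where kmemcheck shadow memory is allocated and freed together with a slab's backing pages, and the object level, where each object is reported to kmemcheck on allocation and free. The following is a sketch only: the kmemcheck_* calls, obj_size(), and SLAB_NOTRACK are taken from the diff below, but the wrapper functions themselves are hypothetical simplifications, not the actual patched code.

	/*
	 * Sketch of the hook pattern this patch wires into mm/slab.c.
	 * Wrapper names are invented; the real call sites are in the
	 * diff below.
	 */
	#include <linux/kmemcheck.h>
	#include <linux/mm.h>
	#include <linux/slab.h>

	/* Page level: when a cache grows, allocate shadow memory that
	 * records the initialization state of every byte in the new
	 * pages. Caches created with SLAB_NOTRACK opt out entirely. */
	static void *sketch_getpages(struct kmem_cache *cachep, gfp_t flags,
				     int nodeid, struct page *page)
	{
		if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
			kmemcheck_alloc_shadow(cachep, flags, nodeid, page,
					       cachep->gfporder);
		return page_address(page);
	}

	/* Page level: tear the shadow down again before the pages are
	 * handed back to the page allocator. */
	static void sketch_freepages(struct kmem_cache *cachep,
				     struct page *page)
	{
		if (kmemcheck_page_is_tracked(page))
			kmemcheck_free_shadow(cachep, page, cachep->gfporder);
	}

	/* Object level: mark an object on allocation (uninitialized
	 * unless __GFP_ZERO zeroed it) so that reads of stale bytes
	 * trap, and mark it freed again so use-after-free reads trap. */
	static void *sketch_alloc(struct kmem_cache *cachep, gfp_t flags,
				  void *objp)
	{
		if (likely(objp))
			kmemcheck_slab_alloc(cachep, flags, objp,
					     obj_size(cachep));
		return objp;
	}

	static void sketch_free(struct kmem_cache *cachep, void *objp)
	{
		kmemcheck_slab_free(cachep, objp, obj_size(cachep));
	}
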
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
 	return page_address(page);
 }
 
@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
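
One user-visible consequence of the patch is the new SLAB_NOTRACK flag in CREATE_MASK: a cache whose objects kmemcheck should leave alone can opt out at creation time, so no shadow pages are allocated for its slabs. A hypothetical usage example follows; the cache name and struct are invented for illustration.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	struct my_obj {
		int state;
		void *data;
	};

	static struct kmem_cache *my_cachep;

	/* Create a cache whose objects kmemcheck will not track. */
	static int __init my_cache_init(void)
	{
		my_cachep = kmem_cache_create("my_untracked_cache",
					      sizeof(struct my_obj), 0,
					      SLAB_NOTRACK, NULL);
		return my_cachep ? 0 : -ENOMEM;
	}
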