Commit b1eeab67 authored by Vegard Nossum

kmemcheck: add hooks for the page allocator

This adds support for tracking the initializedness of memory that
was allocated with the page allocator. Highmem requests are not
tracked.

Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>

[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Parent 9b5cab31
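For illustration, a minimal sketch of the caller-visible semantics this patch introduces (hypothetical driver code, not part of the patch; the function name example_alloc is made up): plain page-allocator requests become tracked as uninitialized, __GFP_ZERO requests start out marked initialized, and __GFP_NOTRACK (or highmem) requests are skipped entirely.

	/* Hypothetical caller, illustration only -- not part of this patch. */
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void example_alloc(void)
	{
		/* Tracked: reading before the first write triggers a kmemcheck report. */
		struct page *p = alloc_pages(GFP_KERNEL, 0);

		/* Tracked, but marked initialized up front because of __GFP_ZERO. */
		struct page *z = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

		/* Untracked: kmemcheck_pagealloc_alloc() returns early, no shadow. */
		struct page *n = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);

		if (p)
			__free_page(p);
		if (z)
			__free_page(z);
		if (n)
			__free_page(n);
	}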
@@ -154,9 +154,9 @@ struct thread_info {

 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
 #endif

 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
......
@@ -116,6 +116,14 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
 		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
 }

+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
 enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
 {
 	uint8_t *x;
......
@@ -51,7 +51,12 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)   /* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
+
+#ifdef CONFIG_KMEMCHECK
 #define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif

 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
......
@@ -8,13 +8,15 @@
 extern int kmemcheck_enabled;

 /* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order);
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 			  size_t size);
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
 void kmemcheck_show_pages(struct page *p, unsigned int n);
 void kmemcheck_hide_pages(struct page *p, unsigned int n);

@@ -27,6 +29,7 @@ void kmemcheck_mark_freed(void *address, unsigned int n);

 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);

@@ -34,13 +37,12 @@ int kmemcheck_hide_addr(unsigned long address);
 #define kmemcheck_enabled 0

 static inline void
-kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-		       struct page *page, int order)
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 }

 static inline void
-kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+kmemcheck_free_shadow(struct page *page, int order)
 {
 }

@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
 {
 }

+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
 static inline bool kmemcheck_page_is_tracked(struct page *p)
 {
 	return false;

@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
 {
 }
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
 #endif /* CONFIG_KMEMCHECK */

 #endif /* LINUX_KMEMCHECK_H */
+#include <linux/gfp.h>
 #include <linux/mm_types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/kmemcheck.h>

-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order)
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 	struct page *shadow;
 	int pages;

@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * With kmemcheck enabled, we need to allocate a memory area for the
 	 * shadow bits as well.
 	 */
-	shadow = alloc_pages_node(node, flags, order);
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
 			printk(KERN_ERR "kmemcheck: failed to allocate "

@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * the memory accesses.
 	 */
 	kmemcheck_hide_pages(page, pages);
-
-	/*
-	 * Objects from caches that have a constructor don't get
-	 * cleared when they're allocated, so we need to do it here.
-	 */
-	if (s->ctor)
-		kmemcheck_mark_uninitialized_pages(page, pages);
-	else
-		kmemcheck_mark_unallocated_pages(page, pages);
 }

-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+void kmemcheck_free_shadow(struct page *page, int order)
 {
 	struct page *shadow;
 	int pages;
 	int i;

+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
 	pages = 1 << order;

 	kmemcheck_show_pages(page, pages);

@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
 		kmemcheck_mark_freed(object, size);
 }
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>

@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;

+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)

@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;

+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))

@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));

+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }

@@ -1667,7 +1682,10 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_internal);
......
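The split_page() hook above guards against a subtle case: a tracked high-order allocation whose sub-pages are later freed one by one. A hypothetical sequence (illustration only, assuming CONFIG_KMEMCHECK; the function name example_split is made up) shows the hazard:

	static void example_split(void)
	{
		/* One order-2 request gets one order-2 shadow block. */
		struct page *p = alloc_pages(GFP_KERNEL, 2);
		int i;

		if (!p)
			return;

		/*
		 * split_page() now also splits the shadow; otherwise freeing
		 * p[0] would release the whole shadow block while p[1]..p[3]
		 * are still live and tracked.
		 */
		split_page(p, 2);

		for (i = 0; i < 4; i++)
			__free_page(p + i);
	}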
@@ -1612,7 +1612,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;

-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;

@@ -1626,8 +1626,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);

-	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
-		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}

 	return page_address(page);
 }

@@ -1641,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;

-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+	kmemcheck_free_shadow(page, cachep->gfporder);

 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
......
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);

+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else

@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
 	{
-		kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
 	}

 	page->objects = oo_objects(oo);

@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}

-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(s, page, compound_order(page));
+	kmemcheck_free_shadow(page, compound_order(page));

 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?

@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);

 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-					     get_order(size));
+	struct page *page;
+
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
......