#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

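/*
 * Allocate a shadow block of the same order and let each page's ->shadow
 * point at the virtual address of its shadow page. The pages themselves are
 * then marked non-present so that accesses trigger page faults which
 * kmemcheck can analyze.
 */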
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

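/*
 * Undo kmemcheck_alloc_shadow(): un-hide the pages, clear the ->shadow
 * pointers and free the shadow block.
 */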
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

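	/*
	 * The shadow was allocated as a single block of the same order, so
	 * recovering the first shadow page is enough to free all of it.
	 */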
	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

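/*
 * Slab allocation hook: set the initial shadow state of the new object
 * (initialized or uninitialized) depending on the cache and the allocation
 * flags.
 */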
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

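/*
 * Slab free hook: mark the object's shadow as freed, except for caches with
 * constructors or RCU-delayed freeing.
 */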
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

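/*
 * Page-allocator counterpart of kmemcheck_slab_alloc(): set up shadow pages
 * for the new allocation and mark their initial shadow state. Highmem and
 * non-tracked allocations are skipped.
 */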
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}