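/* kasan_init_64.c - KASAN shadow memory setup for x86-64. */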
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

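/* Populate real shadow memory for one range of mapped physical memory. */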
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fast path. In some rare cases
	 * we could cross the boundary of the mapped shadow, so we just
	 * map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

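/* Clear the top-level entries covering [start, end) to drop the early shadow. */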
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a no-op; use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}

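/*
 * Point every top-level entry covering the shadow region at the shared
 * zero pud/p4d table, so all early shadow accesses resolve to
 * kasan_zero_page.
 */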
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}

#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

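/*
 * Set up the early zero shadow: build static page tables that map the
 * whole shadow region to kasan_zero_page and hook them into both
 * boot-time page table roots.
 */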
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

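	/*
	 * Each level of the zero shadow hierarchy is a single table whose
	 * entries all point one level down, ending at kasan_zero_page.
	 */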
	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

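	/*
	 * Work on a copy of the top-level table: load early_top_pgt into
	 * CR3 so the entries in init_top_pgt can be cleared and rebuilt
	 * while the kernel keeps running.
	 */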
	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

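	/* Everything below the direct mapping's shadow gets the zero shadow page. */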
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

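	/* Allocate real shadow memory for each range of mapped physical pages. */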
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
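
	/* The gap between the direct mapping and the kernel image needs only zero shadow. */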
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

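	/* Kernel text and data ([_stext, _end)) get real shadow memory. */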
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

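	/* Everything above MODULES_END sees only the zero shadow page. */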
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

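	/* Switch back to init_top_pgt, which now carries the shadow mappings. */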
	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write-protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to make sure the write protection has taken effect. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}