// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

17
#include <linux/bitops.h>
18
#include <linux/ftrace.h>
19
#include <linux/init.h>
20 21 22 23 24
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
25
#include <linux/stackdepot.h>
26 27 28 29
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
30
#include <linux/module.h>
31

32 33
#include <asm/sections.h>

34
#include "kasan.h"
35
#include "../slab.h"
36 37 38 39 40 41 42

/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
#define SHADOW_BLOCKS_PER_ROW 16
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2

43
static unsigned long kasan_flags;
44

45 46
#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1
47

48
bool kasan_save_enable_multi_shot(void)
49
{
50
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
51
}
52
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
53

54
void kasan_restore_multi_shot(bool enabled)
55
{
56 57
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
58
}
59
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
60

61
static int __init kasan_set_multi_shot(char *str)
A
Andrey Konovalov 已提交
62
{
63 64
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
A
Andrey Konovalov 已提交
65
}
66
__setup("kasan_multi_shot", kasan_set_multi_shot);
A
Andrey Konovalov 已提交
67

68
static void print_error_description(struct kasan_access_info *info)
69
{
A
Andrey Konovalov 已提交
70
	pr_err("BUG: KASAN: %s in %pS\n",
71
		get_bug_type(info), (void *)info->ip);
72
	pr_err("%s of size %zu at addr %px by task %s/%d\n",
A
Andrey Konovalov 已提交
73
		info->is_write ? "Write" : "Read", info->access_size,
A
Andrey Konovalov 已提交
74
		info->access_addr, current->comm, task_pid_nr(current));
75 76
}

77 78
static DEFINE_SPINLOCK(report_lock);

79
static void start_report(unsigned long *flags)
80 81 82 83 84 85 86 87 88
{
	/*
	 * Make sure we don't end up in loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

89
static void end_report(unsigned long *flags)
90 91 92 93
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
D
Dmitry Vyukov 已提交
94 95
	if (panic_on_warn)
		panic("panic_on_warn set ...\n");
96 97 98
	kasan_enable_current();
}

99
static void print_track(struct kasan_track *track, const char *prefix)
A
Alexander Potapenko 已提交
100
{
101
	pr_err("%s by task %u:\n", prefix, track->pid);
102 103 104 105 106 107 108 109
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
A
Alexander Potapenko 已提交
110 111
}

112 113 114 115 116 117 118 119
static struct page *addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

120 121
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
A
Alexander Potapenko 已提交
122
{
123 124 125 126
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;
A
Alexander Potapenko 已提交
127

128
	pr_err("The buggy address belongs to the object at %px\n"
129 130
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);
131

132
	if (!addr)
A
Alexander Potapenko 已提交
133
		return;
134

135 136 137 138 139 140 141 142 143 144 145 146
	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
147
	       " %d-byte region [%px, %px)\n",
148 149 150 151 152 153 154 155 156 157 158
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	if (cache->flags & SLAB_KASAN) {
		print_track(&alloc_info->alloc_track, "Allocated");
159
		pr_err("\n");
160
		print_track(&alloc_info->free_track, "Freed");
161
		pr_err("\n");
162 163 164
	}

	describe_object_addr(cache, object, addr);
A
Alexander Potapenko 已提交
165 166
}

167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182
static inline bool kernel_or_module_addr(const void *addr)
{
	if (addr >= (void *)_stext && addr < (void *)_end)
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

183
static void print_address_description(void *addr)
184
{
185
	struct page *page = addr_to_page(addr);
186

187
	dump_stack();
188
	pr_err("\n");
189 190 191

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
192
		void *object = nearest_obj(cache, page,	addr);
193

194
		describe_object(cache, object, addr);
195 196
	}

197 198 199 200 201 202 203 204
	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
205
	}
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
}

static bool row_is_guilty(const void *row, const void *guilty)
{
	return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
}

static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/* The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG/8)*2 chars.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}

static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
237
		char shadow_buf[SHADOW_BYTES_PER_ROW];
238 239

		snprintf(buffer, sizeof(buffer),
240
			(i == 0) ? ">%px: " : " %px: ", kaddr);
241 242 243 244 245 246
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
247 248
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
249
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);
250 251 252 253 254 255 256 257 258 259

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}

260 261 262 263 264 265 266 267 268
static bool report_enabled(void)
{
	if (current->kasan_depth)
		return false;
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

269
void kasan_report_invalid_free(void *object, unsigned long ip)
270 271 272
{
	unsigned long flags;

273
	start_report(&flags);
274
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
275 276
	print_tags(get_tag(object), reset_tag(object));
	object = reset_tag(object);
277
	pr_err("\n");
278
	print_address_description(object);
279
	pr_err("\n");
280
	print_shadow_for_address(object);
281
	end_report(&flags);
282 283
}

284
void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
285 286
{
	struct kasan_access_info info;
287 288 289
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;
290

291
	if (likely(!report_enabled()))
292 293
		return;

294 295
	disable_trace_on_warning();

296 297 298 299 300 301 302 303
	tagged_addr = (void *)addr;
	untagged_addr = reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	if (addr_has_shadow(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
304 305 306
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;
307

308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323
	start_report(&flags);

	print_error_description(&info);
	if (addr_has_shadow(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (addr_has_shadow(untagged_addr)) {
		print_address_description(untagged_addr);
		pr_err("\n");
		print_shadow_for_address(info.first_bad_addr);
	} else {
		dump_stack();
	}

	end_report(&flags);
324
}