// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

12
#include <linux/bitops.h>
13
#include <linux/ftrace.h>
14
#include <linux/init.h>
15 16 17 18 19
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
20
#include <linux/stackdepot.h>
21 22 23 24
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
25
#include <linux/module.h>
26
#include <linux/sched/task_stack.h>
27
#include <linux/uaccess.h>
28

29 30
#include <asm/sections.h>

P
Patricia Alfonso 已提交
31 32
#include <kunit/test.h>

33
#include "kasan.h"
34
#include "../slab.h"
35

36
/* Global reporting state, manipulated atomically via the bits below. */
static unsigned long kasan_flags;

/* Bit 0: a report has already been printed. Bit 1: report every bug. */
#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1
40

41
bool kasan_save_enable_multi_shot(void)
42
{
43
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
44
}
45
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
46

47
/*
 * Restore the multi-shot state previously returned by
 * kasan_save_enable_multi_shot(); clears the bit only if it was clear before.
 */
void kasan_restore_multi_shot(bool enabled)
{
	if (enabled)
		return;

	clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
53

54
/* Handle the "kasan_multi_shot" boot parameter: report every bug found. */
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;	/* parameter consumed */
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
A
Andrey Konovalov 已提交
60

61
static void print_error_description(struct kasan_access_info *info)
62
{
A
Andrey Konovalov 已提交
63
	pr_err("BUG: KASAN: %s in %pS\n",
64
		get_bug_type(info), (void *)info->ip);
65 66 67 68 69 70 71 72
	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read", info->access_size,
			info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_addr, current->comm, task_pid_nr(current));
73 74
}

75 76
/* Serializes reports so output from concurrent bugs does not interleave. */
static DEFINE_SPINLOCK(report_lock);

/*
 * Open a report: suppress nested KASAN checks for this task, take the
 * report lock with IRQs off (state saved into *flags), and print the
 * separator banner.  Must be paired with end_report().
 */
static void start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

87
/*
 * Close a report: print the trailing banner, taint the kernel, release
 * the report lock and re-enable KASAN checks for this task.  Honors
 * panic_on_warn unless multi-shot reporting is enabled (panicking would
 * defeat the point of multi-shot).
 */
static void end_report(unsigned long *flags)
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}
	kasan_enable_current();
}

105 106 107 108 109 110 111 112 113
/* Fetch a stack trace from the stack depot and print it to the log. */
static void print_stack(depot_stack_handle_t stack)
{
	unsigned long *frames;
	unsigned int nr_frames = stack_depot_fetch(stack, &frames);

	stack_trace_print(frames, nr_frames, 0);
}

114
static void print_track(struct kasan_track *track, const char *prefix)
A
Alexander Potapenko 已提交
115
{
116
	pr_err("%s by task %u:\n", prefix, track->pid);
117
	if (track->stack) {
118
		print_stack(track->stack);
119 120 121
	} else {
		pr_err("(stack is not available)\n");
	}
A
Alexander Potapenko 已提交
122 123
}

124
struct page *kasan_addr_to_page(const void *addr)
125 126 127 128 129 130 131
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

132 133
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
A
Alexander Potapenko 已提交
134
{
135 136 137 138
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;
A
Alexander Potapenko 已提交
139

140
	pr_err("The buggy address belongs to the object at %px\n"
141 142
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);
143

144
	if (!addr)
A
Alexander Potapenko 已提交
145
		return;
146

147 148 149 150 151 152 153 154 155 156 157 158
	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
159
	       " %d-byte region [%px, %px)\n",
160 161 162 163 164
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

/*
 * Print everything known about a slab object: alloc/free stacks, any
 * auxiliary "work creation" stacks (generic KASAN only), and where the
 * bad access sits relative to the object.
 */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	/* Tracking metadata exists only for caches set up with SLAB_KASAN. */
	if (cache->flags & SLAB_KASAN) {
		struct kasan_track *free_track;

		print_track(&alloc_info->alloc_track, "Allocated");
		pr_err("\n");
		free_track = kasan_get_free_track(cache, object, tag);
		if (free_track) {
			print_track(free_track, "Freed");
			pr_err("\n");
		}

#ifdef CONFIG_KASAN_GENERIC
		/* Auxiliary stacks saved at e.g. queue_work/call_rcu time. */
		if (alloc_info->aux_stack[0]) {
			pr_err("Last potentially related work creation:\n");
			print_stack(alloc_info->aux_stack[0]);
			pr_err("\n");
		}
		if (alloc_info->aux_stack[1]) {
			pr_err("Second to last potentially related work creation:\n");
			print_stack(alloc_info->aux_stack[1]);
			pr_err("\n");
		}
#endif
	}

	describe_object_addr(cache, object, addr);
}

197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212
/* True when addr lies in the kernel image or in a loaded module. */
static inline bool kernel_or_module_addr(const void *addr)
{
	bool in_kernel_image = addr >= (void *)_stext && addr < (void *)_end;

	return in_kernel_image || is_module_address((unsigned long)addr);
}

/* True when addr falls within the init task's stack. */
static inline bool init_task_stack_addr(const void *addr)
{
	const void *stack_start = (void *)&init_thread_union.stack;
	const void *stack_end = stack_start + sizeof(init_thread_union.stack);

	/* Note: the upper bound is inclusive, matching the original check. */
	return addr >= stack_start && addr <= stack_end;
}

213
/*
 * Describe what the bad address points to: the containing slab object,
 * a kernel/module variable, and/or the backing page, followed by the
 * stack frame description for stack addresses.
 */
static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page,	addr);

		describe_object(cache, object, addr, tag);
	}

	/* Global variable, but not the init stack (which is also in the image). */
	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}

	print_address_stack_frame(addr);
}

240
static bool meta_row_is_guilty(const void *row, const void *addr)
241
{
242
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
243 244
}

245
static int meta_pointer_offset(const void *row, const void *addr)
246
{
247 248 249 250 251 252 253 254 255
	/*
	 * Memory state around the buggy address:
	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *  ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule metadata is 2 bytes
	 *    plus 1 byte for space.
256
	 */
257 258
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
259 260
}

261
/*
 * Dump the metadata rows surrounding the bad address, with a '^' marker
 * under the guilty granule in the guilty row.
 */
static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	/* Start META_ROWS_AROUND_ADDR rows before the row containing addr. */
	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		/* Mark the row containing the bad address with '>'. */
		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}

296
/*
 * Decide whether a report should be printed.  Suppressed while the
 * current task has KASAN temporarily disabled; without kasan_multi_shot,
 * only the first report system-wide is printed.
 */
static bool report_enabled(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return false;
#endif
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	/* One-shot mode: the first caller to set the bit wins. */
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

P
Patricia Alfonso 已提交
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325
#if IS_ENABLED(CONFIG_KUNIT)
/*
 * Notify a running KUnit test that a KASAN report fired.  Tests that
 * expect a report register a "kasan_data" resource, in which case we set
 * report_found; otherwise the report is unexpected and the test fails.
 */
static void kasan_update_kunit_status(struct kunit *cur_test)
{
	struct kunit_resource *resource;
	struct kunit_kasan_expectation *kasan_data;

	resource = kunit_find_named_resource(cur_test, "kasan_data");

	if (!resource) {
		kunit_set_failure(cur_test);
		return;
	}

	kasan_data = (struct kunit_kasan_expectation *)resource->data;
	kasan_data->report_found = true;
	kunit_put_resource(resource);
}
#endif /* IS_ENABLED(CONFIG_KUNIT) */

326
/*
 * Report a double-free or a free of an invalid pointer.  @object may
 * carry a tag (SW_TAGS mode), which is stripped before describing it.
 */
void kasan_report_invalid_free(void *object, unsigned long ip)
{
	unsigned long flags;
	u8 tag = get_tag(object);

	object = reset_tag(object);

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
	print_tags(tag, object);
	pr_err("\n");
	print_address_description(object, tag);
	pr_err("\n");
	print_memory_metadata(object);
	end_report(&flags);
}

348 349
/*
 * Print a full KASAN report for a bad access.  Callers must have checked
 * report_enabled() and saved user-access state (see kasan_report()).
 */
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
				unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	disable_trace_on_warning();

	tagged_addr = (void *)addr;
	untagged_addr = reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	/* Only addresses with KASAN metadata can be narrowed to a bad byte. */
	if (addr_has_metadata(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	start_report(&flags);

	print_error_description(&info);
	if (addr_has_metadata(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (addr_has_metadata(untagged_addr)) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		pr_err("\n");
		print_memory_metadata(info.first_bad_addr);
	} else {
		/* No metadata (e.g. wild pointer): just show the stack. */
		dump_stack();
	}

	end_report(&flags);
}
392

393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
/*
 * Main report entry point: print a report for a bad access if reporting
 * is currently enabled.  Returns true when a report was printed.
 * User-access (SMAP/PAN) state is saved and restored around the report.
 */
bool kasan_report(unsigned long addr, size_t size, bool is_write,
			unsigned long ip)
{
	unsigned long ua_flags = user_access_save();
	bool reported = false;

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		reported = true;
	}

	user_access_restore(ua_flags);

	return reported;
}

409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444
#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
 * before the actual access. For addresses in the low canonical half of the
 * address space, as well as most non-canonical addresses, that out-of-bounds
 * shadow memory access lands in the non-canonical part of the address space.
 * Help the user figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to shadow for low canonical addresses, we
	 * can still be pretty sure - that shadow region is a fairly narrow
	 * chunk of the non-canonical address space.
	 * But faults that look like shadow for non-canonical addresses are a
	 * really large chunk of the address space. In that case, we still
	 * print the decoded address, but make it clear that this is not
	 * necessarily what's actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
445
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
446 447
}
#endif