report.c 13.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
 * This file contains common KASAN error reporting code.
4 5
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
6
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
7
 *
8
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
9
 *        Andrey Konovalov <andreyknvl@gmail.com>
10 11
 */

12
#include <linux/bitops.h>
13
#include <linux/ftrace.h>
14
#include <linux/init.h>
15 16 17 18 19
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
20
#include <linux/stackdepot.h>
21 22 23 24
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
25
#include <linux/module.h>
26
#include <linux/sched/task_stack.h>
27
#include <linux/uaccess.h>
28
#include <trace/events/error_report.h>
29

30 31
#include <asm/sections.h>

P
Patricia Alfonso 已提交
32 33
#include <kunit/test.h>

34
#include "kasan.h"
35
#include "../slab.h"
36

37
/*
 * Global KASAN reporting state; bits are defined below and manipulated
 * atomically via {test_and_,}set_bit()/clear_bit().
 */
static unsigned long kasan_flags;

/* Set once the first report has been printed (single-shot mode). */
#define KASAN_BIT_REPORTED	0
/* When set, every report is printed instead of only the first one. */
#define KASAN_BIT_MULTI_SHOT	1

/* Behavior on a KASAN fault, selectable via the kasan.fault= boot parameter. */
enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;

/* kasan.fault=report/panic */
static int __init early_kasan_fault(char *arg)
{
	int ret = 0;

	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "report"))
		kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
	else if (!strcmp(arg, "panic"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
	else
		ret = -EINVAL;

	return ret;
}
early_param("kasan.fault", early_kasan_fault);

67
bool kasan_save_enable_multi_shot(void)
68
{
69
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
70
}
71
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
72

73
/*
 * Restore the multi-shot state previously saved by
 * kasan_save_enable_multi_shot(). Only clears the bit; it was already set
 * by the save call.
 */
void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
79

80
static int __init kasan_set_multi_shot(char *str)
A
Andrey Konovalov 已提交
81
{
82 83
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
A
Andrey Konovalov 已提交
84
}
85
__setup("kasan_multi_shot", kasan_set_multi_shot);
A
Andrey Konovalov 已提交
86

87
static void print_error_description(struct kasan_access_info *info)
88
{
A
Andrey Konovalov 已提交
89
	pr_err("BUG: KASAN: %s in %pS\n",
90
		kasan_get_bug_type(info), (void *)info->ip);
91 92 93 94 95 96 97 98
	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read", info->access_size,
			info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_addr, current->comm, task_pid_nr(current));
99 100
}

101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
/*
 * Record that a KASAN report happened in the "kasan_status" resource of the
 * currently running KUnit test, so the test can check for it. A missing
 * resource marks the test as failed.
 */
static void update_kunit_status(bool sync)
{
	struct kunit *test = current->kunit_test;
	struct kunit_resource *resource;
	struct kunit_kasan_status *status;

	/* Nothing to do when no KUnit test is running. */
	if (!test)
		return;

	resource = kunit_find_named_resource(test, "kasan_status");
	if (!resource) {
		kunit_set_failure(test);
		return;
	}

	status = (struct kunit_kasan_status *)resource->data;
	WRITE_ONCE(status->report_found, true);
	WRITE_ONCE(status->sync_fault, sync);

	kunit_put_resource(resource);
}
#else
static void update_kunit_status(bool sync) { }
#endif

128 129
static DEFINE_SPINLOCK(report_lock);

130
static void start_report(unsigned long *flags, bool sync)
131
{
132 133 134
	/* Update status of the currently running KASAN test. */
	update_kunit_status(sync);
	/* Make sure we don't end up in loop. */
135 136 137 138 139
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

140
static void end_report(unsigned long *flags, unsigned long addr)
141
{
142
	if (addr)
143
		trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
144 145 146
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
147
	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
D
Dmitry Vyukov 已提交
148
		panic("panic_on_warn set ...\n");
149
	if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC)
150
		panic("kasan.fault=panic set ...\n");
151 152 153
	kasan_enable_current();
}

154
static void print_track(struct kasan_track *track, const char *prefix)
A
Alexander Potapenko 已提交
155
{
156
	pr_err("%s by task %u:\n", prefix, track->pid);
157
	if (track->stack) {
158
		stack_depot_print(track->stack);
159 160 161
	} else {
		pr_err("(stack is not available)\n");
	}
A
Alexander Potapenko 已提交
162 163
}

164
struct page *kasan_addr_to_page(const void *addr)
165 166 167 168 169 170 171
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

172 173 174 175 176 177 178 179
/* Translate a linear-map address to its slab; NULL otherwise. */
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (addr < (void *)PAGE_OFFSET || addr >= high_memory)
		return NULL;
	return virt_to_slab(addr);
}

180 181
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
A
Alexander Potapenko 已提交
182
{
183 184 185 186
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;
A
Alexander Potapenko 已提交
187

188
	pr_err("The buggy address belongs to the object at %px\n"
189 190
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);
191

192 193 194 195 196 197 198 199 200 201 202 203
	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
204
	       " %d-byte region [%px, %px)\n",
205 206 207 208
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

209 210
/*
 * Print the saved allocation and free stack traces for a slab object and,
 * for generic KASAN, the auxiliary (work-creation) stacks as well.
 */
static void describe_object_stacks(struct kmem_cache *cache, void *object,
					const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	struct kasan_track *free_track;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		print_track(&alloc_meta->alloc_track, "Allocated");
		pr_err("\n");
	}

	free_track = kasan_get_free_track(cache, object, tag);
	if (free_track) {
		print_track(free_track, "Freed");
		pr_err("\n");
	}

#ifdef CONFIG_KASAN_GENERIC
	/* Aux stacks live in the alloc metadata; nothing to print without it. */
	if (!alloc_meta)
		return;
	if (alloc_meta->aux_stack[0]) {
		pr_err("Last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[0]);
		pr_err("\n");
	}
	if (alloc_meta->aux_stack[1]) {
		pr_err("Second to last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[1]);
		pr_err("\n");
	}
#endif
}
242

243 244 245 246 247
/* Describe a slab object: stacks (when collected) plus its address layout. */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(cache, object, addr, tag);
	describe_object_addr(cache, object, addr);
}

251 252
/* True when addr points into the kernel image or a loaded module. */
static inline bool kernel_or_module_addr(const void *addr)
{
	return is_kernel((unsigned long)addr) ||
	       is_module_address((unsigned long)addr);
}

/*
 * True when addr lies within the init task's stack.
 * NOTE(review): the upper bound is inclusive (<=), matching the original;
 * the one-past-the-end address is treated as part of the stack.
 */
static inline bool init_task_stack_addr(const void *addr)
{
	void *stack = (void *)&init_thread_union.stack;

	return addr >= stack &&
		addr <= stack + sizeof(init_thread_union.stack);
}

267
/*
 * Print everything known about the buggy address: the current stack, the
 * slab object it belongs to (if any), the kernel/module variable it matches
 * (if any), stack-frame info for stack addresses, the vmalloc area it falls
 * in (if any), and finally the backing physical page.
 */
static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct slab *slab = page_slab(page);
		struct kmem_cache *cache = slab->slab_cache;
		void *object = nearest_obj(cache, slab, addr);

		describe_object(cache, object, addr, tag);
		pr_err("\n");
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
		pr_err("\n");
	}

	if (object_is_on_stack(addr)) {
		/*
		 * Currently, KASAN supports printing frame information only
		 * for accesses to the task's own stack.
		 */
		kasan_print_address_stack_frame(addr);
		pr_err("\n");
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);
			pr_err("\n");

			/*
			 * Bug fix: translate the faulting virtual address.
			 * The old code called vmalloc_to_page(page), passing
			 * a struct page pointer (NULL here, since vmalloc
			 * addresses are outside the linear map covered by
			 * kasan_addr_to_page()) where a virtual address is
			 * expected.
			 */
			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
		pr_err("\n");
	}
}

319
static bool meta_row_is_guilty(const void *row, const void *addr)
320
{
321
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
322 323
}

324
static int meta_pointer_offset(const void *row, const void *addr)
325
{
326 327 328 329 330 331 332 333 334
	/*
	 * Memory state around the buggy address:
	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *  ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule metadata is 2 bytes
	 *    plus 1 byte for space.
335
	 */
336 337
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
338 339
}

340
static void print_memory_metadata(const void *addr)
341 342
{
	int i;
343
	void *row;
344

345 346
	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
347 348 349

	pr_err("Memory state around the buggy address:\n");

350
	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
351 352
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];
353 354

		snprintf(buffer, sizeof(buffer),
355 356
				(i == 0) ? ">%px: " : " %px: ", row);

357 358 359 360 361
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
362
		kasan_metadata_fetch_row(&metadata[0], row);
363

364
		print_hex_dump(KERN_ERR, buffer,
365
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
366
			metadata, META_BYTES_PER_ROW, 0);
367

368 369
		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');
370

371
		row += META_MEM_BYTES_PER_ROW;
372 373 374
	}
}

375
static bool report_enabled(void)
376
{
377
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
378 379
	if (current->kasan_depth)
		return false;
380
#endif
381 382 383 384 385
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

386
void kasan_report_invalid_free(void *object, unsigned long ip)
387 388
{
	unsigned long flags;
389
	u8 tag = get_tag(object);
390

391
	object = kasan_reset_tag(object);
P
Patricia Alfonso 已提交
392

393
	start_report(&flags, true);
394
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
395
	kasan_print_tags(tag, object);
396
	pr_err("\n");
397
	print_address_description(object, tag);
398
	print_memory_metadata(object);
399
	end_report(&flags, (unsigned long)object);
400 401
}

402 403 404 405 406
#ifdef CONFIG_KASAN_HW_TAGS
/*
 * Report an asynchronously detected tag-check fault. In asynchronous mode
 * no access details (address, size, direction) are available, so only the
 * current stack is printed.
 */
void kasan_report_async(void)
{
	unsigned long flags;

	start_report(&flags, false);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous mode enabled: no access details available\n");
	pr_err("\n");
	dump_stack_lvl(KERN_ERR);
	end_report(&flags, 0);
}
#endif /* CONFIG_KASAN_HW_TAGS */

416 417
/* Build the access-info record and print a full synchronous report. */
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
				unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr = (void *)addr;
	void *untagged_addr = kasan_reset_tag(tagged_addr);
	bool has_metadata;
	unsigned long flags;

	disable_trace_on_warning();
	start_report(&flags, true);

	/* Addresses without metadata get a reduced report (stack only). */
	has_metadata = addr_has_metadata(untagged_addr);

	info.access_addr = tagged_addr;
	if (has_metadata)
		info.first_bad_addr =
			kasan_find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	print_error_description(&info);
	if (has_metadata)
		kasan_print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (has_metadata) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		print_memory_metadata(info.first_bad_addr);
	} else {
		dump_stack_lvl(KERN_ERR);
	}

	end_report(&flags, addr);
}
454

455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470
/*
 * Entry point for reporting a bad access. Returns true when a report was
 * actually printed. User-access state is saved/restored around the report
 * so objtool/SMAP-protected sections stay consistent.
 */
bool kasan_report(unsigned long addr, size_t size, bool is_write,
			unsigned long ip)
{
	unsigned long ua_flags = user_access_save();
	bool reported = false;

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		reported = true;
	}

	user_access_restore(ua_flags);
	return reported;
}

471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506
#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory
 * reads before the actual access. For addresses in the low canonical half
 * of the address space, as well as most non-canonical addresses, that
 * out-of-bounds shadow access lands in the non-canonical part of the
 * address space. Help the user figure out what the original bogus pointer
 * was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	/* Addresses below the shadow offset cannot be shadow accesses. */
	if (addr < KASAN_SHADOW_OFFSET)
		return;

	/* Invert the shadow mapping to recover the original pointer. */
	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;

	/*
	 * Shadow for the NULL page and for low canonical addresses occupies
	 * narrow, well-known regions, so those classifications are fairly
	 * certain; anything else is reported as a guess ("maybe").
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";

	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif