// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <trace/events/error_report.h>

#include <asm/sections.h>

#include <kunit/test.h>

#include "kasan.h"
#include "../slab.h"

static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1

enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;

/* kasan.fault=report/panic */
static int __init early_kasan_fault(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "report"))
		kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
	else if (!strcmp(arg, "panic"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.fault", early_kasan_fault);
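
/*
 * Example: booting with "kasan.fault=panic" on the kernel command line
 * makes end_report() below panic after printing a report instead of
 * merely reporting and continuing.
 */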

bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);

void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
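
/*
 * Typical usage of the pair above, roughly as in the KASAN KUnit tests
 * (a sketch, not the exact test-harness code):
 *
 *	bool multishot = kasan_save_enable_multi_shot();
 *	... trigger the accesses that are expected to produce reports ...
 *	kasan_restore_multi_shot(multishot);
 */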

static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);

static void print_error_description(struct kasan_access_info *info)
{
	if (info->type == KASAN_REPORT_INVALID_FREE) {
		pr_err("BUG: KASAN: double-free or invalid-free in %pS\n",
		       (void *)info->ip);
		return;
	}

	pr_err("BUG: KASAN: %s in %pS\n",
		kasan_get_bug_type(info), (void *)info->ip);
	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read", info->access_size,
			info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_addr, current->comm, task_pid_nr(current));
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
static void update_kunit_status(bool sync)
{
	struct kunit *test;
	struct kunit_resource *resource;
	struct kunit_kasan_status *status;

	test = current->kunit_test;
	if (!test)
		return;

	resource = kunit_find_named_resource(test, "kasan_status");
	if (!resource) {
		kunit_set_failure(test);
		return;
	}

	status = (struct kunit_kasan_status *)resource->data;
	WRITE_ONCE(status->report_found, true);
	WRITE_ONCE(status->sync_fault, sync);

	kunit_put_resource(resource);
}
#else
static void update_kunit_status(bool sync) { }
#endif
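
/*
 * The KASAN KUnit tests read status->report_found back after running the
 * expression under test (via their KUNIT_EXPECT_KASAN_FAIL() wrapper) to
 * check that the expected report actually fired.
 */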

static DEFINE_SPINLOCK(report_lock);

static void start_report(unsigned long *flags, bool sync)
{
	/* Respect the /proc/sys/kernel/traceoff_on_warning interface. */
	disable_trace_on_warning();
	/* Update status of the currently running KASAN test. */
	update_kunit_status(sync);
	/* Make sure we don't end up in a loop. */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

static void end_report(unsigned long *flags, void *addr)
{
	if (addr)
		trace_error_report_end(ERROR_DETECTOR_KASAN,
				       (unsigned long)addr);
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		panic("panic_on_warn set ...\n");
	if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC)
		panic("kasan.fault=panic set ...\n");
	kasan_enable_current();
}

static void print_track(struct kasan_track *track, const char *prefix)
{
	pr_err("%s by task %u:\n", prefix, track->pid);
	if (track->stack) {
		stack_depot_print(track->stack);
	} else {
		pr_err("(stack is not available)\n");
	}
}
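
/*
 * Example of the output produced above, abridged from a typical report
 * (the exact frames depend on the kernel and the allocation site):
 *
 *	Allocated by task 586:
 *	 kasan_save_stack+0x1e/0x40
 *	 __kasan_kmalloc+0x7f/0x90
 *	 kmem_cache_alloc_trace+0x128/0x240
 *	 ...
 */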

struct page *kasan_addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

struct slab *kasan_addr_to_slab(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_slab(addr);
	return NULL;
}

static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %px\n"
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %d-byte region [%px, %px)\n",
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}
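
/*
 * For example, an access 4 bytes past the end of a 128-byte object is
 * reported as located "4 bytes to the right of 128-byte region", and an
 * access 4 bytes before its start as "4 bytes to the left" of it.
 */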

static void describe_object_stacks(struct kmem_cache *cache, void *object,
					const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	struct kasan_track *free_track;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		print_track(&alloc_meta->alloc_track, "Allocated");
		pr_err("\n");
	}

	free_track = kasan_get_free_track(cache, object, tag);
	if (free_track) {
		print_track(free_track, "Freed");
		pr_err("\n");
	}

#ifdef CONFIG_KASAN_GENERIC
	if (!alloc_meta)
		return;
	if (alloc_meta->aux_stack[0]) {
		pr_err("Last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[0]);
		pr_err("\n");
	}
	if (alloc_meta->aux_stack[1]) {
		pr_err("Second to last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[1]);
		pr_err("\n");
	}
#endif
}

static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(cache, object, addr, tag);
	describe_object_addr(cache, object, addr);
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (is_kernel((unsigned long)addr))
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct slab *slab = page_slab(page);
		struct kmem_cache *cache = slab->slab_cache;
		void *object = nearest_obj(cache, slab, addr);

		describe_object(cache, object, addr, tag);
		pr_err("\n");
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
		pr_err("\n");
	}

	if (object_is_on_stack(addr)) {
		/*
		 * Currently, KASAN supports printing frame information only
		 * for accesses to the task's own stack.
		 */
		kasan_print_address_stack_frame(addr);
		pr_err("\n");
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);
			pr_err("\n");

			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
		pr_err("\n");
	}
}

static bool meta_row_is_guilty(const void *row, const void *addr)
{
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
}

static int meta_pointer_offset(const void *row, const void *addr)
{
	/*
	 * Memory state around the buggy address:
	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *  ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule's metadata is 2 chars
	 *    plus 1 char for the space.
	 */
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}
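
/*
 * Worked example, assuming a 64-bit kernel and generic KASAN's 8-byte
 * granules: the row prefix takes 3 + 16 = 19 characters, so an address
 * 24 bytes into the row yields 19 + (24 / 8) * 3 + 1 = 29, the column of
 * the first hex digit of the guilty granule's metadata. The caller below
 * right-aligns the '^' marker to that column via the "%*c" format.
 */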

static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to a generic
		 * function, because generic functions may try to
		 * access the KASAN mapping for the passed address.
		 */
		kasan_metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}
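
/*
 * Example output, abridged (generic KASAN on a 64-bit kernel, where each
 * metadata byte covers an 8-byte granule, so every row covers 128 bytes):
 *
 *	Memory state around the buggy address:
 *	 ffff8801f44ec200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *	>ffff8801f44ec280: 00 00 00 07 fc fc fc fc fc fc fc fc fc fc fc fc
 *	                            ^
 */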

static bool report_enabled(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return false;
#endif
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}
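
/*
 * Net effect of the bits above: only the first report since boot is
 * printed by default; booting with "kasan_multi_shot" (or using the
 * save/enable/restore helpers above) lifts that one-report limit.
 */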

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_report_async(void)
{
	unsigned long flags;

	start_report(&flags, false);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous mode enabled: no access details available\n");
	pr_err("\n");
	dump_stack_lvl(KERN_ERR);
	end_report(&flags, NULL);
}
#endif /* CONFIG_KASAN_HW_TAGS */

static void print_report(struct kasan_access_info *info)
{
	void *tagged_addr = info->access_addr;
	void *untagged_addr = kasan_reset_tag(tagged_addr);
	u8 tag = get_tag(tagged_addr);

	print_error_description(info);
	if (addr_has_metadata(untagged_addr))
		kasan_print_tags(tag, info->first_bad_addr);
	pr_err("\n");

	if (addr_has_metadata(untagged_addr)) {
		print_address_description(untagged_addr, tag);
		print_memory_metadata(info->first_bad_addr);
	} else {
		dump_stack_lvl(KERN_ERR);
	}
}

void kasan_report_invalid_free(void *ptr, unsigned long ip)
{
	unsigned long flags;
	struct kasan_access_info info;

	start_report(&flags, true);

	info.type = KASAN_REPORT_INVALID_FREE;
	info.access_addr = ptr;
	info.first_bad_addr = kasan_reset_tag(ptr);
	info.access_size = 0;
	info.is_write = false;
	info.ip = ip;

	print_report(&info);

	end_report(&flags, ptr);
}

bool kasan_report(unsigned long addr, size_t size, bool is_write,
			unsigned long ip)
{
	bool ret = true;
	void *ptr = (void *)addr;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;
	struct kasan_access_info info;

	if (unlikely(!report_enabled())) {
		ret = false;
		goto out;
	}

	start_report(&irq_flags, true);

	info.type = KASAN_REPORT_ACCESS;
	info.access_addr = ptr;
	info.first_bad_addr = kasan_find_first_bad_addr(ptr, size);
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	print_report(&info);

	end_report(&irq_flags, ptr);

out:
	user_access_restore(ua_flags);

	return ret;
}
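
/*
 * kasan_report() above is the common reporting entry point for all KASAN
 * modes: the mode-specific access checks (e.g. the generic mode's shadow
 * checks) call it on a bad access, and the return value tells the caller
 * whether a report was actually printed.
 */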

#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
 * before the actual access. For addresses in the low canonical half of the
 * address space, as well as most non-canonical addresses, that out-of-bounds
 * shadow memory access lands in the non-canonical part of the address space.
 * Help the user figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to shadow for low canonical addresses, we
	 * can still be pretty sure - that shadow region is a fairly narrow
	 * chunk of the non-canonical address space.
	 * But faults that look like shadow for non-canonical addresses are a
	 * really large chunk of the address space. In that case, we still
	 * print the decoded address, but make it clear that this is not
	 * necessarily what's actually going on.
	 */
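	/*
	 * Worked example, assuming generic KASAN's x86-64 values of
	 * KASAN_SHADOW_OFFSET == 0xdffffc0000000000 and
	 * KASAN_SHADOW_SCALE_SHIFT == 3: a fault at address
	 * 0xdffffc0000000000 decodes to orig_addr == 0x0 and is reported
	 * below as a null-ptr-deref in the range [0x0, 0x7].
	 */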
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif