report.c 13.4 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

12
#include <linux/bitops.h>
13
#include <linux/ftrace.h>
14
#include <linux/init.h>
15 16 17 18 19
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
20
#include <linux/stackdepot.h>
21 22 23 24
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
25
#include <linux/module.h>
26
#include <linux/sched/task_stack.h>
27
#include <linux/uaccess.h>
28
#include <trace/events/error_report.h>
29

30 31
#include <asm/sections.h>

P
Patricia Alfonso 已提交
32 33
#include <kunit/test.h>

34
#include "kasan.h"
35
#include "../slab.h"
36

37
/* Global reporting state, manipulated atomically via the bit numbers below. */
static unsigned long kasan_flags;

/* Set once the first report has been printed (one-shot reporting mode). */
#define KASAN_BIT_REPORTED	0
/* When set, every report is printed instead of only the first one. */
#define KASAN_BIT_MULTI_SHOT	1
41

42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
/* How to react to a KASAN report: print it, or panic the kernel. */
enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;

/* kasan.fault=report/panic */
static int __init early_kasan_fault(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "report")) {
		kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
		return 0;
	}
	if (!strcmp(arg, "panic")) {
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
		return 0;
	}

	return -EINVAL;
}
early_param("kasan.fault", early_kasan_fault);

67
bool kasan_save_enable_multi_shot(void)
68
{
69
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
70
}
71
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
72

73
/*
 * Restore the multi-shot state previously returned by
 * kasan_save_enable_multi_shot(): clear the bit only if it was clear.
 */
void kasan_restore_multi_shot(bool enabled)
{
	if (enabled)
		return;

	clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
79

80
/* Boot parameter "kasan_multi_shot": print every report, not just the first. */
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
A
Andrey Konovalov 已提交
86

87
static void print_error_description(struct kasan_access_info *info)
88
{
A
Andrey Konovalov 已提交
89
	pr_err("BUG: KASAN: %s in %pS\n",
90
		kasan_get_bug_type(info), (void *)info->ip);
91 92 93 94 95 96 97 98
	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read", info->access_size,
			info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_addr, current->comm, task_pid_nr(current));
99 100
}

101 102
/* Serializes report output from different CPUs/contexts. */
static DEFINE_SPINLOCK(report_lock);

/*
 * Open a report: suppress nested KASAN checks, take the report lock
 * (IRQs disabled; *flags saved for end_report()) and print the header.
 */
static void start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

113
/*
 * Close a report started by start_report(): emit the error-report
 * tracepoint (only for synchronous faults, where @addr is meaningful),
 * print the footer, taint the kernel, drop the report lock, honor
 * panic_on_warn / kasan.fault=panic, and re-enable KASAN checks.
 */
static void end_report(unsigned long *flags, unsigned long addr)
{
	if (!kasan_async_fault_possible())
		trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	/* With multi-shot enabled, keep running instead of panicking. */
	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		panic("panic_on_warn set ...\n");
	if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC)
		panic("kasan.fault=panic set ...\n");
	kasan_enable_current();
}

127
static void print_track(struct kasan_track *track, const char *prefix)
A
Alexander Potapenko 已提交
128
{
129
	pr_err("%s by task %u:\n", prefix, track->pid);
130
	if (track->stack) {
131
		stack_depot_print(track->stack);
132 133 134
	} else {
		pr_err("(stack is not available)\n");
	}
A
Alexander Potapenko 已提交
135 136
}

137
struct page *kasan_addr_to_page(const void *addr)
138 139 140 141 142 143 144
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

145 146 147 148 149 150 151 152
/*
 * Translate a kernel virtual address to its slab, or return NULL when
 * the address lies outside the linear mapping.
 */
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (addr < (void *)PAGE_OFFSET || addr >= high_memory)
		return NULL;

	return virt_to_slab(addr);
}

153 154
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
A
Alexander Potapenko 已提交
155
{
156 157 158 159
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;
A
Alexander Potapenko 已提交
160

161
	pr_err("The buggy address belongs to the object at %px\n"
162 163
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);
164

165 166 167 168 169 170 171 172 173 174 175 176
	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
177
	       " %d-byte region [%px, %px)\n",
178 179 180 181
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

182 183
/*
 * Print the stack traces recorded for a slab object: allocation stack,
 * free stack, and (generic KASAN only) the auxiliary stacks.
 */
static void describe_object_stacks(struct kmem_cache *cache, void *object,
					const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	struct kasan_track *free_track;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		print_track(&alloc_meta->alloc_track, "Allocated");
		pr_err("\n");
	}

	/* @tag selects the matching free record for SW/HW tag modes. */
	free_track = kasan_get_free_track(cache, object, tag);
	if (free_track) {
		print_track(free_track, "Freed");
		pr_err("\n");
	}

#ifdef CONFIG_KASAN_GENERIC
	/* aux_stack[] entries record related "work creation" call sites. */
	if (!alloc_meta)
		return;
	if (alloc_meta->aux_stack[0]) {
		pr_err("Last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[0]);
		pr_err("\n");
	}
	if (alloc_meta->aux_stack[1]) {
		pr_err("Second to last potentially related work creation:\n");
		stack_depot_print(alloc_meta->aux_stack[1]);
		pr_err("\n");
	}
#endif
}
215

216 217 218 219 220
/*
 * Print everything known about a slab object: alloc/free stack traces
 * (only when stack collection is enabled) followed by its address and
 * cache information.
 */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(cache, object, addr, tag);
	describe_object_addr(cache, object, addr);
}

224 225
/* Does @addr point into the kernel image or into a loaded module? */
static inline bool kernel_or_module_addr(const void *addr)
{
	return is_kernel((unsigned long)addr) ||
	       is_module_address((unsigned long)addr);
}

/*
 * Does @addr fall within the init task's statically allocated stack?
 * NOTE(review): the upper bound is inclusive (<=), so the one-past-end
 * address also matches — confirm this is intentional.
 */
static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

240
/*
 * Print everything known about @addr: the current stack, the slab
 * object it belongs to (if any), the global variable it matches (if
 * any), the vmalloc area covering it (if any), the backing physical
 * page, and finally the stack-frame description.
 */
static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct slab *slab = page_slab(page);
		struct kmem_cache *cache = slab->slab_cache;
		void *object = nearest_obj(cache, slab,	addr);

		describe_object(cache, object, addr, tag);
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);

			/*
			 * Bug fix: vmalloc_to_page() takes the faulting
			 * virtual address, not the (possibly NULL)
			 * struct page pointer that was passed before.
			 * This resolves the page backing the vmalloc
			 * mapping so it can be dumped below.
			 */
			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
	}

	kasan_print_address_stack_frame(addr);
}

281
static bool meta_row_is_guilty(const void *row, const void *addr)
282
{
283
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
284 285
}

286
/*
 * Compute the column at which to print the '^' marker pointing at the
 * guilty granule within a printed metadata row.
 */
static int meta_pointer_offset(const void *row, const void *addr)
{
	/*
	 * Memory state around the buggy address:
	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *  ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule metadata is 2 bytes
	 *    plus 1 byte for space.
	 */
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}

302
/*
 * Dump the KASAN metadata for the rows of memory around @addr, marking
 * the guilty row with '>' and the guilty granule with a '^' below it.
 */
static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	/* Start META_ROWS_AROUND_ADDR rows before the row containing @addr. */
	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		/* Row prefix: '>' or ' ', the row address, ": ", and a NUL. */
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		kasan_metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}

337
/*
 * Should a new report be printed?  False once a report has already been
 * printed, unless multi-shot mode is enabled or reporting is suppressed
 * for the current task.
 */
static bool report_enabled(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	/* Non-zero kasan_depth suppresses reporting — see start_report(). */
	if (current->kasan_depth)
		return false;
#endif
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	/* One-shot mode: only the first caller to set the bit may report. */
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

P
Patricia Alfonso 已提交
348
#if IS_ENABLED(CONFIG_KUNIT)
/*
 * Record in the running KUnit test's "kasan_status" resource that a
 * KASAN report was produced and whether the fault was synchronous;
 * fail the test outright if the resource is missing.
 */
static void kasan_update_kunit_status(struct kunit *cur_test, bool sync)
{
	struct kunit_resource *resource;
	struct kunit_kasan_status *status;

	resource = kunit_find_named_resource(cur_test, "kasan_status");

	if (!resource) {
		kunit_set_failure(cur_test);
		return;
	}

	/* NOTE(review): WRITE_ONCE suggests concurrent readers — confirm. */
	status = (struct kunit_kasan_status *)resource->data;
	WRITE_ONCE(status->report_found, true);
	WRITE_ONCE(status->sync_fault, sync);

	kunit_put_resource(resource);
}
#endif /* IS_ENABLED(CONFIG_KUNIT) */

368
/* Report a double-free or invalid-free of @object detected at @ip. */
void kasan_report_invalid_free(void *object, unsigned long ip)
{
	unsigned long flags;
	u8 tag = get_tag(object);

	/* Strip the pointer tag before describing the raw address. */
	object = kasan_reset_tag(object);

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test, true);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
	kasan_print_tags(tag, object);
	pr_err("\n");
	print_address_description(object, tag);
	pr_err("\n");
	print_memory_metadata(object);
	end_report(&flags, (unsigned long)object);
}

390 391 392 393 394
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_report_async(void)
{
	unsigned long flags;

395 396
#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
397
		kasan_update_kunit_status(current->kunit_test, false);
398 399
#endif /* IS_ENABLED(CONFIG_KUNIT) */

400 401 402 403
	start_report(&flags);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous mode enabled: no access details available\n");
	pr_err("\n");
404
	dump_stack_lvl(KERN_ERR);
405 406 407 408
	end_report(&flags, 0);
}
#endif /* CONFIG_KASAN_HW_TAGS */

409 410
/*
 * Produce a full report for an invalid access of @size bytes at @addr
 * (read or write per @is_write, detected at @ip): error description,
 * tags and address description when metadata exists for the address,
 * and the surrounding metadata dump — all under the report lock.
 */
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
				unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test, true);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	/* NOTE(review): presumably honors traceoff_on_warning — confirm. */
	disable_trace_on_warning();

	tagged_addr = (void *)addr;
	untagged_addr = kasan_reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	/* Without metadata, the access address is the best "first bad" guess. */
	if (addr_has_metadata(untagged_addr))
		info.first_bad_addr =
			kasan_find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	start_report(&flags);

	print_error_description(&info);
	if (addr_has_metadata(untagged_addr))
		kasan_print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (addr_has_metadata(untagged_addr)) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		pr_err("\n");
		print_memory_metadata(info.first_bad_addr);
	} else {
		/* No shadow/tag metadata: only the current stack is useful. */
		dump_stack_lvl(KERN_ERR);
	}

	end_report(&flags, addr);
}
454

455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470
/*
 * Entry point for reporting an invalid access.  Saves/restores the
 * user-access (SMAP/PAN) state around the report and returns whether a
 * report was actually printed (false when reporting is suppressed).
 */
bool kasan_report(unsigned long addr, size_t size, bool is_write,
			unsigned long ip)
{
	bool reported = false;
	unsigned long ua_flags = user_access_save();

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		reported = true;
	}

	user_access_restore(ua_flags);
	return reported;
}

471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506
#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
 * before the actual access. For addresses in the low canonical half of the
 * address space, as well as most non-canonical addresses, that out-of-bounds
 * shadow memory access lands in the non-canonical part of the address space.
 * Help the user figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to shadow for low canonical addresses, we
	 * can still be pretty sure - that shadow region is a fairly narrow
	 * chunk of the non-canonical address space.
	 * But faults that look like shadow for non-canonical addresses are a
	 * really large chunk of the address space. In that case, we still
	 * print the decoded address, but make it clear that this is not
	 * necessarily what's actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
507
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
508 509
}
#endif