report.c 11.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
 * This file contains common KASAN error reporting code.
4 5
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
6
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
7
 *
8
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
9
 *        Andrey Konovalov <andreyknvl@gmail.com>
10 11
 */

12
#include <linux/bitops.h>
13
#include <linux/ftrace.h>
14
#include <linux/init.h>
15 16 17 18 19
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
20
#include <linux/stackdepot.h>
21 22 23 24
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
25
#include <linux/module.h>
26
#include <linux/sched/task_stack.h>
27
#include <linux/uaccess.h>
28

29 30
#include <asm/sections.h>

P
Patricia Alfonso 已提交
31 32
#include <kunit/test.h>

33
#include "kasan.h"
34
#include "../slab.h"
35 36 37 38 39 40 41

/*
 * Shadow layout customization: controls how the "Memory state around the
 * buggy address" hex dump in a report is formatted.
 */
#define SHADOW_BYTES_PER_BLOCK 1	/* shadow bytes grouped per block */
#define SHADOW_BLOCKS_PER_ROW 16	/* blocks printed per dump row */
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2	/* rows shown before and after addr */

42
/* Reporting state, manipulated via the KASAN_BIT_* bits below. */
static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0	/* a report has already been printed */
#define KASAN_BIT_MULTI_SHOT	1	/* print every report, not just the first */
46

47
/*
 * Enable reporting of every KASAN bug (not just the first) and return the
 * previous multi-shot state so callers can restore it later via
 * kasan_restore_multi_shot().
 */
bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
52

53
/*
 * Restore the multi-shot state previously returned by
 * kasan_save_enable_multi_shot(): clear the bit only when it was not set
 * before the save.
 */
void kasan_restore_multi_shot(bool enabled)
{
	if (enabled)
		return;
	clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
59

60
/* Handle the "kasan_multi_shot" boot parameter: report every bug found. */
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
A
Andrey Konovalov 已提交
66

67
/*
 * Print the report header: bug type and faulting IP, then the access
 * kind/size, the accessed address and the current task.
 */
static void print_error_description(struct kasan_access_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n",
		get_bug_type(info), (void *)info->ip);
	pr_err("%s of size %zu at addr %px by task %s/%d\n",
		info->is_write ? "Write" : "Read", info->access_size,
		info->access_addr, current->comm, task_pid_nr(current));
}

76 77
/* Serializes report printing so concurrent reports do not interleave. */
static DEFINE_SPINLOCK(report_lock);

78
/*
 * Begin a report: suppress nested KASAN checks for this task, take
 * report_lock (saving IRQ state into *flags) and print the separator line.
 * Must be paired with end_report() using the same flags.
 */
static void start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

88
/*
 * Finish a report: print the trailing separator, taint the kernel, drop
 * report_lock, honor panic_on_warn (skipped in multi-shot mode so tests
 * can keep running) and re-enable KASAN checks for this task.
 */
static void end_report(unsigned long *flags)
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}
	kasan_enable_current();
}

106 107 108 109 110 111 112 113 114
/* Fetch a stack trace saved in the stack depot and print it. */
static void print_stack(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_print(entries, nr_entries, 0);
}

115
static void print_track(struct kasan_track *track, const char *prefix)
A
Alexander Potapenko 已提交
116
{
117
	pr_err("%s by task %u:\n", prefix, track->pid);
118
	if (track->stack) {
119
		print_stack(track->stack);
120 121 122
	} else {
		pr_err("(stack is not available)\n");
	}
A
Alexander Potapenko 已提交
123 124
}

125
struct page *kasan_addr_to_page(const void *addr)
126 127 128 129 130 131 132
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

133 134
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
A
Alexander Potapenko 已提交
135
{
136 137 138 139
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;
A
Alexander Potapenko 已提交
140

141
	pr_err("The buggy address belongs to the object at %px\n"
142 143
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);
144

145
	if (!addr)
A
Alexander Potapenko 已提交
146
		return;
147

148 149 150 151 152 153 154 155 156 157 158 159
	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
160
	       " %d-byte region [%px, %px)\n",
161 162 163 164 165
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

/*
 * Print the alloc/free tracks recorded for a slab object (when the cache
 * stores KASAN metadata), then where the address lies relative to it.
 */
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	if (cache->flags & SLAB_KASAN) {
		struct kasan_track *free_track;

		print_track(&alloc_info->alloc_track, "Allocated");
		pr_err("\n");
		free_track = kasan_get_free_track(cache, object, tag);
		if (free_track) {
			print_track(free_track, "Freed");
			pr_err("\n");
		}

#ifdef CONFIG_KASAN_GENERIC
		/*
		 * Generic KASAN also records up to two auxiliary stacks —
		 * per the message text, the stacks of potentially related
		 * work creation.
		 */
		if (alloc_info->aux_stack[0]) {
			pr_err("Last potentially related work creation:\n");
			print_stack(alloc_info->aux_stack[0]);
			pr_err("\n");
		}
		if (alloc_info->aux_stack[1]) {
			pr_err("Second to last potentially related work creation:\n");
			print_stack(alloc_info->aux_stack[1]);
			pr_err("\n");
		}
#endif
	}

	describe_object_addr(cache, object, addr);
}

198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213
/* True for addresses within the kernel image or a loaded module. */
static inline bool kernel_or_module_addr(const void *addr)
{
	return (addr >= (void *)_stext && addr < (void *)_end) ||
		is_module_address((unsigned long)addr);
}

/* True when addr falls within the init task's stack area. */
static inline bool init_task_stack_addr(const void *addr)
{
	const void *stack = (void *)&init_thread_union.stack;

	return addr >= stack &&
		addr <= stack + sizeof(init_thread_union.stack);
}

214
/*
 * Print everything known about the buggy address: the current stack, the
 * slab object it hits (if any), a matching kernel/module variable, the
 * backing page, and finally any stack-frame info.
 */
static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr, tag);
	}

	/* Global variables live in the kernel image or modules, but the
	 * init stack is part of the image too — exclude it. */
	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}

	print_address_stack_frame(addr);
}

/* Does this shadow dump row contain the guilty shadow byte? */
static bool row_is_guilty(const void *row, const void *guilty)
{
	return guilty >= row && guilty < row + SHADOW_BYTES_PER_ROW;
}

/*
 * Column at which to print the '^' marker pointing at the guilty shadow
 * byte in the hex dump produced by print_memory_metadata().
 */
static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/* The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG/8)*2 chars.
	 * Each shadow byte then occupies two hex chars plus a separator
	 * per block.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}

255
/*
 * Dump the shadow memory rows surrounding the accessed address, marking
 * the row/byte that corresponds to the access with '>' and '^'.
 */
static void print_memory_metadata(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	/* Start SHADOW_ROWS_AROUND_ADDR rows before the row containing addr. */
	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%px: " : " %px: ", kaddr);
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}

293
/*
 * Should a report be printed now?  Suppressed while the current task has
 * KASAN checks disabled; otherwise, in multi-shot mode always report,
 * else report only the first bug system-wide.
 */
static bool report_enabled(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return false;
#endif
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

P
Patricia Alfonso 已提交
304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
#if IS_ENABLED(CONFIG_KUNIT)
/*
 * Notify a running KUnit test that a KASAN report occurred: mark
 * report_found in the test's "kasan_data" resource, or fail the test
 * outright when no such resource was registered.
 */
static void kasan_update_kunit_status(struct kunit *cur_test)
{
	struct kunit_resource *resource;
	struct kunit_kasan_expectation *kasan_data;

	resource = kunit_find_named_resource(cur_test, "kasan_data");

	if (!resource) {
		kunit_set_failure(cur_test);
		return;
	}

	kasan_data = (struct kunit_kasan_expectation *)resource->data;
	kasan_data->report_found = true;
	kunit_put_resource(resource);
}
#endif /* IS_ENABLED(CONFIG_KUNIT) */

323
/*
 * Report a double-free or invalid-free of a slab object.
 * @object: (possibly tagged) pointer being freed; @ip: caller address.
 */
void kasan_report_invalid_free(void *object, unsigned long ip)
{
	unsigned long flags;
	u8 tag = get_tag(object);

	/* Strip the SW-tags pointer tag before using the address. */
	object = reset_tag(object);

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
	print_tags(tag, object);
	pr_err("\n");
	print_address_description(object, tag);
	pr_err("\n");
	print_memory_metadata(object);
	end_report(&flags);
}

345 346
/*
 * Print a full KASAN report for a bad memory access.  Assumes
 * report_enabled() was already checked by the caller (kasan_report()).
 */
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
				unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;

#if IS_ENABLED(CONFIG_KUNIT)
	if (current->kunit_test)
		kasan_update_kunit_status(current->kunit_test);
#endif /* IS_ENABLED(CONFIG_KUNIT) */

	disable_trace_on_warning();

	tagged_addr = (void *)addr;
	/* Untagged address is needed for metadata/page lookups. */
	untagged_addr = reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	if (addr_has_metadata(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	start_report(&flags);

	print_error_description(&info);
	if (addr_has_metadata(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	/* Without metadata there is nothing to describe — just dump stack. */
	if (addr_has_metadata(untagged_addr)) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		pr_err("\n");
		print_memory_metadata(info.first_bad_addr);
	} else {
		dump_stack();
	}

	end_report(&flags);
}
389

390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405
/*
 * Entry point for reporting a bad access.  Saves/restores the user-access
 * state around the report and returns whether a report was printed.
 */
bool kasan_report(unsigned long addr, size_t size, bool is_write,
			unsigned long ip)
{
	unsigned long ua_flags = user_access_save();
	bool reported = false;

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		reported = true;
	}

	user_access_restore(ua_flags);

	return reported;
}

406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441
#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
 * before the actual access. For addresses in the low canonical half of the
 * address space, as well as most non-canonical addresses, that out-of-bounds
 * shadow memory access lands in the non-canonical part of the address space.
 * Help the user figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to shadow for low canonical addresses, we
	 * can still be pretty sure - that shadow region is a fairly narrow
	 * chunk of the non-canonical address space.
	 * But faults that look like shadow for non-canonical addresses are a
	 * really large chunk of the address space. In that case, we still
	 * print the decoded address, but make it clear that this is not
	 * necessarily what's actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
442
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
443 444
}
#endif