/*
 * This file contains error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
#define SHADOW_BLOCKS_PER_ROW 16
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2

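/*
 * Scan the shadow for the accessed range and return the address of the
 * first byte whose shadow value is non-zero, i.e. the first poisoned
 * granule touched by the access.
 */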
static const void *find_first_bad_addr(const void *addr, size_t size)
{
	u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr);
	const void *first_bad_addr = addr;

	while (!shadow_val && first_bad_addr < addr + size) {
		first_bad_addr += KASAN_SHADOW_SCALE_SIZE;
		shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr);
	}
	return first_bad_addr;
}

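/* True if the access address has a corresponding shadow byte. */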
static bool addr_has_shadow(struct kasan_access_info *info)
{
	return (info->access_addr >=
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}

static const char *get_shadow_bug_type(struct kasan_access_info *info)
{
	const char *bug_type = "unknown-crash";
	u8 *shadow_addr;

	info->first_bad_addr = find_first_bad_addr(info->access_addr,
						info->access_size);

	shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);

	/*
	 * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
	 * at the next shadow byte to determine the type of the bad access.
	 */
	if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
		shadow_addr++;

	switch (*shadow_addr) {
	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
		/*
		 * In theory it's still possible to see these shadow values
		 * due to a data race in the kernel code.
		 */
		bug_type = "out-of-bounds";
		break;
	case KASAN_PAGE_REDZONE:
	case KASAN_KMALLOC_REDZONE:
		bug_type = "slab-out-of-bounds";
		break;
	case KASAN_GLOBAL_REDZONE:
		bug_type = "global-out-of-bounds";
		break;
	case KASAN_STACK_LEFT:
	case KASAN_STACK_MID:
	case KASAN_STACK_RIGHT:
	case KASAN_STACK_PARTIAL:
		bug_type = "stack-out-of-bounds";
		break;
	case KASAN_FREE_PAGE:
	case KASAN_KMALLOC_FREE:
		bug_type = "use-after-free";
		break;
	case KASAN_USE_AFTER_SCOPE:
		bug_type = "use-after-scope";
		break;
	}

	return bug_type;
}

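/*
 * Accesses with no shadow byte are classified by the raw address: below
 * PAGE_SIZE is a NULL-pointer dereference, below TASK_SIZE is a
 * user-space address, anything else is a wild pointer.
 */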
static const char *get_wild_bug_type(struct kasan_access_info *info)
{
	const char *bug_type = "unknown-crash";

	if ((unsigned long)info->access_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if ((unsigned long)info->access_addr < TASK_SIZE)
		bug_type = "user-memory-access";
	else
		bug_type = "wild-memory-access";

	return bug_type;
}

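/*
 * Pick the bug type: use the shadow byte when one exists, otherwise fall
 * back to classifying the raw address.
 */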
static const char *get_bug_type(struct kasan_access_info *info)
{
	if (addr_has_shadow(info))
		return get_shadow_bug_type(info);
	return get_wild_bug_type(info);
}

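/*
 * Print the report header: bug type, faulting IP, access size and
 * direction, accessed address and the current task.
 */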
static void print_error_description(struct kasan_access_info *info)
{
	const char *bug_type = get_bug_type(info);

	pr_err("BUG: KASAN: %s in %pS\n",
		bug_type, (void *)info->ip);
	pr_err("%s of size %zu at addr %p by task %s/%d\n",
		info->is_write ? "Write" : "Read", info->access_size,
		info->access_addr, current->comm, task_pid_nr(current));
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (addr >= (void *)_stext && addr < (void *)_end)
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

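/*
 * Reports are serialized by report_lock so that output from concurrent
 * faults does not interleave. KASAN is disabled for the current task
 * while the report is printed, so memory accesses made during reporting
 * do not themselves trigger new reports.
 */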
static DEFINE_SPINLOCK(report_lock);

static void kasan_start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in a loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

static void kasan_end_report(unsigned long *flags)
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn)
		panic("panic_on_warn set ...\n");
	kasan_enable_current();
}

static void print_track(struct kasan_track *track, const char *prefix)
{
	pr_err("%s by task %u:\n", prefix, track->pid);
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}

static struct page *addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %p\n"
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);

	if (!addr)
		return;

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %d-byte region [%p, %p)\n",
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

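/*
 * Print the allocation and free stacks recorded for the object (when the
 * cache is tracked by KASAN) and describe where the access falls relative
 * to the object.
 */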
static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	if (cache->flags & SLAB_KASAN) {
		print_track(&alloc_info->alloc_track, "Allocated");
		print_track(&alloc_info->free_track, "Freed");
	}

	describe_object_addr(cache, object, addr);
}

static void print_address_description(void *addr)
{
	struct page *page = addr_to_page(addr);

	dump_stack();

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr);
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}
}

static bool row_is_guilty(const void *row, const void *guilty)
{
	return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
}

static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/* The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG/8)*2 chars.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}

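/*
 * Dump a few rows of shadow memory around the buggy address and mark the
 * shadow byte that corresponds to it with a '^'.
 */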
static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%p: " : " %p: ", kaddr);
		/*
		 * We should not pass a shadow pointer to a generic
		 * function, because generic functions may try to
		 * access the kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}

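/* Report a double-free or invalid-free detected for a slab object. */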
void kasan_report_double_free(struct kmem_cache *cache, void *object,
				void *ip)
{
	unsigned long flags;

	kasan_start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip);
	print_address_description(object);
	print_shadow_for_address(object);
	kasan_end_report(&flags);
}

static void kasan_report_error(struct kasan_access_info *info)
{
	unsigned long flags;

	kasan_start_report(&flags);

	print_error_description(info);

	if (!addr_has_shadow(info)) {
		dump_stack();
	} else {
		print_address_description((void *)info->access_addr);
		print_shadow_for_address(info->first_bad_addr);
	}

	kasan_end_report(&flags);
}

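/*
 * By default only the first report is printed: KASAN_BIT_REPORTED is set
 * once and never cleared. Booting with "kasan_multi_shot" on the kernel
 * command line, or wrapping a code section in
 * kasan_save_enable_multi_shot()/kasan_restore_multi_shot(), allows every
 * report to be printed.
 */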
static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1

bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);

void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);

static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);

static inline bool kasan_report_enabled(void)
{
	if (current->kasan_depth)
		return false;
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

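/*
 * Top-level report entry point: fill in the access info and hand it to
 * kasan_report_error() unless reporting is currently suppressed.
 */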
void kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip)
{
	struct kasan_access_info info;

	if (likely(!kasan_report_enabled()))
		return;

	disable_trace_on_warning();

	info.access_addr = (void *)addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	kasan_report_error(&info);
}


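/*
 * Report stubs for fixed-size loads and stores; each forwards to
 * kasan_report() with the caller's return address as the report IP.
 */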
#define DEFINE_ASAN_REPORT_LOAD(size)                     \
void __asan_report_load##size##_noabort(unsigned long addr) \
{                                                         \
	kasan_report(addr, size, false, _RET_IP_);	  \
}                                                         \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)

#define DEFINE_ASAN_REPORT_STORE(size)                     \
void __asan_report_store##size##_noabort(unsigned long addr) \
{                                                          \
	kasan_report(addr, size, true, _RET_IP_);	   \
}                                                          \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)

DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);

void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);

void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);