// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

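/*
 * Capture the current call stack, filter out IRQ entry frames, and store the
 * result in the stack depot, returning a handle suitable for keeping in
 * object metadata.
 */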
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

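/* Record the current task's pid and a saved stack trace in the given track slot. */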
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

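/* Unpoison an address range; thin wrapper around the internal unpoison_range(). */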
void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	unpoison_range(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}

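/*
 * Assign one tag to all pages of the allocation (for tag-based KASAN) and
 * unpoison the whole range. Highmem pages are not covered by KASAN and are
 * skipped.
 */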
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

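/* Poison freed (non-highmem) pages so that any later access is reported. */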
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
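/*
 * With generic KASAN, for example, a 96-byte object gets a 32-byte redzone
 * (96 <= 128 - 32), while a 3000-byte object gets a 128-byte redzone
 * (3000 <= 4096 - 128).
 */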
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

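/*
 * Reserve space for the alloc/free metadata and the redzone in each object
 * of the cache, growing *size accordingly. If the metadata does not fit,
 * the cache is left without SLAB_KASAN.
 */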
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size -	(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

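/* Size of the per-object KASAN metadata reserved by kasan_cache_create(). */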
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

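/*
 * Poison a freshly allocated slab page as redzone and reset its page tags;
 * individual objects are unpoisoned later, as they are handed out.
 */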
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

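/*
 * Zero the object's alloc metadata and, for tag-based KASAN, embed a
 * preassigned tag into the pointer returned to the slab allocator.
 */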
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

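/*
 * Returns true when KASAN takes ownership of the object (an invalid free was
 * reported or the object was placed into the quarantine), in which case the
 * slab allocator must not free it immediately.
 */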
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

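/*
 * Unpoison the requested size, poison the rest of the object as redzone,
 * assign or keep the pointer tag, and record the allocation stack trace.
 */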
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

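/*
 * For page-backed (non-slab) allocations: unpoison the used part and poison
 * the remainder of the page(s) as redzone.
 */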
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

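/*
 * Poison an object that is being released outside the regular slab free
 * path: page-backed allocations are poisoned directly (after validating that
 * ptr is the start of the allocation), slab objects go through
 * __kasan_slab_free() without being quarantined.
 */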
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}