// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

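/*
 * Capture the current stack trace, trim it with filter_irq_stacks(), and
 * store it in the stack depot, returning a handle that identifies the trace.
 */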
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

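/* Record the current task's pid and a depot handle of its stack trace. */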
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
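
/* Mark an arbitrary range of kernel memory as accessible (unpoisoned). */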
void __kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	unpoison_range(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */
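
/*
 * Page allocator hook: give every page of a non-highmem allocation the same
 * random KASAN tag and unpoison its contents.
 */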
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

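/* Page allocator hook: poison freed non-highmem pages as KASAN_FREE_PAGE. */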
void __kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

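/*
 * Cache creation hook: when stack collection is enabled, grow the object
 * layout to hold allocation/free metadata and an adaptive redzone. If the
 * metadata does not fit within KMALLOC_MAX_SIZE, fall back to the original
 * object size and leave SLAB_KASAN unset.
 */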
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	if (!kasan_stack_collection_enabled()) {
		*flags |= SLAB_KASAN;
		return;
	}

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

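/* Return the number of bytes of KASAN metadata stored per object of a cache. */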
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

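/*
 * Allocation and free metadata live at fixed offsets from the object address,
 * recorded in the cache's kasan_info; strip the pointer tag before doing the
 * address arithmetic.
 */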
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}

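/*
 * Reset the KASAN page tags of a newly allocated slab and poison the whole
 * slab as redzone; objects get unpoisoned as they are allocated.
 */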
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object, cache->object_size, KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

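/*
 * Object initialization hook: clear the allocation metadata (when it is
 * collected) and give the object its initial pointer tag.
 */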
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		if (!(cache->flags & SLAB_KASAN))
			return (void *)object;

		alloc_meta = kasan_get_alloc_meta(cache, object);
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true, false));

	return (void *)object;
}

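/*
 * Common slab free path: report invalid and double frees, poison the freed
 * object, and record the free stack. Returns true when the object must not
 * be passed back to the allocator, either because the free was invalid or
 * because the object was placed into the generic KASAN quarantine.
 */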
static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	poison_range(object, cache->object_size, KASAN_KMALLOC_FREE);

	if (!kasan_stack_collection_enabled())
		return false;

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(cache, object);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return ____kasan_slab_free(cache, object, ip, true);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
}

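/*
 * Common allocation path: unpoison the requested size, poison the remainder
 * of the object as redzone (rounded to KASAN granules), tag the returned
 * pointer, and record the allocation stack when stack collection is enabled.
 */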
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);
	tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (kasan_stack_collection_enabled() && (cache->flags & SLAB_KASAN))
		set_alloc_info(cache, (void *)object, flags);

	return set_tag(object, tag);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(__kasan_kmalloc);
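
/*
 * Hook for kmalloc allocations that fall back onto the page allocator:
 * unpoison the requested size and poison the rest of the allocation as a
 * page redzone.
 */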
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

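/*
 * krealloc hook: re-annotate the object for its new size, taking either the
 * page allocator or the slab path depending on how the object is backed.
 */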
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by kasan_free_pages(). */
}