// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		"WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
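
/*
 * Illustrative sketch (not part of this file): the two loops above are the
 * generic fallbacks behind kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk().  Bulk allocation is all-or-nothing: it returns
 * the number of objects allocated, or 0 after undoing any partial
 * allocation.  A caller batching allocations from a hypothetical cache
 * "foo_cachep" might use the public API roughly like this:
 *
 *	void *objs[16];
 *	size_t got;
 *
 *	got = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				    ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;
 *	(use objs[0] .. objs[got - 1], then release the whole batch)
 *	kmem_cache_free_bulk(foo_cachep, got, objs);
 */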

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
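
/*
 * Worked example of the calculation above, assuming a 64-byte cache line:
 * for a 24-byte object created with SLAB_HWCACHE_ALIGN and no explicit
 * alignment, ralign starts at 64 and is halved while the object still fits
 * in half of it (24 <= 32), ending at ralign = 32.  The result is
 * max(align, 32), raised to ARCH_SLAB_MINALIGN if needed and rounded up to
 * a multiple of sizeof(void *), i.e. a 32-byte alignment here.
 */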

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
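
/*
 * Illustrative sketch (hypothetical structure and cache names): whitelist
 * only the user-visible buffer of an object so that hardened usercopy
 * accepts copy_to_user()/copy_from_user() on that region and rejects
 * everything else:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, data),
 *				sizeof_field(struct foo, data), NULL);
 */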

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
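
/*
 * Illustrative sketch (hypothetical names): the typical lifecycle of a
 * cache created with this interface:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	(use obj, then)
 *	kmem_cache_free(foo_cachep, obj);
 *
 *	kmem_cache_destroy(foo_cachep);	(at module unload / teardown)
 */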

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_cache are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished.  As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	if (err) {
		pr_err("%s %s: Slab cache still has objects\n",
		       __func__, s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct page *page;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	page = virt_to_head_page(object);
	return PageSlab(page);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);
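
/*
 * Illustrative sketch (hypothetical caller): a debugging path might use
 * kmem_valid_obj() to decide whether a pointer can be interpreted as a
 * live slab object before dumping it:
 *
 *	if (kmem_valid_obj(ptr)) {
 *		pr_info("ptr %px", ptr);
 *		kmem_dump_obj(ptr);
 *	} else {
 *		pr_info("not a slab object: %px\n", ptr);
 *	}
 *
 * Callers that do not know whether the pointer is slab, vmalloc or page
 * memory should use mem_dump_obj() instead, as noted below.
 */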

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct page *page;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	page = virt_to_head_page(object);
	if (WARN_ON_ONCE(!PageSlab(page))) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, page);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
		pr_cont(" size %u", kp.kp_slab_cache->usersize);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags, useroffset, usersize);
	kasan_cache_create_kmalloc(s);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}
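
/*
 * Worked example of the lookup above: a kmalloc(100, GFP_KERNEL) request
 * takes the small-size path, where size_index_elem(100) = (100 - 1) / 8 =
 * 12 and size_index[12] = 7, so it is served from kmalloc-128.  A 300-byte
 * request takes the large path instead: fls(299) = 9, so it is served from
 * kmalloc-512.  kmalloc_type() then selects the normal, reclaimable, DMA
 * or cgroup-accounted variant of that size class based on the gfp flags.
 */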

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
 * kmalloc-32M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M),
	INIT_KMALLOC_INFO(4194304, 4M),
	INIT_KMALLOC_INFO(8388608, 8M),
	INIT_KMALLOC_INFO(16777216, 16M),
	INIT_KMALLOC_INFO(33554432, 32M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
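
/*
 * Worked example of the patching above: on an architecture where
 * KMALLOC_MIN_SIZE is 64, the entries for sizes 8..56 are redirected to
 * the KMALLOC_SHIFT_LOW (64-byte) cache, and since the 96-byte cache is
 * unusable at that alignment, sizes 72..96 map to the 128-byte cache
 * (index 7).  With KMALLOC_MIN_SIZE of 128, the 192-byte cache is skipped
 * as well and sizes 136..192 map to the 256-byte cache (index 8).
 */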

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (cgroup_memory_nokmem) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		kmalloc_caches[type][idx]->refcount = -1;
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * The loop below also covers KMALLOC_CGROUP when CONFIG_MEMCG_KMEM
	 * is defined.
	 */
	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not a power-of-two size (the 96
			 * and 192 byte caches) have to be created right
			 * after the preceding power-of-two caches.
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

		if (s) {
			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
				kmalloc_info[i].name[KMALLOC_DMA],
				kmalloc_info[i].size,
				SLAB_CACHE_DMA | flags, 0,
				kmalloc_info[i].size);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to sleep
	 * in the OOM path, but traversing the list without the mutex risks
	 * a crash.  Use mutex_trylock to protect the traversal and dump
	 * nothing if the mutex cannot be taken.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG_KMEM)
int memcg_slab_show(struct seq_file *m, void *p)
{
	/*
	 * Deprecated.
	 * Please, take a look at tools/cgroup/slabinfo.py .
	 */
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = kfence_ksize(p) ?: __ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
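
/*
 * Illustrative sketch (hypothetical buffer): growing an allocation with
 * krealloc().  On failure the original buffer is left untouched, so the
 * caller still owns it and must free it:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = new;
 */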

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
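
/*
 * Illustrative sketch (hypothetical key buffer): kfree_sensitive() is a
 * drop-in replacement for kfree() when the allocation held secrets, such
 * as a temporary copy of key material:
 *
 *	key = kmemdup(user_key, key_len, GFP_KERNEL);
 *	if (!key)
 *		return -ENOMEM;
 *	(derive whatever is needed from the key)
 *	kfree_sensitive(key);
 */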

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to first check that the pointer to the object is valid, and
	 * only then unpoison the memory. The report printed from ksize() is
	 * more useful than one printed later, when the behaviour could
	 * be undefined due to a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
1288 1289 1290 1291 1292
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	size = kfence_ksize(objp) ?: __ksize(objp);
	/*
	 * We assume that ksize callers could use whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);
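
/*
 * Illustrative sketch (hypothetical buffer): since kmalloc() rounds sizes
 * up to the next cache size, a caller may grow into the slack reported by
 * ksize() instead of reallocating straight away:
 *
 *	buf = kmalloc(count, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	capacity = ksize(buf);
 *	(append up to capacity bytes before a krealloc() is needed)
 */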

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);