/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
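
/*
 * Illustrative sketch (not part of this header; the "foo" names are
 * hypothetical): creating a type-stable cache whose users follow the
 * lookup/revalidation scheme described above:
 *
 *  foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                 SLAB_TYPESAFE_BY_RCU, NULL);
 *
 * Objects freed with kmem_cache_free() may be reused immediately, but
 * the backing memory is not returned to the page allocator until after
 * an RCU grace period.
 */
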
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
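
/*
 * Illustrative sketch (hypothetical pointer "p"): a zero-byte request
 * yields ZERO_SIZE_PTR, which is distinguishable from NULL (allocation
 * failure) yet still safe to pass to kfree():
 *
 *  void *p = kmalloc(0, GFP_KERNEL);   (p == ZERO_SIZE_PTR)
 *
 *  if (ZERO_OR_NULL_PTR(p))
 *      ...                             (failed or zero-sized request)
 *
 *  kfree(p);                           (no-op in both cases)
 */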

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
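
/*
 * Illustrative sketch (not part of this header; "struct foo" and its
 * "name" field are hypothetical):
 *
 *  struct foo {
 *      int refcount;
 *      char name[32];
 *  };
 *
 *  foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * To additionally whitelist just the "name" field for hardened-usercopy
 * checked copies to/from userspace:
 *
 *  foo_cachep = KMEM_CACHE_USERCOPY(foo, SLAB_PANIC, name);
 */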

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
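
/*
 * Illustrative sketch of the usual krealloc() growth pattern ("buf" and
 * "new_size" are hypothetical). Assign through a temporary so the old
 * buffer is not leaked when krealloc() fails and returns NULL:
 *
 *  tmp = krealloc(buf, new_size, GFP_KERNEL);
 *  if (!tmp)
 *      return -ENOMEM;                 (buf is still valid here)
 *  buf = tmp;
 *
 * Note that ksize(buf) may report more usable bytes than were requested,
 * since requests are rounded up to the underlying cache's object size.
 */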

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object must be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
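
/*
 * Worked examples, assuming the common KMALLOC_MIN_SIZE <= 32 case:
 * kmalloc_index(96) returns 1 (the special 96-byte cache),
 * kmalloc_index(100) falls through to return 7 (the 128-byte cache),
 * and kmalloc_index(200) returns 8 (the 256-byte cache).
 */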
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
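
/*
 * Illustrative sketch of the bulk API (hypothetical cache "foo_cachep"):
 * kmem_cache_alloc_bulk() returns the number of objects allocated, or 0
 * on failure, in which case the caller has nothing to free:
 *
 *  void *objs[16];
 *
 *  if (!kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, 16, objs))
 *      return -ENOMEM;
 *  ...
 *  kmem_cache_free_bulk(foo_cachep, 16, objs);
 */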

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
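
/*
 * Illustrative sketch (hypothetical "struct foo"): a plain NULL check
 * suffices here, since a sizeof()-based request can never be zero:
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *  if (!f)
 *      return -ENOMEM;
 */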

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
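
/*
 * Illustrative sketch (hypothetical "dev" and "size"): allocating on the
 * NUMA node closest to a device:
 *
 *  buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 *
 * Passing NUMA_NO_NODE expresses no placement preference and behaves
 * like plain kmalloc().
 */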

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
603 604 605
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
618 619
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
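
/*
 * Illustrative sketch (hypothetical "n" and "struct foo"): prefer
 * kcalloc()/kmalloc_array() over an open-coded "n * size" argument,
 * since the multiplication above is checked with check_mul_overflow():
 *
 *  struct foo *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
 *
 *  if (!arr)
 *      return -ENOMEM;
 */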

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
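
/*
 * Illustrative sketch (hypothetical helper): a widely used wrapper can
 * use kmalloc_track_caller() so that leak reports blame the wrapper's
 * caller rather than the wrapper itself:
 *
 *  void *my_alloc_helper(size_t len)
 *  {
 *      return kmalloc_track_caller(len, GFP_KERNEL);
 *  }
 */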

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */