/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
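/*
 * An illustrative sketch of setting up such a cache (the struct, cache and
 * constructor names below are hypothetical, not part of this header): state
 * that the lockless lookup above depends on, e.g. the lock and refcount, is
 * initialized once by the constructor and therefore stays valid even while
 * an object is free or has been reused for a new object of the same cache.
 *
 *  static struct kmem_cache *obj_cachep;
 *
 *  static void obj_ctor(void *p)
 *  {
 *    struct obj *o = p;
 *
 *    spin_lock_init(&o->lock);
 *    atomic_set(&o->refcnt, 0);
 *  }
 *
 *  obj_cachep = kmem_cache_create("obj_cache", sizeof(struct obj), 0,
 *                                 SLAB_DESTROY_BY_RCU, obj_ctor);
 */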
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
#else
# define SLAB_ACCOUNT		0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
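
/*
 * An illustrative sketch of the resulting behaviour (not part of the
 * original header): a zero sized request yields ZERO_SIZE_PTR, which must
 * never be dereferenced but may be passed to kfree() just like NULL.
 *
 *  void *p = kmalloc(0, GFP_KERNEL); // p == ZERO_SIZE_PTR
 *
 *  if (!ZERO_OR_NULL_PTR(p))
 *    use(p);                         // only touch real allocations
 *  kfree(p);                         // no-op for ZERO_SIZE_PTR and NULL
 */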

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
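
/*
 * An illustrative sketch of the macro in use (struct and cache names are
 * hypothetical, not taken from this header):
 *
 *  struct foo {
 *    spinlock_t lock;
 *    int value;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *foo_cachep;
 *
 *  foo_cachep = KMEM_CACHE(foo, SLAB_PANIC | SLAB_HWCACHE_ALIGN);
 *  ...
 *  struct foo *f = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);
 *  ...
 *  kmem_cache_free(foo_cachep, f);
 */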

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
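
/*
 * An illustrative sketch of krealloc() (the buffer names are hypothetical):
 * on failure it returns NULL and leaves the original buffer untouched, so
 * the old pointer must not be overwritten before checking the result.
 *
 *  char *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *  if (!tmp) {
 *    kfree(buf);
 *    return -ENOMEM;
 *  }
 *  buf = tmp;
 */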

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page);
#else
static inline const char *__check_heap_object(const void *ptr,
					      unsigned long n,
					      struct page *page)
{
	return NULL;
}
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use a
 * byte sized index which can represent 2^8 entries, the object size must
 * be equal to or greater than 2^12 / 2^8 = 2^4 = 16 bytes.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
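
/*
 * Worked examples of the mapping above (assuming KMALLOC_MIN_SIZE <= 32):
 * kmalloc_index(96) returns 1, selecting the special 96 byte cache, while
 * kmalloc_index(100) returns 7, selecting the 128 byte cache.
 */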
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
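
/*
 * An illustrative sketch of the bulk API (the cache pointer is hypothetical):
 * kmem_cache_alloc_bulk() returns the number of objects allocated (the full
 * request on success) or 0 on failure.
 *
 *  void *objs[16];
 *
 *  if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *    return -ENOMEM;
 *  ...
 *  kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
 */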

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
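
/*
 * An illustrative sketch of typical use (the struct is hypothetical):
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *  if (!f)
 *    return -ENOMEM;
 *  ...
 *  kfree(f);
 */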

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
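
/*
 * An illustrative sketch (variables are hypothetical): allocate memory close
 * to a given CPU by naming its NUMA node explicitly.
 *
 *  buf = kmalloc_node(len, GFP_KERNEL, cpu_to_node(cpu));
 */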

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(n * size, flags);
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
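
/*
 * An illustrative sketch (names are hypothetical): a zeroed array of nr
 * entries, with the nr * sizeof(*entries) multiplication overflow-checked
 * by kcalloc() rather than open-coded.
 *
 *  entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 *  if (!entries)
 *    return -ENOMEM;
 */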

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */