// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
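
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file; "foo" and foo_cache are made-up names).  A typical client creates a
 * cache once and then allocates and frees fixed-size objects from it:
 *
 *	struct foo { int a; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */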

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
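
/*
 * Worked example (illustrative only, assuming 4KB pages and 8-bit bytes):
 * PAGE_SIZE >> BITS_PER_BYTE is 4096 >> 8 == 16, so when SLAB_OBJ_MIN_SIZE
 * is at least 16 the freelist index fits in an unsigned char and
 * SLAB_OBJ_MAX_NUM is 255; with the unsigned short fallback it is 65535.
 */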

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
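
/*
 * Behavioural sketch (exposition only): a free pushes the object pointer
 * into entry[avail++] and an allocation pops entry[--avail], so the most
 * recently freed, cache-warm object is handed out first.  Only when avail
 * reaches limit are batchcount objects flushed back to the shared/node
 * lists, and only when avail drops to zero is the array refilled.
 */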

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
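
/*
 * Illustrative example: for a cache with 256-byte objects, an object that
 * starts 1024 bytes past page->s_mem is object number 1024 / 256 == 4;
 * reciprocal_divide() computes that quotient with a multiply and shift via
 * the precomputed cache->reciprocal_buffer_size instead of a runtime divide.
 */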

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
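
/*
 * Example of the arithmetic above (illustrative, assuming 4KB pages and a
 * one-byte freelist_idx_t): for 128-byte objects with an on-slab freelist,
 * an order-0 slab holds num = 4096 / (128 + 1) == 31 objects with
 * left_over = 4096 - 31 * 129 == 97 bytes; with the freelist kept off-slab
 * the same page holds 32 objects with no leftover.
 */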

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
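
/*
 * Illustrative numbers: if the source array holds 12 cached objects, the
 * caller allows at most 30, and the destination has room for only 4 more
 * (limit 120, avail 116), then nr = min3(12, 30, 4) == 4 pointers are
 * copied and both avail counters are adjusted by 4.
 */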

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The kmem_cache_node structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for a cache whose buffer_size is the same as
 * size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN, 0, 0);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
				kmalloc_info[INDEX_NODE].name,
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
				0, kmalloc_size(INDEX_NODE));
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;

	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
	else
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);

	__SetPageSlab(page);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;
	unsigned long nr_freed = (1 << order);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
	else
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	memcg_uncharge_slab(page, order, cachep);
	__free_pages(page, order);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}

static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	if (caller)
		store_stackinfo(cachep, objp, caller);

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller) {}

#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1, 0);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
1701
 */
1702
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1703
{
1704
	void *freelist;
1705

1706 1707
	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
1708
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1709 1710
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
1711
		kmem_freepages(cachep, page);
1712 1713

	/*
1714
	 * From now on, we don't use freelist
1715 1716 1717
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
1718
		kmem_cache_free(cachep->freelist_cache, freelist);
L
Linus Torvalds 已提交
1719 1720
}

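/*
 * Destroy every slab on a caller-private list. The slabs were detached
 * from the node lists under ->list_lock, so no locking is needed here.
 */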
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}

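/*
 * Allocate the per-cpu array_caches for a cache. Each per-cpu entry is an
 * array_cache header followed by 'entries' object pointers, allocated with
 * __alloc_percpu() and initialised via init_arraycache().
 */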
static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}

static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

L
Linus Torvalds 已提交
1970
/**
1971
 * __kmem_cache_create - Create a cache.
R
Randy Dunlap 已提交
1972
 * @cachep: cache management descriptor
L
Linus Torvalds 已提交
1973 1974 1975 1976
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
1977
 * The @ctor is run when new pages are allocated by the cache.
L
Linus Torvalds 已提交
1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
1991
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
L
Linus Torvalds 已提交
1992
{
1993
	size_t ralign = BYTES_PER_WORD;
1994
	gfp_t gfp;
1995
	int err;
1996
	size_t size = cachep->size;
L
Linus Torvalds 已提交
1997 1998 1999 2000 2001 2002 2003 2004 2005

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
2006 2007
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
2008
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2009
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
L
Linus Torvalds 已提交
2010 2011 2012 2013
		flags |= SLAB_POISON;
#endif
#endif

A
Andrew Morton 已提交
2014 2015
	/*
	 * Check that size is in terms of words.  This is needed to avoid
L
Linus Torvalds 已提交
2016 2017 2018
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
2019
	size = ALIGN(size, BYTES_PER_WORD);
L
Linus Torvalds 已提交
2020

D
David Woodhouse 已提交
2021 2022 2023 2024
	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
2025
		size = ALIGN(size, REDZONE_ALIGN);
D
David Woodhouse 已提交
2026
	}
2027

2028
	/* 3) caller mandated alignment */
2029 2030
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
2031
	}
2032 2033
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
2034
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
2035
	/*
2036
	 * 4) Store it.
L
Linus Torvalds 已提交
2037
	 */
2038
	cachep->align = ralign;
2039 2040 2041 2042
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
L
Linus Torvalds 已提交
2043

2044 2045 2046 2047 2048
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

L
Linus Torvalds 已提交
2049 2050
#if DEBUG

2051 2052 2053 2054
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
2055 2056
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
2057 2058
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
2059 2060
	}
	if (flags & SLAB_STORE_USER) {
2061
		/* user store requires one word storage behind the end of
D
David Woodhouse 已提交
2062 2063
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
L
Linus Torvalds 已提交
2064
		 */
D
David Woodhouse 已提交
2065 2066 2067 2068
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
2069
	}
2070 2071
#endif

A
Alexander Potapenko 已提交
2072 2073
	kasan_cache_create(cachep, &size, &flags);

2074 2075 2076 2077 2078 2079 2080 2081 2082
	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
2083 2084 2085 2086 2087 2088 2089
	/*
	 * To activate debug pagealloc, off-slab management is necessary
	 * requirement. In early phase of initialization, small sized slab
	 * doesn't get initialized so it would not be possible. So, we need
	 * to check size >= 256. It guarantees that all necessary small
	 * sized slab is initialized in current slab initialization sequence.
	 */
2090
	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
L
Linus Torvalds 已提交
2102 2103 2104
	}
#endif

2105 2106 2107 2108 2109
	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

2110
	if (set_off_slab_cache(cachep, size, flags)) {
L
Linus Torvalds 已提交
2111
		flags |= CFLGS_OFF_SLAB;
2112
		goto done;
2113
	}
L
Linus Torvalds 已提交
2114

2115 2116
	if (set_on_slab_cache(cachep, size, flags))
		goto done;
L
Linus Torvalds 已提交
2117

2118
	return -E2BIG;
L
Linus Torvalds 已提交
2119

2120 2121
done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
L
Linus Torvalds 已提交
2122
	cachep->flags = flags;
2123
	cachep->allocflags = __GFP_COMP;
Y
Yang Shi 已提交
2124
	if (flags & SLAB_CACHE_DMA)
2125
		cachep->allocflags |= GFP_DMA;
2126 2127
	if (flags & SLAB_RECLAIM_ACCOUNT)
		cachep->allocflags |= __GFP_RECLAIMABLE;
2128
	cachep->size = size;
2129
	cachep->reciprocal_buffer_size = reciprocal_value(size);
L
Linus Torvalds 已提交
2130

2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143
#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
2144 2145
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
2146
	}
L
Linus Torvalds 已提交
2147

2148 2149
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
2150
		__kmem_cache_release(cachep);
2151
		return err;
2152
	}
L
Linus Torvalds 已提交
2153

2154
	return 0;
L
Linus Torvalds 已提交
2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2168 2169 2170 2171 2172
static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

2173
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2174 2175 2176
{
#ifdef CONFIG_SMP
	check_irq_off();
2177
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
L
Linus Torvalds 已提交
2178 2179
#endif
}
2180

2181
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2182 2183 2184
{
#ifdef CONFIG_SMP
	check_irq_off();
2185
	assert_spin_locked(&get_node(cachep, node)->list_lock);
2186 2187 2188
#endif
}

L
Linus Torvalds 已提交
2189 2190 2191
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
2192
#define check_mutex_acquired()	do { } while(0)
L
Linus Torvalds 已提交
2193
#define check_spinlock_acquired(x) do { } while(0)
2194
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2195 2196
#endif

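/*
 * Drain entries from an array_cache back into the node lists; called with
 * the node's list_lock held. A full drain frees everything; otherwise
 * roughly a fifth of the limit is freed, capped at half of what is
 * currently queued.
 */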
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}

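/*
 * Flush the calling CPU's array_cache back to its node, then destroy the
 * slabs that became completely free. Runs via on_each_cpu(), so interrupts
 * are disabled here.
 */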
static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();
	struct kmem_cache_node *n;
	LIST_HEAD(list);

	check_irq_off();
	ac = cpu_cache_get(cachep);
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail = 0;
}

static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_cache_node *n;
	int node;
	LIST_HEAD(list);

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
			drain_alien_cache(cachep, n->alien);

	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct page *page;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
			goto out;
		}

		page = list_entry(p, struct page, lru);
		list_del(&page->lru);
		n->free_slabs--;
		n->total_slabs--;
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);
		nr_freed++;
	}
out:
	return nr_freed;
}

int __kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0;
	int node;
	struct kmem_cache_node *n;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n) {
		drain_freelist(cachep, n, INT_MAX);

		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
	}
	return (ret ? 1 : 0);
}

#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
	__kmem_cache_shrink(cachep);
}
#endif

int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	return __kmem_cache_shrink(cachep);
}

void __kmem_cache_release(struct kmem_cache *cachep)
{
	int i;
	struct kmem_cache_node *n;

	cache_random_seq_destroy(cachep);

	free_percpu(cachep->cpu_cache);

	/* NUMA: free the node structures */
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
	}
}

/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because that would mean deferring the creation of the
 * kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * We would then eventually call down to __kmem_cache_create(), which
 * in turn looks up the desired-size one in the kmalloc_{dma,}_caches.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
static void *alloc_slabmgmt(struct kmem_cache *cachep,
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
{
	void *freelist;
	void *addr = page_address(page);

	page->s_mem = addr + colour_off;
	page->active = 0;

	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
					      local_flags, nodeid);
		if (!freelist)
			return NULL;
	} else {
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
	}

	return freelist;
}

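/*
 * The freelist is an array of object indices (freelist_idx_t). It lives
 * off-slab, at the end of the slab, or inside an unused object for
 * OBJFREELIST_SLAB caches; page->freelist points at it in every case.
 */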
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
	return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
					unsigned int idx, freelist_idx_t val)
{
	((freelist_idx_t *)(page->freelist))[idx] = val;
}

2393
static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
2394
{
2395
#if DEBUG
L
Linus Torvalds 已提交
2396 2397 2398
	int i;

	for (i = 0; i < cachep->num; i++) {
2399
		void *objp = index_to_obj(cachep, page, i);
2400

L
Linus Torvalds 已提交
2401 2402 2403 2404 2405 2406 2407 2408
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2409 2410 2411
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2412
		 */
A
Alexander Potapenko 已提交
2413 2414 2415
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
2416
			cachep->ctor(objp + obj_offset(cachep));
A
Alexander Potapenko 已提交
2417 2418 2419
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}
L
Linus Torvalds 已提交
2420 2421 2422

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2423
				slab_error(cachep, "constructor overwrote the end of an object");
L
Linus Torvalds 已提交
2424
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2425
				slab_error(cachep, "constructor overwrote the start of an object");
L
Linus Torvalds 已提交
2426
		}
2427 2428 2429 2430 2431
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
			slab_kernel_map(cachep, objp, 0, 0);
		}
2432
	}
L
Linus Torvalds 已提交
2433
#endif
2434 2435
}

T
Thomas Garnier 已提交
2436 2437 2438 2439 2440
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
2441
		unsigned int *list;
T
Thomas Garnier 已提交
2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization method available.
 * Return true if the pre-computed list is available, false otherwise.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
2459
	rand = get_random_int();
T
Thomas Garnier 已提交
2460 2461 2462 2463 2464 2465 2466 2467

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
2468
		state->pos = rand % count;
T
Thomas Garnier 已提交
2469 2470 2471 2472 2473 2474 2475 2476
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
2477 2478 2479
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
T
Thomas Garnier 已提交
2480 2481
}

2482 2483 2484 2485 2486 2487 2488
/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

T
Thomas Garnier 已提交
2489 2490 2491 2492 2493 2494
/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
2495
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
T
Thomas Garnier 已提交
2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
2520 2521 2522 2523 2524 2525 2526 2527 2528
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
T
Thomas Garnier 已提交
2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
	void *objp;
	bool shuffled;

	cache_init_objs_debug(cachep, page);

	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

	for (i = 0; i < cachep->num; i++) {
		objp = index_to_obj(cachep, page, i);
		kasan_init_slab_obj(cachep, objp);

		/* constructor could break poison info */
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}

		if (!shuffled)
			set_free_obj(page, i, i);
	}
}

static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
	void *objp;

	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
	page->active++;

#if DEBUG
	if (cachep->flags & SLAB_STORE_USER)
		set_store_user_dirty(cachep);
#endif

	return objp;
}

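/*
 * Return an object to its slab: push its index back onto the freelist.
 * For OBJFREELIST_SLAB caches a full slab has no freelist storage, so the
 * first object freed is reused to hold it again (page->freelist was NULL).
 */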
static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %px\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
{
	page->slab_cache = cache;
	page->freelist = freelist;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2634 2635
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
L
Linus Torvalds 已提交
2636
{
2637
	void *freelist;
P
Pekka Enberg 已提交
2638 2639
	size_t offset;
	gfp_t local_flags;
2640
	int page_node;
2641
	struct kmem_cache_node *n;
2642
	struct page *page;
L
Linus Torvalds 已提交
2643

A
Andrew Morton 已提交
2644 2645 2646
	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
L
Linus Torvalds 已提交
2647
	 */
2648
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2649
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2650 2651 2652 2653
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();
2654
	}
C
Christoph Lameter 已提交
2655
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2656 2657

	check_irq_off();
2658
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2659 2660
		local_irq_enable();

A
Andrew Morton 已提交
2661 2662 2663
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2664
	 */
2665
	page = kmem_getpages(cachep, local_flags, nodeid);
2666
	if (!page)
L
Linus Torvalds 已提交
2667 2668
		goto failed;

2669 2670
	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);
2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682

	/* Get colour for the slab, and cal the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

L
Linus Torvalds 已提交
2683
	/* Get slab management. */
2684
	freelist = alloc_slabmgmt(cachep, page, offset,
2685
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2686
	if (OFF_SLAB(cachep) && !freelist)
L
Linus Torvalds 已提交
2687 2688
		goto opps1;

2689
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2690

A
Alexander Potapenko 已提交
2691
	kasan_poison_slab(page);
2692
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2693

2694
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2695 2696
		local_irq_disable();

2697 2698
	return page;

A
Andrew Morton 已提交
2699
opps1:
2700
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2701
failed:
2702
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2703
		local_irq_disable();
2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
	return NULL;
}

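/*
 * Second half of growing a cache: cache_grow_begin() allocated and
 * initialised the page (possibly with interrupts enabled); here the new
 * slab is linked into the node lists under ->list_lock and the free
 * object counts are updated.
 */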
static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->lru);
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
	n->total_slabs++;
	if (!page->active) {
		list_add_tail(&page->lru, &(n->slabs_free));
		n->free_slabs++;
	} else
		fixup_slab_list(cachep, n, page, &list);

	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
2745
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
P
Pekka Enberg 已提交
2746 2747
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2748 2749 2750
	}
}

2751 2752
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2753
	unsigned long long redzone1, redzone2;
2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2769
	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2770
	       obj, redzone1, redzone2);
2771 2772
}

2773
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2774
				   unsigned long caller)
L
Linus Torvalds 已提交
2775 2776
{
	unsigned int objnr;
2777
	struct page *page;
L
Linus Torvalds 已提交
2778

2779 2780
	BUG_ON(virt_to_cache(objp) != cachep);

2781
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2782
	kfree_debugcheck(objp);
2783
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2784 2785

	if (cachep->flags & SLAB_RED_ZONE) {
2786
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2787 2788 2789
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
2790 2791
	if (cachep->flags & SLAB_STORE_USER) {
		set_store_user_dirty(cachep);
2792
		*dbg_userword(cachep, objp) = (void *)caller;
2793
	}
L
Linus Torvalds 已提交
2794

2795
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2796 2797

	BUG_ON(objnr >= cachep->num);
2798
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2799 2800 2801

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
2802
		slab_kernel_map(cachep, objp, 0, caller);
L
Linus Torvalds 已提交
2803 2804 2805 2806 2807 2808 2809 2810 2811
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}

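/*
 * Move a slab to the list matching its new state: slabs_full when every
 * object is allocated, otherwise slabs_partial. When an OBJFREELIST_SLAB
 * slab becomes full, its freelist pointer is queued on @list for later
 * poisoning (under SLAB_POISON debugging) and page->freelist is cleared.
 */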
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list)
{
	/* move slabp to correct slabp list: */
	list_del(&page->lru);
	if (page->active == cachep->num) {
		list_add(&page->lru, &n->slabs_full);
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
		list_add(&page->lru, &n->slabs_partial);
}

2851 2852
/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2853
					struct page *page, bool pfmemalloc)
2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
	list_del(&page->lru);
2872
	if (!page->active) {
2873
		list_add_tail(&page->lru, &n->slabs_free);
2874
		n->free_slabs++;
2875
	} else
2876 2877 2878 2879 2880 2881 2882
		list_add_tail(&page->lru, &n->slabs_partial);

	list_for_each_entry(page, &n->slabs_partial, lru) {
		if (!PageSlabPfmemalloc(page))
			return page;
	}

2883
	n->free_touched = 1;
2884
	list_for_each_entry(page, &n->slabs_free, lru) {
2885
		if (!PageSlabPfmemalloc(page)) {
2886
			n->free_slabs--;
2887
			return page;
2888
		}
2889 2890 2891 2892 2893 2894
	}

	return NULL;
}

static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
	struct page *page;

	assert_spin_locked(&n->list_lock);
	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
	if (!page) {
		n->free_touched = 1;
		page = list_first_entry_or_null(&n->slabs_free, struct page,
						lru);
		if (page)
			n->free_slabs--;
	}

	if (sk_memalloc_socks())
		page = get_valid_first_slab(n, page, pfmemalloc);

	return page;
}

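/*
 * Slow path for allocations that are allowed to dip into pfmemalloc
 * reserves: take a single object directly from a pfmemalloc slab on the
 * node lists, bypassing the per-cpu array_cache.
 */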
static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}

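/*
 * Refill the per-cpu array_cache after it ran empty: first try to grab a
 * batch from the node's shared array, then pull objects straight from
 * partial/free slabs, and finally grow the cache with a fresh slab.
 */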
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2967 2968
{
	int batchcount;
2969
	struct kmem_cache_node *n;
2970
	struct array_cache *ac, *shared;
P
Pekka Enberg 已提交
2971
	int node;
2972
	void *list = NULL;
2973
	struct page *page;
P
Pekka Enberg 已提交
2974

L
Linus Torvalds 已提交
2975
	check_irq_off();
2976
	node = numa_mem_id();
2977

2978
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2979 2980
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2981 2982 2983 2984
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2985 2986 2987
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2988
	n = get_node(cachep, node);
2989

2990
	BUG_ON(ac->avail > 0 || !n);
2991 2992 2993 2994
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

2995
	spin_lock(&n->list_lock);
2996
	shared = READ_ONCE(n->shared);
L
Linus Torvalds 已提交
2997

2998
	/* See if we can refill from the shared array */
2999 3000
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
3001
		goto alloc_done;
3002
	}
3003

L
Linus Torvalds 已提交
3004 3005
	while (batchcount > 0) {
		/* Get the slab that the alloc is to come from. */
3006
		page = get_first_slab(n, false);
3007 3008
		if (!page)
			goto must_grow;
L
Linus Torvalds 已提交
3009 3010

		check_spinlock_acquired(cachep);
3011

3012
		batchcount = alloc_block(cachep, ac, page, batchcount);
3013
		fixup_slab_list(cachep, n, page, &list);
L
Linus Torvalds 已提交
3014 3015
	}

A
Andrew Morton 已提交
3016
must_grow:
3017
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
3018
alloc_done:
3019
	spin_unlock(&n->list_lock);
3020
	fixup_objfreelist_debug(cachep, &list);
L
Linus Torvalds 已提交
3021

3022
direct_grow:
L
Linus Torvalds 已提交
3023
	if (unlikely(!ac->avail)) {
3024 3025 3026 3027 3028 3029 3030 3031
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

3032
		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3033

3034 3035 3036 3037
		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
3038
		ac = cpu_cache_get(cachep);
3039 3040 3041
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);
3042

3043
		if (!ac->avail)
L
Linus Torvalds 已提交
3044 3045 3046
			return NULL;
	}
	ac->touched = 1;
3047

3048
	return ac->entry[--ac->avail];
L
Linus Torvalds 已提交
3049 3050
}

A
Andrew Morton 已提交
3051 3052
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
3053
{
3054
	might_sleep_if(gfpflags_allow_blocking(flags));
L
Linus Torvalds 已提交
3055 3056 3057
}

#if DEBUG
A
Andrew Morton 已提交
3058
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3059
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
3060
{
P
Pekka Enberg 已提交
3061
	if (!objp)
L
Linus Torvalds 已提交
3062
		return objp;
P
Pekka Enberg 已提交
3063
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
3064
		check_poison_obj(cachep, objp);
3065
		slab_kernel_map(cachep, objp, 1, 0);
L
Linus Torvalds 已提交
3066 3067 3068
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
3069
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
3070 3071

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
3072 3073
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
J
Joe Perches 已提交
3074
			slab_error(cachep, "double free, or memory outside object was overwritten");
3075
			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3076 3077
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
3078 3079 3080 3081
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
3082

3083
	objp += obj_offset(cachep);
3084
	if (cachep->ctor && cachep->flags & SLAB_POISON)
3085
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
3086 3087
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3088
		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
3089
		       objp, (int)ARCH_SLAB_MINALIGN);
3090
	}
L
Linus Torvalds 已提交
3091 3092 3093 3094 3095 3096
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

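/*
 * Fast path: pop the most recently freed object (LIFO) from the per-cpu
 * array_cache; fall back to cache_alloc_refill() when it is empty.
 */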
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];

		STATS_INC_ALLOCHIT(cachep);
		goto out;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

3143
	if (in_interrupt() || (flags & __GFP_THISNODE))
3144
		return NULL;
3145
	nid_alloc = nid_here = numa_mem_id();
3146
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3147
		nid_alloc = cpuset_slab_spread_node();
3148
	else if (current->mempolicy)
3149
		nid_alloc = mempolicy_slab_node();
3150
	if (nid_alloc != nid_here)
3151
		return ____cache_alloc_node(cachep, flags, nid_alloc);
3152 3153 3154
	return NULL;
}

/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
3163
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3164
{
3165
	struct zonelist *zonelist;
3166
	struct zoneref *z;
3167 3168
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3169
	void *obj = NULL;
3170
	struct page *page;
3171
	int nid;
3172
	unsigned int cpuset_mems_cookie;
3173 3174 3175 3176

	if (flags & __GFP_THISNODE)
		return NULL;

3177
retry_cpuset:
3178
	cpuset_mems_cookie = read_mems_allowed_begin();
3179
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3180

3181 3182 3183 3184 3185
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3186 3187
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3188

3189
		if (cpuset_zone_allowed(zone, flags) &&
3190 3191
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
3192
				obj = ____cache_alloc_node(cache,
D
David Rientjes 已提交
3193
					gfp_exact_node(flags), nid);
3194 3195 3196
				if (obj)
					break;
		}
3197 3198
	}

3199
	if (!obj) {
3200 3201 3202 3203 3204 3205
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3206 3207 3208 3209
		page = cache_grow_begin(cache, flags, numa_mem_id());
		cache_grow_end(cache, page);
		if (page) {
			nid = page_to_nid(page);
3210 3211
			obj = ____cache_alloc_node(cache,
				gfp_exact_node(flags), nid);
3212

3213
			/*
3214 3215
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
3216
			 */
3217 3218
			if (!obj)
				goto retry;
3219
		}
3220
	}
3221

3222
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3223
		goto retry_cpuset;
3224 3225 3226
	return obj;
}

/*
 * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3231
				int nodeid)
3232
{
3233
	struct page *page;
3234
	struct kmem_cache_node *n;
3235
	void *obj = NULL;
3236
	void *list = NULL;
P
Pekka Enberg 已提交
3237

3238
	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3239
	n = get_node(cachep, nodeid);
3240
	BUG_ON(!n);
P
Pekka Enberg 已提交
3241

3242
	check_irq_off();
3243
	spin_lock(&n->list_lock);
3244
	page = get_first_slab(n, false);
3245 3246
	if (!page)
		goto must_grow;
P
Pekka Enberg 已提交
3247 3248 3249 3250 3251 3252 3253

	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3254
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3255

3256
	obj = slab_get_obj(cachep, page);
3257
	n->free_objects--;
P
Pekka Enberg 已提交
3258

3259
	fixup_slab_list(cachep, n, page, &list);
3260

3261
	spin_unlock(&n->list_lock);
3262
	fixup_objfreelist_debug(cachep, &list);
3263
	return obj;
3264

A
Andrew Morton 已提交
3265
must_grow:
3266
	spin_unlock(&n->list_lock);
3267
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3268 3269 3270 3271
	if (page) {
		/* This slab isn't counted yet so don't update free_objects */
		obj = slab_get_obj(cachep, page);
	}
3272
	cache_grow_end(cachep, page);
L
Linus Torvalds 已提交
3273

3274
	return obj ? obj : fallback_alloc(cachep, flags);
3275
}
3276 3277

static __always_inline void *
3278
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3279
		   unsigned long caller)
3280 3281 3282
{
	unsigned long save_flags;
	void *ptr;
3283
	int slab_node = numa_mem_id();
3284

3285
	flags &= gfp_allowed_mask;
3286 3287
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3288 3289
		return NULL;

3290 3291 3292
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3293
	if (nodeid == NUMA_NO_NODE)
3294
		nodeid = slab_node;
3295

3296
	if (unlikely(!get_node(cachep, nodeid))) {
3297 3298 3299 3300 3301
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3302
	if (nodeid == slab_node) {
3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

3319 3320
	if (unlikely(flags & __GFP_ZERO) && ptr)
		memset(ptr, 0, cachep->object_size);
3321

3322
	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3323 3324 3325 3326 3327 3328 3329 3330
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3331
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3332 3333 3334 3335 3336 3337 3338 3339 3340 3341
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3342 3343
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3359
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3360 3361 3362 3363
{
	unsigned long save_flags;
	void *objp;

3364
	flags &= gfp_allowed_mask;
3365 3366
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3367 3368
		return NULL;

3369 3370 3371 3372 3373 3374 3375
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

3376 3377
	if (unlikely(flags & __GFP_ZERO) && objp)
		memset(objp, 0, cachep->object_size);
3378

3379
	slab_post_alloc_hook(cachep, flags, 1, &objp);
3380 3381
	return objp;
}
3382 3383

/*
3384
 * Caller needs to acquire correct kmem_cache_node's list_lock
3385
 * @list: List of detached free slabs should be freed by caller
3386
 */
3387 3388
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
L
Linus Torvalds 已提交
3389 3390
{
	int i;
3391
	struct kmem_cache_node *n = get_node(cachep, node);
3392 3393 3394
	struct page *page;

	n->free_objects += nr_objects;
L
Linus Torvalds 已提交
3395 3396

	for (i = 0; i < nr_objects; i++) {
3397
		void *objp;
3398
		struct page *page;
L
Linus Torvalds 已提交
3399

3400 3401
		objp = objpp[i];

3402 3403
		page = virt_to_head_page(objp);
		list_del(&page->lru);
3404
		check_spinlock_acquired_node(cachep, node);
3405
		slab_put_obj(cachep, page, objp);
L
Linus Torvalds 已提交
3406 3407 3408
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
3409
		if (page->active == 0) {
3410
			list_add(&page->lru, &n->slabs_free);
3411 3412
			n->free_slabs++;
		} else {
L
Linus Torvalds 已提交
3413 3414 3415 3416
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3417
			list_add_tail(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
3418 3419
		}
	}
3420 3421 3422 3423 3424

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, lru);
3425
		list_move(&page->lru, list);
3426
		n->free_slabs--;
3427
		n->total_slabs--;
3428
	}
L
Linus Torvalds 已提交
3429 3430
}

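/*
 * The per-cpu array_cache overflowed on free: move 'batchcount' objects
 * into the node's shared array if there is room, otherwise give them back
 * to their slabs with free_block(), then slide the remaining entries down
 * to the start of the array.
 */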
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
L
Linus Torvalds 已提交
3432 3433
{
	int batchcount;
3434
	struct kmem_cache_node *n;
3435
	int node = numa_mem_id();
3436
	LIST_HEAD(list);
L
Linus Torvalds 已提交
3437 3438

	batchcount = ac->batchcount;
3439

L
Linus Torvalds 已提交
3440
	check_irq_off();
3441
	n = get_node(cachep, node);
3442 3443 3444
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
P
Pekka Enberg 已提交
3445
		int max = shared_array->limit - shared_array->avail;
L
Linus Torvalds 已提交
3446 3447 3448
		if (max) {
			if (batchcount > max)
				batchcount = max;
3449
			memcpy(&(shared_array->entry[shared_array->avail]),
P
Pekka Enberg 已提交
3450
			       ac->entry, sizeof(void *) * batchcount);
L
Linus Torvalds 已提交
3451 3452 3453 3454 3455
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

3456
	free_block(cachep, ac->entry, batchcount, node, &list);
A
Andrew Morton 已提交
3457
free_done:
L
Linus Torvalds 已提交
3458 3459 3460
#if STATS
	{
		int i = 0;
3461
		struct page *page;
L
Linus Torvalds 已提交
3462

3463
		list_for_each_entry(page, &n->slabs_free, lru) {
3464
			BUG_ON(page->active);
L
Linus Torvalds 已提交
3465 3466 3467 3468 3469 3470

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
3471
	spin_unlock(&n->list_lock);
3472
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
3473
	ac->avail -= batchcount;
A
Andrew Morton 已提交
3474
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
L
Linus Torvalds 已提交
3475 3476 3477
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This avoids the cache misses that would happen while accessing
	 * slabp (which is a per-page memory reference) to get the nodeid.
	 * Instead use a global variable to skip the call, which is most
	 * likely to be present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	ac->entry[ac->avail++] = objp;
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
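/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * kmem_cache_create() with kmem_cache_alloc()/kmem_cache_free().  The
 * names "struct foo" and "foo_cache" are made up for the example.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */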

static __always_inline void
cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
{
	size_t i;

	for (i = 0; i < size; i++)
		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
}

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	size_t i;

	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return 0;

	cache_alloc_debugcheck_before(s, flags);

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = __do_cache_alloc(s, flags);

		if (unlikely(!objp))
			goto error;
		p[i] = objp;
	}
	local_irq_enable();

	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);

	/* Clear memory outside IRQ disabled section */
	if (unlikely(flags & __GFP_ZERO))
		for (i = 0; i < size; i++)
			memset(p[i], 0, s->object_size);

	slab_post_alloc_hook(s, flags, size, p);
	/* FIXME: Trace call missing. Christoph would like a bulk variant */
	return size;
error:
	local_irq_enable();
	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
	slab_post_alloc_hook(s, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
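/*
 * Illustrative sketch (not part of this file): grabbing and releasing a
 * batch of objects with the bulk interface.  "foo_cache" and the array
 * size are made up for the example; on failure the function returns 0
 * after freeing whatever it had already allocated.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */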

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
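/*
 * Illustrative sketch (not part of this file): allocating an object on
 * the node that a device is attached to.  "foo_cache" and "dev" are
 * made-up names.
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *					      dev_to_node(dev));
 *	if (!f)
 *		return -ENOMEM;
 */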

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
	kasan_kmalloc(cachep, ret, size, flags);

	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);
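/*
 * Illustrative sketch (not part of this file): __kmalloc_node() is what
 * the kmalloc_node() wrapper in <linux/slab.h> typically resolves to for
 * non-constant sizes, e.g.:
 *
 *	buf = kmalloc_node(len, GFP_KERNEL, numa_node_id());
 *
 * "buf" and "len" are made-up names.
 */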

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
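/*
 * Illustrative sketch (not part of this file): kfree() pairs with
 * kmalloc() and accepts NULL and ZERO_SIZE_PTR, so error paths may call
 * it unconditionally.  "buf" and "len" are made-up names.
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */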

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;

	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with disabled local interrupts. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
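/*
 * Illustrative sketch (not part of this file): the line written to
 * /proc/slabinfo must be "<cache-name> <limit> <batchcount> <shared>",
 * matching the sscanf() above.  For instance, from a root shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * The cache name and the three numbers are only example values.
 */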

#ifdef CONFIG_DEBUG_SLAB_LEAK

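/*
 * Layout of the leak-tracking table filled in by handle_slab() below:
 * n[0] holds the table capacity, n[1] the number of entries in use, and
 * the entries start at n[2] as (caller address, occurrence count) pairs
 * kept sorted by address.  add_caller() binary-searches for @v and either
 * bumps the matching count or inserts a new pair; it returns 0 once the
 * table is full so that leaks_show() can grow the buffer and retry.
 */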
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i, j;
	unsigned long v;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		bool active = true;

		for (j = page->active; j < c->num; j++) {
			if (get_free_obj(page, j) == i) {
				active = false;
				break;
			}
		}

		if (!active)
			continue;

		/*
		 * probe_kernel_read() is used for DEBUG_PAGEALLOC: the page
		 * table mapping is only established when the object is
		 * actually allocated, so we could otherwise access an
		 * unmapped object sitting in the cpu cache.
		 */
		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
			continue;

		if (!add_caller(n, v))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%px", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/*
	 * Set store_user_clean and start to grab stored user information
	 * for all objects on this cache. If some alloc/free requests comes
	 * during the processing, information would be wrong so restart
	 * whole processing.
	 */
	do {
		set_store_user_clean(cachep);
		drain_cpu_caches(cachep);

		x[1] = 0;

		for_each_kmem_cache_node(cachep, node, n) {

			check_irq_on();
			spin_lock_irq(&n->list_lock);

			list_for_each_entry(page, &n->slabs_full, lru)
				handle_slab(x, cachep, page);
			list_for_each_entry(page, &n->slabs_partial, lru)
				handle_slab(x, cachep, page);
			spin_unlock_irq(&n->list_lock);
		}
	} while (!is_store_user_clean(cachep));

	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * A copy that passes the check returns quietly; a failed check is either
 * reported via usercopy_warn() or rejected via usercopy_abort().
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */
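/*
 * Illustrative sketch (not part of this file): a cache whitelists the
 * region that may be copied to/from user space when it is created, and
 * __check_heap_object() above enforces that window.  "struct foo" and
 * its "payload" member are made-up names.
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *			0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, payload),
 *			sizeof(((struct foo *)0)->payload),
 *			NULL);
 */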

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	size = virt_to_cache(objp)->object_size;
	/* We assume that ksize callers could use the whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_shadow(objp, size);

	return size;
}
EXPORT_SYMBOL(ksize);
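/*
 * Illustrative sketch (not part of this file): ksize() reports the size
 * an allocation was actually rounded up to, so the caller may use the
 * slack without reallocating.  "buf" and "want" are made-up names.
 *
 *	buf = kmalloc(want, GFP_KERNEL);
 *	if (buf)
 *		avail = ksize(buf);	(always >= want)
 */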