// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
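
/*
 * Illustrative usage sketch (not part of this file's logic): a typical
 * cache user creates one cache per object type and allocates from it.
 * "struct foo" and foo_cache are placeholder names.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */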

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
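
/*
 * Illustrative sketch (not part of this file's logic): the per-cpu array
 * behaves as a LIFO stack indexed by 'avail':
 *
 *	free:  ac->entry[ac->avail++] = objp;
 *	alloc: objp = ac->entry[--ac->avail];
 */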

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
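
/*
 * Worked example (illustrative, assuming 4KB pages and a 1-byte
 * freelist_idx_t): for an on-slab cache of 128-byte objects at order 0,
 * num = 4096 / (128 + 1) = 31 and *left_over = 4096 - 31 * 129 = 97 bytes.
 */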

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
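
/*
 * Worked example (illustrative): with from->avail = 30, max = 64 and
 * to->limit - to->avail = 20, nr = min3(30, 64, 20) = 20 entries move,
 * leaving from->avail = 10.
 */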

/* &alien->lock must be held by alien callers. */
static __always_inline void __free_one(struct array_cache *ac, void *objp)
{
	/* Avoid trivial double-free. */
	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
		return;
	ac->entry[ac->avail++] = objp;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		kmemleak_no_scan(alc);
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		__free_one(ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}
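
/*
 * Worked example (illustrative): on a node with 4 cpus, batchcount 16 and
 * 32 objects per slab, free_limit = (1 + 4) * 16 + 32 = 112 objects may
 * sit on the node's free lists before freeing starts reclaiming slabs.
 */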

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing to it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_rcu().
	 */
	if (old_shared && force_change)
		synchronize_rcu();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		free_block(cachep, nc->entry, nc->avail, node, &list);
		nc->avail = 0;

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The list3 structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
 * size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN, 0, 0);
	list_add(&kmem_cache->list, &slab_caches);
	memcg_link_cache(kmem_cache, NULL);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
				kmalloc_info[INDEX_NODE].size,
				ARCH_KMALLOC_FLAGS, 0,
				kmalloc_info[INDEX_NODE].size);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;

	flags |= cachep->allocflags;

	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

	__SetPageSlab(page);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	uncharge_slab_page(page, order, cachep);
	__free_pages(page, order);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map) {}

#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, slab_list) {
		list_del(&page->slab_list);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 *
 * Return: number of left-over bytes in a slab
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
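
/*
 * Worked example (illustrative only, assuming 4 KiB pages, an on-slab
 * freelist and a 1-byte freelist_idx_t): a 700-byte object then costs
 * 701 bytes.  At gfporder 0, num = 4096 / 701 = 5 and
 * left_over = 4096 - 5 * 701 = 591; since 591 * 8 = 4728 > 4096 the
 * fragmentation check fails and the next order is tried.  At gfporder 1,
 * num = 8192 / 701 = 11 and left_over = 8192 - 11 * 701 = 481;
 * 481 * 8 = 3848 <= 8192, so order 1 is accepted.
 */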

static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}
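
/*
 * Illustrative note: cache merging means two compatible cache creations
 * (same effective size, alignment and flags, no constructor conflicts)
 * can share one underlying kmem_cache; the object_size bump above keeps
 * kzalloc() clearing the larger of the two object sizes.
 */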

static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * If slab auto-initialization on free is enabled, store the freelist
	 * off-slab, so that its contents don't end up in one of the allocated
	 * objects.
	 */
	if (unlikely(slab_want_init_on_free(cachep)))
		return false;

	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE is set,
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}
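
/*
 * Illustrative summary of the three freelist placement strategies tried
 * by __kmem_cache_create() below, in order of preference on the common
 * (non-debug) path:
 *
 *   OBJFREELIST_SLAB - the freelist lives inside a free object, costing
 *                      no extra space; unusable with constructors,
 *                      SLAB_TYPESAFE_BY_RCU or init-on-free.
 *   OFF_SLAB         - the freelist is allocated from a kmalloc cache;
 *                      used when the left-over space cannot hold it.
 *   on-slab          - the freelist occupies the tail of the slab itself.
 */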

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: 0 on success, an error code otherwise
 */
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
	size_t ralign = BYTES_PER_WORD;
	gfp_t gfp;
	int err;
	unsigned int size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	size = ALIGN(size, BYTES_PER_WORD);

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size = ALIGN(size, REDZONE_ALIGN);
	}

	/* caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * Store it.
	 */
	cachep->align = ralign;
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

#if DEBUG

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#endif

	kasan_cache_create(cachep, &size, &flags);

	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
	/*
	 * To activate debug pagealloc, off-slab management is a necessary
	 * requirement. In the early phase of initialization, small sized
	 * slabs don't get initialized, so it would not be possible. So, we
	 * need to check size >= 256. It guarantees that all necessary small
	 * sized slabs are initialized in the current slab initialization
	 * sequence.
	 */
	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
	}
#endif

	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

	if (set_off_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OFF_SLAB;
		goto done;
	}

	if (set_on_slab_cache(cachep, size, flags))
		goto done;

	return -E2BIG;

done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
	cachep->flags = flags;
	cachep->allocflags = __GFP_COMP;
	if (flags & SLAB_CACHE_DMA)
		cachep->allocflags |= GFP_DMA;
	if (flags & SLAB_CACHE_DMA32)
		cachep->allocflags |= GFP_DMA32;
	if (flags & SLAB_RECLAIM_ACCOUNT)
		cachep->allocflags |= __GFP_RECLAIMABLE;
	cachep->size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
	}

	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_release(cachep);
		return err;
	}

	return 0;
}
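
/*
 * Usage sketch (illustrative, not part of this file): callers reach
 * __kmem_cache_create() through the public kmem_cache_create() API in
 * mm/slab_common.c.  A typical client looks like this; "foo" and
 * foo_cachep are hypothetical names used only for illustration:
 *
 *	struct foo {
 *		int a;
 *		spinlock_t lock;
 *	};
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *					       0, SLAB_HWCACHE_ALIGN, NULL);
 *		return foo_cachep ? 0 : -ENOMEM;
 *	}
 */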

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}

static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}

#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_mutex_acquired()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif

static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}
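
/*
 * Note on the batch arithmetic above (illustrative): with free_all false
 * and ac->limit == 120, tofree = (120 + 4) / 5 = 24, i.e. about a fifth
 * of the array limit is drained per call; when fewer than that many
 * objects are available, roughly half of ac->avail is freed instead.
 */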

static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();
	struct kmem_cache_node *n;
	LIST_HEAD(list);

	check_irq_off();
	ac = cpu_cache_get(cachep);
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail = 0;
}

static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_cache_node *n;
	int node;
	LIST_HEAD(list);

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
			drain_alien_cache(cachep, n->alien);

	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct page *page;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
			goto out;
		}

		page = list_entry(p, struct page, slab_list);
		list_del(&page->slab_list);
		n->free_slabs--;
		n->total_slabs--;
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);
		nr_freed++;
	}
out:
	return nr_freed;
}

bool __kmem_cache_empty(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n)
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial))
			return false;
	return true;
}

int __kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0;
	int node;
	struct kmem_cache_node *n;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n) {
		drain_freelist(cachep, n, INT_MAX);

		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
	}
	return (ret ? 1 : 0);
}

#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
	__kmem_cache_shrink(cachep);
}

void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
{
}
#endif

int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	return __kmem_cache_shrink(cachep);
}

void __kmem_cache_release(struct kmem_cache *cachep)
{
	int i;
	struct kmem_cache_node *n;

	cache_random_seq_destroy(cachep);

	free_percpu(cachep->cpu_cache);

	/* NUMA: free the node structures */
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
	}
}

/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it did, we would defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we would eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
static void *alloc_slabmgmt(struct kmem_cache *cachep,
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
{
	void *freelist;
	void *addr = page_address(page);

	page->s_mem = addr + colour_off;
	page->active = 0;

	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
					      local_flags, nodeid);
		if (!freelist)
			return NULL;
	} else {
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
	}

	return freelist;
}

static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
	return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
					unsigned int idx, freelist_idx_t val)
{
	((freelist_idx_t *)(page->freelist))[idx] = val;
}

static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
{
#if DEBUG
	int i;

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
			cachep->ctor(objp + obj_offset(cachep));
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the end of an object");
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the start of an object");
		}
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
			slab_kernel_map(cachep, objp, 0);
		}
	}
#endif
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
		unsigned int *list;
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization method available.
 * Return true if the pre-computed list is available, false otherwise.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
	rand = get_random_int();

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
		state->pos = rand % count;
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
}

/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * Return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
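
/*
 * Illustrative example: with a pre-computed cachep->random_seq of
 * {3, 0, 2, 1} for a 4-object slab and a random start pos of 2,
 * next_random_slot() yields 2, 1, 3, 0 - the same pre-computed
 * permutation is reused for every slab, rotated by a per-slab
 * random starting offset.
 */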

static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
	void *objp;
	bool shuffled;

	cache_init_objs_debug(cachep, page);

	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

	for (i = 0; i < cachep->num; i++) {
		objp = index_to_obj(cachep, page, i);
		objp = kasan_init_slab_obj(cachep, objp);

		/* constructor could break poison info */
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}

		if (!shuffled)
			set_free_obj(page, i, i);
	}
}

static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
	void *objp;

	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
	page->active++;

	return objp;
}

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %px\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}
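
/*
 * Illustrative note: the freelist index array acts as a stack, with
 * page->active as the stack pointer.  Example for a 4-object slab with
 * freelist {2, 0, 3, 1} and active == 1: slots >= active hold the free
 * object indices 0, 3 and 1.  slab_get_obj() hands out object 0 and
 * bumps active to 2; a later slab_put_obj() of object 2 drops active
 * back to 1 and records index 2 in the freed slot.
 */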

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
{
	page->slab_cache = cache;
	page->freelist = freelist;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
{
	void *freelist;
	size_t offset;
	gfp_t local_flags;
	int page_node;
	struct kmem_cache_node *n;
	struct page *page;

	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	check_irq_off();
	if (gfpflags_allow_blocking(local_flags))
		local_irq_enable();

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
		goto failed;

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	/* Get colour for the slab, and calculate the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

	/*
	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
	 * page_address() in the latter returns a non-tagged pointer,
	 * as it should be for slab pages.
	 */
	kasan_poison_slab(page);

	/* Get slab management. */
	freelist = alloc_slabmgmt(cachep, page, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
	if (OFF_SLAB(cachep) && !freelist)
		goto opps1;

	slab_map_pages(cachep, page, freelist);

	cache_init_objs(cachep, page);

	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();

	return page;

opps1:
	kmem_freepages(cachep, page);
failed:
	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();
	return NULL;
}

static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->slab_list);
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
	n->total_slabs++;
	if (!page->active) {
		list_add_tail(&page->slab_list, &n->slabs_free);
		n->free_slabs++;
	} else
		fixup_slab_list(cachep, n, page, &list);

	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
		       (unsigned long)objp);
		BUG();
	}
}

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
	       obj, redzone1, redzone2);
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
				   unsigned long caller)
{
	unsigned int objnr;
	struct page *page;

	BUG_ON(virt_to_cache(objp) != cachep);

	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	page = virt_to_head_page(objp);

	if (cachep->flags & SLAB_RED_ZONE) {
		verify_redzone_free(cachep, objp);
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	objnr = obj_to_index(cachep, page, objp);

	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != index_to_obj(cachep, page, objnr));

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
		slab_kernel_map(cachep, objp, 0);
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}

static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list)
{
	/* Move the slab to the correct slab list: */
	list_del(&page->slab_list);
	if (page->active == cachep->num) {
		list_add(&page->slab_list, &n->slabs_full);
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
		list_add(&page->slab_list, &n->slabs_partial);
}

/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
					struct page *page, bool pfmemalloc)
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
	list_del(&page->slab_list);
	if (!page->active) {
		list_add_tail(&page->slab_list, &n->slabs_free);
		n->free_slabs++;
	} else
		list_add_tail(&page->slab_list, &n->slabs_partial);

	list_for_each_entry(page, &n->slabs_partial, slab_list) {
		if (!PageSlabPfmemalloc(page))
			return page;
	}

	n->free_touched = 1;
	list_for_each_entry(page, &n->slabs_free, slab_list) {
		if (!PageSlabPfmemalloc(page)) {
			n->free_slabs--;
			return page;
		}
	}

	return NULL;
}

static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
	struct page *page;

	assert_spin_locked(&n->list_lock);
	page = list_first_entry_or_null(&n->slabs_partial, struct page,
					slab_list);
	if (!page) {
		n->free_touched = 1;
		page = list_first_entry_or_null(&n->slabs_free, struct page,
						slab_list);
		if (page)
			n->free_slabs--;
	}

	if (sk_memalloc_socks())
		page = get_valid_first_slab(n, page, pfmemalloc);

	return page;
}

static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
	int batchcount;
	struct kmem_cache_node *n;
	struct array_cache *ac, *shared;
	int node;
	void *list = NULL;
	struct page *page;

	check_irq_off();
	node = numa_mem_id();

	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	n = get_node(cachep, node);

	BUG_ON(ac->avail > 0 || !n);
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

	spin_lock(&n->list_lock);
	shared = READ_ONCE(n->shared);

	/* See if we can refill from the shared array */
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		/* Get the slab the allocation is to come from. */
		page = get_first_slab(n, false);
		if (!page)
			goto must_grow;

		check_spinlock_acquired(cachep);

		batchcount = alloc_block(cachep, ac, page, batchcount);
		fixup_slab_list(cachep, n, page, &list);
	}

must_grow:
	n->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

direct_grow:
	if (unlikely(!ac->avail)) {
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);

		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
		ac = cpu_cache_get(cachep);
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);

		if (!ac->avail)
			return NULL;
	}
	ac->touched = 1;

	return ac->entry[--ac->avail];
}

static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
{
	might_sleep_if(gfpflags_allow_blocking(flags));
}

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
				gfp_t flags, void *objp, unsigned long caller)
{
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
	if (!objp)
		return objp;
	if (cachep->flags & SLAB_POISON) {
		check_poison_obj(cachep, objp);
		slab_kernel_map(cachep, objp, 1);
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside object was overwritten");
			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}

	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp);
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
		       objp, (int)ARCH_SLAB_MINALIGN);
	}
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];

		STATS_INC_ALLOCHIT(cachep);
		goto out;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
	nid_alloc = nid_here = numa_mem_id();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = mempolicy_slab_node();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
}

/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type highest_zoneidx = gfp_zone(flags);
	void *obj = NULL;
	struct page *page;
	int nid;
	unsigned int cpuset_mems_cookie;

	if (flags & __GFP_THISNODE)
		return NULL;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = node_zonelist(mempolicy_slab_node(), flags);

retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
		nid = zone_to_nid(zone);

		if (cpuset_zone_allowed(zone, flags) &&
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
				obj = ____cache_alloc_node(cache,
					gfp_exact_node(flags), nid);
				if (obj)
					break;
		}
	}

	if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
		page = cache_grow_begin(cache, flags, numa_mem_id());
		cache_grow_end(cache, page);
		if (page) {
			nid = page_to_nid(page);
			obj = ____cache_alloc_node(cache,
				gfp_exact_node(flags), nid);

			/*
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
			 */
			if (!obj)
				goto retry;
		}
	}

	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return obj;
}

/*
 * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				int nodeid)
{
	struct page *page;
	struct kmem_cache_node *n;
	void *obj = NULL;
	void *list = NULL;

	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
	n = get_node(cachep, nodeid);
	BUG_ON(!n);

	check_irq_off();
	spin_lock(&n->list_lock);
	page = get_first_slab(n, false);
	if (!page)
		goto must_grow;

	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	BUG_ON(page->active == cachep->num);

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);
	return obj;

must_grow:
	spin_unlock(&n->list_lock);
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
	if (page) {
		/* This slab isn't counted yet so don't update free_objects */
		obj = slab_get_obj(cachep, page);
	}
	cache_grow_end(cachep, page);

	return obj ? obj : fallback_alloc(cachep, flags);
}

static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		   unsigned long caller)
{
	unsigned long save_flags;
	void *ptr;
	int slab_node = numa_mem_id();

	flags &= gfp_allowed_mask;
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

	if (nodeid == NUMA_NO_NODE)
		nodeid = slab_node;

	if (unlikely(!get_node(cachep, nodeid))) {
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

	if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
		memset(ptr, 0, cachep->object_size);

	slab_post_alloc_hook(cachep, flags, 1, &ptr);
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());

out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;

	flags &= gfp_allowed_mask;
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
		memset(objp, 0, cachep->object_size);

	slab_post_alloc_hook(cachep, flags, 1, &objp);
	return objp;
}

/*
 * Caller must hold the correct kmem_cache_node's list_lock.
 * @list: list of detached free slabs; they must be freed by the caller
 */
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
{
	int i;
	struct kmem_cache_node *n = get_node(cachep, node);
	struct page *page;

	n->free_objects += nr_objects;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct page *page;

		objp = objpp[i];

		page = virt_to_head_page(objp);
		list_del(&page->slab_list);
		check_spinlock_acquired_node(cachep, node);
		slab_put_obj(cachep, page, objp);
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
		if (page->active == 0) {
			list_add(&page->slab_list, &n->slabs_free);
			n->free_slabs++;
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&page->slab_list, &n->slabs_partial);
		}
	}

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, slab_list);
		list_move(&page->slab_list, list);
		n->free_slabs--;
		n->total_slabs--;
	}
}
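
/*
 * Worked example (illustrative): with cachep->num = 8, n->free_limit = 20
 * and n->free_objects = 25 after the frees above, the trimming loop
 * detaches one completely free slab (free_objects drops to 17 <= 20) onto
 * @list; the caller destroys it after dropping the list_lock.
 */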

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	LIST_HEAD(list);

	batchcount = ac->batchcount;

	check_irq_off();
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node, &list);
free_done:
#if STATS
	{
		int i = 0;
		struct page *page;

		list_for_each_entry(page, &n->slabs_free, slab_list) {
			BUG_ON(page->active);

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with interrupts disabled.
 */
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	if (unlikely(slab_want_init_on_free(cachep)))
		memset(objp, 0, cachep->object_size);
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This will avoid cache misses that happen while accessing slabp
	 * (which is a per-page memory reference) to get nodeid. Instead use
	 * a global variable to skip the call, which is most likely to be
	 * present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	__free_one(ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);

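/*
 * Illustrative usage sketch, not part of the allocator: a typical caller
 * pairs kmem_cache_alloc() with kmem_cache_free() on the same cache.
 * "example_cache" and struct example are hypothetical and assumed to have
 * been set up with kmem_cache_create() elsewhere.
 */
#if 0	/* example only, kept out of the build */
struct example {
	int payload;
};

static struct kmem_cache *example_cache;

static int example_use(void)
{
	struct example *obj;

	/* GFP_KERNEL may sleep; the flags only matter when the cache is empty. */
	obj = kmem_cache_alloc(example_cache, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->payload = 42;
	kmem_cache_free(example_cache, obj);
	return 0;
}
#endif
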
static __always_inline void
cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
{
	size_t i;

	for (i = 0; i < size; i++)
		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
}

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	size_t i;

	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return 0;

	cache_alloc_debugcheck_before(s, flags);

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = __do_cache_alloc(s, flags);

		if (unlikely(!objp))
			goto error;
		p[i] = objp;
	}
	local_irq_enable();

	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);

	/* Clear memory outside IRQ disabled section */
	if (unlikely(slab_want_init_on_alloc(flags, s)))
		for (i = 0; i < size; i++)
			memset(p[i], 0, s->object_size);

	slab_post_alloc_hook(s, flags, size, p);
	/* FIXME: Trace call missing. Christoph would like a bulk variant */
	return size;
error:
	local_irq_enable();
	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
	slab_post_alloc_hook(s, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

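/*
 * Illustrative sketch (hypothetical cache and batch size): bulk allocation
 * amortizes the irq-disable/enable and hook overhead over many objects.
 * On failure kmem_cache_alloc_bulk() releases everything it already
 * allocated and returns 0, so callers only check the return value.
 */
#if 0	/* example only, kept out of the build */
static int example_bulk(struct kmem_cache *cache)
{
	void *objs[16];

	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use objs[0..15] ... */

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}
#endif
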
#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

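/*
 * Illustrative sketch (hypothetical cache): allocating on a specific node
 * keeps later accesses local on NUMA machines. Without __GFP_THISNODE the
 * allocation may still fall back to other nodes, as noted above.
 */
#if 0	/* example only, kept out of the build */
static void *example_alloc_on_node(struct kmem_cache *cache, int nid)
{
	return kmem_cache_alloc_node(cache, GFP_KERNEL, nid);
}
#endif
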
#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
	ret = kasan_kmalloc(cachep, ret, size, flags);

	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
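
/*
 * Illustrative sketch: kmalloc_node() typically lands in __do_kmalloc_node()
 * above when the size is not a compile-time constant. The size and node id
 * below are hypothetical example values.
 */
#if 0	/* example only, kept out of the build */
static void *example_kmalloc_node(int nid)
{
	/* 128 bytes, served from the matching kmalloc-* cache on @nid */
	return kmalloc_node(128, GFP_KERNEL, nid);
}
#endif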

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);
		if (!s)
			continue;

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

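/*
 * Illustrative sketch: kfree_bulk() is the orig_s == NULL path above, so
 * the pointers may come from different kmalloc caches. Sizes are
 * hypothetical; kfree(NULL) is a no-op, which keeps the error path simple.
 */
#if 0	/* example only, kept out of the build */
static void example_kfree_bulk(void)
{
	void *p[2];

	p[0] = kmalloc(32, GFP_KERNEL);
	p[1] = kmalloc(512, GFP_KERNEL);
	if (!p[0] || !p[1]) {
		kfree(p[0]);
		kfree(p[1]);
		return;
	}

	/* one call frees both, resolving each pointer's cache internally */
	kfree_bulk(ARRAY_SIZE(p), p);
}
#endif
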
/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	if (!c) {
		local_irq_restore(flags);
		return;
	}
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);

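/*
 * Illustrative sketch: a kmalloc()/kfree() round trip. Since kfree(NULL)
 * is a no-op, error paths may free unconditionally. The helper below is
 * hypothetical.
 */
#if 0	/* example only, kept out of the build */
static char *example_dup(const char *src, size_t len)
{
	char *buf = kmalloc(len + 1, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	buf[len] = '\0';
	return buf;	/* caller eventually releases it with kfree() */
}
#endif
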
/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;

	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work_on(smp_processor_id(), work,
				round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: %0 on success, negative error code otherwise.
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
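
/*
 * Illustrative usage sketch: the tunables above are written from userspace
 * as "<cache name> <limit> <batchcount> <shared>", e.g. (hypothetical
 * values; cache names vary by system):
 *
 *	echo 'dentry 120 60 8' > /proc/slabinfo
 *
 * Per the checks above, limit and batchcount must be at least 1, batchcount
 * must not exceed limit, and shared must be non-negative; an out-of-range
 * tuple is silently ignored (res is set to 0), while an unknown cache name
 * makes the write fail with -EINVAL.
 */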

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Returns normally if the check passes; a failed check ends in
 * usercopy_abort() (or just a warning when usercopy_fallback is set and
 * the copy is still within the allocated object).
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	ptr = kasan_reset_tag(ptr);

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */

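/*
 * Illustrative sketch: a cache opts a region of its objects into usercopy
 * with kmem_cache_create_usercopy(), and only that window passes the check
 * above. struct example_req and its "data" field are hypothetical.
 */
#if 0	/* example only, kept out of the build */
struct example_req {
	u32 internal_state;	/* never copied to/from userspace */
	char data[64];		/* whitelisted for copy_to/from_user() */
};

static struct kmem_cache *example_req_cache;

static int __init example_req_init(void)
{
	example_req_cache = kmem_cache_create_usercopy("example_req",
			sizeof(struct example_req), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct example_req, data),
			sizeof_field(struct example_req, data), NULL);
	return example_req_cache ? 0 : -ENOMEM;
}
#endif
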
/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t __ksize(const void *objp)
{
	struct kmem_cache *c;
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	c = virt_to_cache(objp);
	size = c ? c->object_size : 0;

	return size;
}
EXPORT_SYMBOL(__ksize);
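
/*
 * Illustrative sketch: the usable size reported by ksize()/__ksize() can
 * exceed the requested size, because kmalloc() rounds the request up to
 * the object size of the backing cache (a 30-byte request is typically
 * served from the kmalloc-32 cache).
 */
#if 0	/* example only, kept out of the build */
static void example_ksize(void)
{
	char *buf = kmalloc(30, GFP_KERNEL);

	if (!buf)
		return;
	pr_info("requested 30, usable %zu\n", ksize(buf));	/* likely 32 */
	kfree(buf);
}
#endif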