slab.c 105.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists out of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
S
Simon Arlott 已提交
30
 * slabs and you must pass objects with the same initializations to
L
Linus Torvalds 已提交
31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
A
Andrew Morton 已提交
54
 * The c_cpuarray may not be read with enabled local interrupts -
L
Linus Torvalds 已提交
55 56 57 58
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
59
 *  Several members in struct kmem_cache and struct slab never change, they
L
Linus Torvalds 已提交
60 61 62 63 64 65 66 67 68 69 70 71
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
72
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
L
Linus Torvalds 已提交
73 74 75 76 77 78
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
79 80 81 82 83 84 85 86 87
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
L
Linus Torvalds 已提交
88 89 90 91
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
92
#include	<linux/poison.h>
L
Linus Torvalds 已提交
93 94 95 96 97
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
98
#include	<linux/cpuset.h>
99
#include	<linux/proc_fs.h>
L
Linus Torvalds 已提交
100 101 102 103 104 105 106
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
107
#include	<linux/string.h>
108
#include	<linux/uaccess.h>
109
#include	<linux/nodemask.h>
110
#include	<linux/kmemleak.h>
111
#include	<linux/mempolicy.h>
I
Ingo Molnar 已提交
112
#include	<linux/mutex.h>
113
#include	<linux/fault-inject.h>
I
Ingo Molnar 已提交
114
#include	<linux/rtmutex.h>
115
#include	<linux/reciprocal_div.h>
116
#include	<linux/debugobjects.h>
117
#include	<linux/memory.h>
118
#include	<linux/prefetch.h>
119
#include	<linux/sched/task_stack.h>
L
Linus Torvalds 已提交
120

121 122
#include	<net/sock.h>

L
Linus Torvalds 已提交
123 124 125 126
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

127 128
#include <trace/events/kmem.h>

129 130
#include	"internal.h"

131 132
#include	"slab.h"

L
Linus Torvalds 已提交
133
/*
134
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
L
Linus Torvalds 已提交
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
D
David Woodhouse 已提交
155
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
L
Linus Torvalds 已提交
156 157 158 159 160

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

161 162 163 164 165 166 167 168 169
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

170
#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
171

L
Linus Torvalds 已提交
172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
189
	void *entry[];	/*
A
Andrew Morton 已提交
190 191 192 193
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
L
Linus Torvalds 已提交
194 195
};

J
Joonsoo Kim 已提交
196 197 198 199 200
struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

201 202 203
/*
 * Need this for bootstrapping a per node allocator.
 */
204
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
205
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
206
#define	CACHE_CACHE 0
207
#define	SIZE_NODE (MAX_NUMNODES)
208

209
static int drain_freelist(struct kmem_cache *cache,
210
			struct kmem_cache_node *n, int tofree);
211
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
212 213
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
214
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
215
static void cache_reap(struct work_struct *unused);
216

217 218 219 220 221
static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
222 223
static int slab_early_init = 1;

224
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
L
Linus Torvalds 已提交
225

226
static void kmem_cache_node_init(struct kmem_cache_node *parent)
227 228 229 230
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
231
	parent->total_slabs = 0;
232
	parent->free_slabs = 0;
233 234
	parent->shared = NULL;
	parent->alien = NULL;
235
	parent->colour_next = 0;
236 237 238 239 240
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

A
Andrew Morton 已提交
241 242 243
#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
244
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
245 246
	} while (0)

A
Andrew Morton 已提交
247 248
#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
249 250 251 252
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)
L
Linus Torvalds 已提交
253

254 255
#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
256
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
L
Linus Torvalds 已提交
257 258 259
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
A
Andrew Morton 已提交
260 261 262
/*
 * Optimization question: fewer reaps means less probability for unnessary
 * cpucache drain/refill cycles.
L
Linus Torvalds 已提交
263
 *
A
Adrian Bunk 已提交
264
 * OTOH the cpuarrays can contain lots of objects,
L
Linus Torvalds 已提交
265 266
 * which could lock up otherwise freeable slabs.
 */
267 268
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)
L
Linus Torvalds 已提交
269 270 271 272 273 274

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
275
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
A
Andrew Morton 已提交
276 277 278 279 280
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
L
Linus Torvalds 已提交
281 282
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
283
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
284
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
A
Andrew Morton 已提交
285 286 287 288 289
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
L
Linus Torvalds 已提交
290 291 292 293 294 295 296 297 298
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
299
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
L
Linus Torvalds 已提交
300 301 302
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
303
#define	STATS_INC_NODEFREES(x)	do { } while (0)
304
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
A
Andrew Morton 已提交
305
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
L
Linus Torvalds 已提交
306 307 308 309 310 311 312 313
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

A
Andrew Morton 已提交
314 315
/*
 * memory layout of objects:
L
Linus Torvalds 已提交
316
 * 0		: objp
317
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
L
Linus Torvalds 已提交
318 319
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
320
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
L
Linus Torvalds 已提交
321
 * 		redzone word.
322
 * cachep->obj_offset: The real object.
323 324
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
A
Andrew Morton 已提交
325
 *					[BYTES_PER_WORD long]
L
Linus Torvalds 已提交
326
 */
327
static int obj_offset(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
328
{
329
	return cachep->obj_offset;
L
Linus Torvalds 已提交
330 331
}

332
static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
333 334
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 336
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
L
Linus Torvalds 已提交
337 338
}

339
static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
340 341 342
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
343
		return (unsigned long long *)(objp + cachep->size -
344
					      sizeof(unsigned long long) -
D
David Woodhouse 已提交
345
					      REDZONE_ALIGN);
346
	return (unsigned long long *) (objp + cachep->size -
347
				       sizeof(unsigned long long));
L
Linus Torvalds 已提交
348 349
}

350
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
351 352
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
L
Linus Torvalds 已提交
354 355 356 357
}

#else

358
#define obj_offset(x)			0
359 360
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
L
Linus Torvalds 已提交
361 362 363 364 365
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
366 367
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
L
Linus Torvalds 已提交
368
 */
369 370 371
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
372
static bool slab_max_order_set __initdata;
L
Linus Torvalds 已提交
373

374
static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
375 376
				 unsigned int idx)
{
377
	return page->s_mem + cache->size * idx;
378 379
}

380
#define BOOT_CPUCACHE_ENTRIES	1
L
Linus Torvalds 已提交
381
/* internal cache of cache description objs */
382
static struct kmem_cache kmem_cache_boot = {
P
Pekka Enberg 已提交
383 384 385
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
386
	.size = sizeof(struct kmem_cache),
P
Pekka Enberg 已提交
387
	.name = "kmem_cache",
L
Linus Torvalds 已提交
388 389
};

390
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
L
Linus Torvalds 已提交
391

392
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
393
{
394
	return this_cpu_ptr(cachep->cpu_cache);
L
Linus Torvalds 已提交
395 396
}

A
Andrew Morton 已提交
397 398 399
/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
400
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
401
		slab_flags_t flags, size_t *left_over)
402
{
403
	unsigned int num;
404
	size_t slab_size = PAGE_SIZE << gfporder;
L
Linus Torvalds 已提交
405

406 407 408 409 410 411
	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
412 413 414 415 416
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
417 418 419 420 421 422
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
423
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
424
		num = slab_size / buffer_size;
425
		*left_over = slab_size % buffer_size;
426
	} else {
427
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
428 429
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
430
	}
431 432

	return num;
L
Linus Torvalds 已提交
433 434
}

435
#if DEBUG
436
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
L
Linus Torvalds 已提交
437

A
Andrew Morton 已提交
438 439
static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
L
Linus Torvalds 已提交
440
{
441
	pr_err("slab error in %s(): cache `%s': %s\n",
P
Pekka Enberg 已提交
442
	       function, cachep->name, msg);
L
Linus Torvalds 已提交
443
	dump_stack();
444
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
L
Linus Torvalds 已提交
445
}
446
#endif
L
Linus Torvalds 已提交
447

448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463
/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

464 465 466 467 468 469 470 471 472 473 474
static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

475 476 477 478 479 480 481
#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
482
static DEFINE_PER_CPU(unsigned long, slab_reap_node);
483 484 485

static void init_reap_node(int cpu)
{
486 487
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
488 489 490 491
}

static void next_reap_node(void)
{
492
	int node = __this_cpu_read(slab_reap_node);
493

494
	node = next_node_in(node, node_online_map);
495
	__this_cpu_write(slab_reap_node, node);
496 497 498 499 500 501 502
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

L
Linus Torvalds 已提交
503 504 505 506 507 508 509
/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
510
static void start_cpu_timer(int cpu)
L
Linus Torvalds 已提交
511
{
512
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
L
Linus Torvalds 已提交
513

514
	if (reap_work->work.func == NULL) {
515
		init_reap_node(cpu);
516
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
517 518
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
L
Linus Torvalds 已提交
519 520 521
	}
}

522
static void init_arraycache(struct array_cache *ac, int limit, int batch)
L
Linus Torvalds 已提交
523
{
524 525 526 527 528
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
L
Linus Torvalds 已提交
529
	}
530 531 532 533 534
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
535
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
536 537 538
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
539 540 541 542 543 544 545 546
	/*
	 * The array_cache structures contain pointers to free object.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
547 548
	init_arraycache(ac, entries, batchcount);
	return ac;
L
Linus Torvalds 已提交
549 550
}

551 552
static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
553
{
554 555 556
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);
557

558 559
	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);
560

561 562 563
	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);
564

565
	slabs_destroy(cachep, &list);
566 567
}

568 569 570 571 572 573 574 575 576 577
/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
578
	int nr = min3(from->avail, max, to->limit - to->avail);
579 580 581 582 583 584 585 586 587 588 589 590

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

591 592 593
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
594
#define reap_alien(cachep, n) do { } while (0)
595

J
Joonsoo Kim 已提交
596 597
static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
598
{
599
	return NULL;
600 601
}

J
Joonsoo Kim 已提交
602
static inline void free_alien_cache(struct alien_cache **ac_ptr)
603 604 605 606 607 608 609 610 611 612 613 614 615 616
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

617
static inline void *____cache_alloc_node(struct kmem_cache *cachep,
618 619 620 621 622
		 gfp_t flags, int nodeid)
{
	return NULL;
}

D
David Rientjes 已提交
623 624
static inline gfp_t gfp_exact_node(gfp_t flags)
{
625
	return flags & ~__GFP_NOFAIL;
D
David Rientjes 已提交
626 627
}

628 629
#else	/* CONFIG_NUMA */

630
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
631
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
632

J
Joonsoo Kim 已提交
633 634 635
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
636
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
J
Joonsoo Kim 已提交
637 638 639
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
640
	if (alc) {
641
		kmemleak_no_scan(alc);
642 643 644
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
J
Joonsoo Kim 已提交
645 646 647 648
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
649
{
J
Joonsoo Kim 已提交
650
	struct alien_cache **alc_ptr;
651 652 653 654
	int i;

	if (limit > 1)
		limit = 12;
655
	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
J
Joonsoo Kim 已提交
656 657 658 659 660 661 662 663 664 665 666 667
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
668 669
		}
	}
J
Joonsoo Kim 已提交
670
	return alc_ptr;
671 672
}

J
Joonsoo Kim 已提交
673
static void free_alien_cache(struct alien_cache **alc_ptr)
674 675 676
{
	int i;

J
Joonsoo Kim 已提交
677
	if (!alc_ptr)
678 679
		return;
	for_each_node(i)
J
Joonsoo Kim 已提交
680 681
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
682 683
}

684
static void __drain_alien_cache(struct kmem_cache *cachep,
685 686
				struct array_cache *ac, int node,
				struct list_head *list)
687
{
688
	struct kmem_cache_node *n = get_node(cachep, node);
689 690

	if (ac->avail) {
691
		spin_lock(&n->list_lock);
692 693 694 695 696
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
697 698
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);
699

700
		free_block(cachep, ac->entry, ac->avail, node, list);
701
		ac->avail = 0;
702
		spin_unlock(&n->list_lock);
703 704 705
	}
}

706 707 708
/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
709
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
710
{
711
	int node = __this_cpu_read(slab_reap_node);
712

713
	if (n->alien) {
J
Joonsoo Kim 已提交
714 715 716 717 718
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
719
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
720 721 722
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
723
				spin_unlock_irq(&alc->lock);
724
				slabs_destroy(cachep, &list);
J
Joonsoo Kim 已提交
725
			}
726 727 728 729
		}
	}
}

A
Andrew Morton 已提交
730
static void drain_alien_cache(struct kmem_cache *cachep,
J
Joonsoo Kim 已提交
731
				struct alien_cache **alien)
732
{
P
Pekka Enberg 已提交
733
	int i = 0;
J
Joonsoo Kim 已提交
734
	struct alien_cache *alc;
735 736 737 738
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
J
Joonsoo Kim 已提交
739 740
		alc = alien[i];
		if (alc) {
741 742
			LIST_HEAD(list);

J
Joonsoo Kim 已提交
743
			ac = &alc->ac;
744
			spin_lock_irqsave(&alc->lock, flags);
745
			__drain_alien_cache(cachep, ac, i, &list);
746
			spin_unlock_irqrestore(&alc->lock, flags);
747
			slabs_destroy(cachep, &list);
748 749 750
		}
	}
}
751

752 753
static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
754
{
755
	struct kmem_cache_node *n;
J
Joonsoo Kim 已提交
756 757
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
758
	LIST_HEAD(list);
P
Pekka Enberg 已提交
759

760
	n = get_node(cachep, node);
761
	STATS_INC_NODEFREES(cachep);
762 763
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
J
Joonsoo Kim 已提交
764
		ac = &alien->ac;
765
		spin_lock(&alien->lock);
J
Joonsoo Kim 已提交
766
		if (unlikely(ac->avail == ac->limit)) {
767
			STATS_INC_ACOVERFLOW(cachep);
768
			__drain_alien_cache(cachep, ac, page_node, &list);
769
		}
770
		ac->entry[ac->avail++] = objp;
771
		spin_unlock(&alien->lock);
772
		slabs_destroy(cachep, &list);
773
	} else {
774
		n = get_node(cachep, page_node);
775
		spin_lock(&n->list_lock);
776
		free_block(cachep, &objp, 1, page_node, &list);
777
		spin_unlock(&n->list_lock);
778
		slabs_destroy(cachep, &list);
779 780 781
	}
	return 1;
}
782 783 784 785 786 787 788 789 790 791 792 793 794 795

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing a object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}
D
David Rientjes 已提交
796 797

/*
798 799
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
D
David Rientjes 已提交
800 801 802
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
803
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
D
David Rientjes 已提交
804
}
805 806
#endif

807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846
static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

847
#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
848
/*
849
 * Allocates and initializes node for a node on each slab cache, used for
850
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
851
 * will be allocated off-node since memory is not yet online for the new node.
852
 * When hotplugging memory or a cpu, existing node are not replaced if
853 854
 * already in use.
 *
855
 * Must hold slab_mutex.
856
 */
857
static int init_cache_node_node(int node)
858
{
859
	int ret;
860 861
	struct kmem_cache *cachep;

862
	list_for_each_entry(cachep, &slab_caches, list) {
863 864 865
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
866
	}
867

868 869
	return 0;
}
870
#endif
871

872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920
static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

921 922 923 924
	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing to it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
925
	 * freed after synchronize_rcu().
926
	 */
927
	if (old_shared && force_change)
928
		synchronize_rcu();
929

930 931 932 933 934 935 936 937
fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

938 939
#ifdef CONFIG_SMP

940
static void cpuup_canceled(long cpu)
941 942
{
	struct kmem_cache *cachep;
943
	struct kmem_cache_node *n = NULL;
944
	int node = cpu_to_mem(cpu);
945
	const struct cpumask *mask = cpumask_of_node(node);
946

947
	list_for_each_entry(cachep, &slab_caches, list) {
948 949
		struct array_cache *nc;
		struct array_cache *shared;
J
Joonsoo Kim 已提交
950
		struct alien_cache **alien;
951
		LIST_HEAD(list);
952

953
		n = get_node(cachep, node);
954
		if (!n)
955
			continue;
956

957
		spin_lock_irq(&n->list_lock);
958

959 960
		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
961 962 963

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
964 965
		free_block(cachep, nc->entry, nc->avail, node, &list);
		nc->avail = 0;
966

967
		if (!cpumask_empty(mask)) {
968
			spin_unlock_irq(&n->list_lock);
969
			goto free_slab;
970 971
		}

972
		shared = n->shared;
973 974
		if (shared) {
			free_block(cachep, shared->entry,
975
				   shared->avail, node, &list);
976
			n->shared = NULL;
977 978
		}

979 980
		alien = n->alien;
		n->alien = NULL;
981

982
		spin_unlock_irq(&n->list_lock);
983 984 985 986 987 988

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
989 990

free_slab:
991
		slabs_destroy(cachep, &list);
992 993 994 995 996 997
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
998
	list_for_each_entry(cachep, &slab_caches, list) {
999
		n = get_node(cachep, node);
1000
		if (!n)
1001
			continue;
1002
		drain_freelist(cachep, n, INT_MAX);
1003 1004 1005
	}
}

1006
static int cpuup_prepare(long cpu)
L
Linus Torvalds 已提交
1007
{
1008
	struct kmem_cache *cachep;
1009
	int node = cpu_to_mem(cpu);
1010
	int err;
L
Linus Torvalds 已提交
1011

1012 1013 1014 1015
	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
1016
	 * kmem_cache_node and not this cpu's kmem_cache_node
1017
	 */
1018
	err = init_cache_node_node(node);
1019 1020
	if (err < 0)
		goto bad;
1021 1022 1023 1024 1025

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
1026
	list_for_each_entry(cachep, &slab_caches, list) {
1027 1028 1029
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
1030
	}
1031

1032 1033
	return 0;
bad:
1034
	cpuup_canceled(cpu);
1035 1036 1037
	return -ENOMEM;
}

1038
int slab_prepare_cpu(unsigned int cpu)
1039
{
1040
	int err;
1041

1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_list3 of any cache. This to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The list3 structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
1065
#endif
1066 1067 1068 1069 1070

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
L
Linus Torvalds 已提交
1071 1072
}

1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}
L
Linus Torvalds 已提交
1086

1087 1088 1089 1090 1091 1092
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
1093
 * Must hold slab_mutex.
1094
 */
1095
static int __meminit drain_cache_node_node(int node)
1096 1097 1098 1099
{
	struct kmem_cache *cachep;
	int ret = 0;

1100
	list_for_each_entry(cachep, &slab_caches, list) {
1101
		struct kmem_cache_node *n;
1102

1103
		n = get_node(cachep, node);
1104
		if (!n)
1105 1106
			continue;

1107
		drain_freelist(cachep, n, INT_MAX);
1108

1109 1110
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
1131
		mutex_lock(&slab_mutex);
1132
		ret = init_cache_node_node(nid);
1133
		mutex_unlock(&slab_mutex);
1134 1135
		break;
	case MEM_GOING_OFFLINE:
1136
		mutex_lock(&slab_mutex);
1137
		ret = drain_cache_node_node(nid);
1138
		mutex_unlock(&slab_mutex);
1139 1140 1141 1142 1143 1144 1145 1146
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
1147
	return notifier_from_errno(ret);
1148 1149 1150
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

1151
/*
1152
 * swap the static kmem_cache_node with kmalloced memory
1153
 */
1154
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1155
				int nodeid)
1156
{
1157
	struct kmem_cache_node *ptr;
1158

1159
	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1160 1161
	BUG_ON(!ptr);

1162
	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1163 1164 1165 1166 1167
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

1168
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1169
	cachep->node[nodeid] = ptr;
1170 1171
}

1172
/*
1173 1174
 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
 * size of kmem_cache_node.
1175
 */
1176
static void __init set_up_node(struct kmem_cache *cachep, int index)
1177 1178 1179 1180
{
	int node;

	for_each_online_node(node) {
1181
		cachep->node[node] = &init_kmem_cache_node[index + node];
1182
		cachep->node[node]->next_reap = jiffies +
1183 1184
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1185 1186 1187
	}
}

A
Andrew Morton 已提交
1188 1189 1190
/*
 * Initialisation.  Called after the page allocator have been initialised and
 * before smp_init().
L
Linus Torvalds 已提交
1191 1192 1193
 */
void __init kmem_cache_init(void)
{
1194 1195
	int i;

1196 1197
	kmem_cache = &kmem_cache_boot;

1198
	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1199 1200
		use_alien_caches = 0;

C
Christoph Lameter 已提交
1201
	for (i = 0; i < NUM_INIT_LISTS; i++)
1202
		kmem_cache_node_init(&init_kmem_cache_node[i]);
C
Christoph Lameter 已提交
1203

L
Linus Torvalds 已提交
1204 1205
	/*
	 * Fragmentation resistance on low memory - only use bigger
1206 1207
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
L
Linus Torvalds 已提交
1208
	 */
1209
	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1210
		slab_max_order = SLAB_MAX_ORDER_HI;
L
Linus Torvalds 已提交
1211 1212 1213

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
1214 1215 1216
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
1217
	 *    Initially an __init data area is used for the head array and the
1218
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1219
	 *    array at the end of the bootstrap.
L
Linus Torvalds 已提交
1220
	 * 2) Create the first kmalloc cache.
1221
	 *    The struct kmem_cache for the new cache is allocated normally.
1222 1223 1224
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
1225
	 * 4) Replace the __init data head arrays for kmem_cache and the first
L
Linus Torvalds 已提交
1226
	 *    kmalloc cache with kmalloc allocated arrays.
1227
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1228 1229
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
L
Linus Torvalds 已提交
1230 1231
	 */

1232
	/* 1) create the kmem_cache */
L
Linus Torvalds 已提交
1233

E
Eric Dumazet 已提交
1234
	/*
1235
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
E
Eric Dumazet 已提交
1236
	 */
1237
	create_boot_cache(kmem_cache, "kmem_cache",
1238
		offsetof(struct kmem_cache, node) +
1239
				  nr_node_ids * sizeof(struct kmem_cache_node *),
1240
				  SLAB_HWCACHE_ALIGN, 0, 0);
1241
	list_add(&kmem_cache->list, &slab_caches);
1242
	memcg_link_cache(kmem_cache, NULL);
1243
	slab_state = PARTIAL;
L
Linus Torvalds 已提交
1244

A
Andrew Morton 已提交
1245
	/*
1246 1247
	 * Initialize the caches that provide memory for the  kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
1248
	 */
1249
	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1250
				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1251 1252 1253
				kmalloc_info[INDEX_NODE].size,
				ARCH_KMALLOC_FLAGS, 0,
				kmalloc_info[INDEX_NODE].size);
1254
	slab_state = PARTIAL_NODE;
1255
	setup_kmalloc_cache_index_table();
1256

1257 1258
	slab_early_init = 0;

1259
	/* 5) Replace the bootstrap kmem_cache_node */
1260
	{
P
Pekka Enberg 已提交
1261 1262
		int nid;

1263
		for_each_online_node(nid) {
1264
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1265

1266
			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1267
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1268 1269
		}
	}
L
Linus Torvalds 已提交
1270

1271
	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1272 1273 1274 1275 1276 1277 1278
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
1279 1280
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
1281 1282
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
1283
	mutex_unlock(&slab_mutex);
1284

1285 1286 1287
	/* Done! */
	slab_state = FULL;

1288 1289 1290
#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
1291
	 * node.
1292 1293 1294 1295
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

A
Andrew Morton 已提交
1296 1297 1298
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
L
Linus Torvalds 已提交
1299 1300 1301 1302 1303
	 */
}

static int __init cpucache_init(void)
{
1304
	int ret;
L
Linus Torvalds 已提交
1305

A
Andrew Morton 已提交
1306 1307
	/*
	 * Register the timers that return unneeded pages to the page allocator
L
Linus Torvalds 已提交
1308
	 */
1309 1310 1311
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);
1312

L
Linus Torvalds 已提交
1313 1314 1315 1316
	return 0;
}
__initcall(cpucache_init);

1317 1318 1319
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
1320
#if DEBUG
1321
	struct kmem_cache_node *n;
1322 1323
	unsigned long flags;
	int node;
1324 1325 1326 1327 1328
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;
1329

1330 1331 1332
	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
1333
		cachep->name, cachep->size, cachep->gfporder);
1334

1335
	for_each_kmem_cache_node(cachep, node, n) {
1336
		unsigned long total_slabs, free_slabs, free_objs;
1337

1338
		spin_lock_irqsave(&n->list_lock, flags);
1339 1340 1341
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
1342
		spin_unlock_irqrestore(&n->list_lock, flags);
1343

1344 1345 1346 1347
		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
1348
	}
1349
#endif
1350 1351
}

L
Linus Torvalds 已提交
1352
/*
W
Wang Sheng-Hui 已提交
1353 1354
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
L
Linus Torvalds 已提交
1355 1356 1357 1358 1359
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
1360 1361
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
L
Linus Torvalds 已提交
1362 1363
{
	struct page *page;
1364

1365
	flags |= cachep->allocflags;
1366

1367
	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1368
	if (!page) {
1369
		slab_out_of_memory(cachep, flags, nodeid);
L
Linus Torvalds 已提交
1370
		return NULL;
1371
	}
L
Linus Torvalds 已提交
1372

1373
	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
1374 1375 1376 1377
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

1378
	__SetPageSlab(page);
1379 1380
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1381
		SetPageSlabPfmemalloc(page);
1382

1383
	return page;
L
Linus Torvalds 已提交
1384 1385 1386 1387 1388
}

/*
 * Interface to system's page release.
 */
1389
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
1390
{
1391
	int order = cachep->gfporder;
J
Joonsoo Kim 已提交
1392

1393
	BUG_ON(!PageSlab(page));
J
Joonsoo Kim 已提交
1394
	__ClearPageSlabPfmemalloc(page);
1395
	__ClearPageSlab(page);
1396 1397
	page_mapcount_reset(page);
	page->mapping = NULL;
G
Glauber Costa 已提交
1398

L
Linus Torvalds 已提交
1399
	if (current->reclaim_state)
1400 1401
		current->reclaim_state->reclaimed_slab += 1 << order;
	uncharge_slab_page(page, order, cachep);
1402
	__free_pages(page, order);
L
Linus Torvalds 已提交
1403 1404 1405 1406
}

static void kmem_rcu_free(struct rcu_head *head)
{
1407 1408
	struct kmem_cache *cachep;
	struct page *page;
L
Linus Torvalds 已提交
1409

1410 1411 1412 1413
	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
L
Linus Torvalds 已提交
1414 1415 1416
}

#if DEBUG
1417 1418
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
1419
	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1420 1421 1422 1423 1424
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}
L
Linus Torvalds 已提交
1425 1426

#ifdef CONFIG_DEBUG_PAGEALLOC
Q
Qian Cai 已提交
1427
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1428 1429 1430 1431 1432 1433 1434 1435 1436
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
Q
Qian Cai 已提交
1437
				int map) {}
1438

L
Linus Torvalds 已提交
1439 1440
#endif

1441
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
L
Linus Torvalds 已提交
1442
{
1443
	int size = cachep->object_size;
1444
	addr = &((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1445 1446

	memset(addr, val, size);
P
Pekka Enberg 已提交
1447
	*(unsigned char *)(addr + size - 1) = POISON_END;
L
Linus Torvalds 已提交
1448 1449 1450 1451 1452
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
D
Dave Jones 已提交
1453 1454 1455
	unsigned char error = 0;
	int bad_count = 0;

1456
	pr_err("%03x: ", offset);
D
Dave Jones 已提交
1457 1458 1459 1460 1461 1462
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
1463 1464
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);
D
Dave Jones 已提交
1465 1466 1467 1468

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
1469
			pr_err("Single bit error detected. Probably bad RAM.\n");
D
Dave Jones 已提交
1470
#ifdef CONFIG_X86
1471
			pr_err("Run memtest86+ or a similar memory test tool.\n");
D
Dave Jones 已提交
1472
#else
1473
			pr_err("Run a memory test tool.\n");
D
Dave Jones 已提交
1474 1475 1476
#endif
		}
	}
L
Linus Torvalds 已提交
1477 1478 1479 1480 1481
}
#endif

#if DEBUG

1482
static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
L
Linus Torvalds 已提交
1483 1484 1485 1486 1487
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
1488 1489 1490
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
1491 1492
	}

1493 1494
	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1495
	realobj = (char *)objp + obj_offset(cachep);
1496
	size = cachep->object_size;
P
Pekka Enberg 已提交
1497
	for (i = 0; i < size && lines; i += 16, lines--) {
L
Linus Torvalds 已提交
1498 1499
		int limit;
		limit = 16;
P
Pekka Enberg 已提交
1500 1501
		if (i + limit > size)
			limit = size - i;
L
Linus Torvalds 已提交
1502 1503 1504 1505
		dump_line(realobj, i, limit);
	}
}

1506
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
1507 1508 1509 1510 1511
{
	char *realobj;
	int size, i;
	int lines = 0;

1512 1513 1514
	if (is_debug_pagealloc_cache(cachep))
		return;

1515
	realobj = (char *)objp + obj_offset(cachep);
1516
	size = cachep->object_size;
L
Linus Torvalds 已提交
1517

P
Pekka Enberg 已提交
1518
	for (i = 0; i < size; i++) {
L
Linus Torvalds 已提交
1519
		char exp = POISON_FREE;
P
Pekka Enberg 已提交
1520
		if (i == size - 1)
L
Linus Torvalds 已提交
1521 1522 1523 1524 1525 1526
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
1527
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1528 1529
				       print_tainted(), cachep->name,
				       realobj, size);
L
Linus Torvalds 已提交
1530 1531 1532
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
P
Pekka Enberg 已提交
1533
			i = (i / 16) * 16;
L
Linus Torvalds 已提交
1534
			limit = 16;
P
Pekka Enberg 已提交
1535 1536
			if (i + limit > size)
				limit = size - i;
L
Linus Torvalds 已提交
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
1549
		struct page *page = virt_to_head_page(objp);
1550
		unsigned int objnr;
L
Linus Torvalds 已提交
1551

1552
		objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
1553
		if (objnr) {
1554
			objp = index_to_obj(cachep, page, objnr - 1);
1555
			realobj = (char *)objp + obj_offset(cachep);
1556
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
L
Linus Torvalds 已提交
1557 1558
			print_objinfo(cachep, objp, 2);
		}
P
Pekka Enberg 已提交
1559
		if (objnr + 1 < cachep->num) {
1560
			objp = index_to_obj(cachep, page, objnr + 1);
1561
			realobj = (char *)objp + obj_offset(cachep);
1562
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
L
Linus Torvalds 已提交
1563 1564 1565 1566 1567 1568
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

1569
#if DEBUG
1570 1571
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
L
Linus Torvalds 已提交
1572 1573
{
	int i;
1574 1575 1576 1577 1578 1579

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

L
Linus Torvalds 已提交
1580
	for (i = 0; i < cachep->num; i++) {
1581
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
1582 1583 1584

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
Q
Qian Cai 已提交
1585
			slab_kernel_map(cachep, objp, 1);
L
Linus Torvalds 已提交
1586 1587 1588
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
1589
				slab_error(cachep, "start of a freed object was overwritten");
L
Linus Torvalds 已提交
1590
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
1591
				slab_error(cachep, "end of a freed object was overwritten");
L
Linus Torvalds 已提交
1592 1593
		}
	}
1594
}
L
Linus Torvalds 已提交
1595
#else
1596 1597
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
1598 1599
{
}
L
Linus Torvalds 已提交
1600 1601
#endif

1602 1603 1604
/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
1605
 * @page: page pointer being destroyed
1606
 *
W
Wang Sheng-Hui 已提交
1607 1608 1609
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
1610
 */
1611
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1612
{
1613
	void *freelist;
1614

1615 1616
	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
1617
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1618 1619
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
1620
		kmem_freepages(cachep, page);
1621 1622

	/*
1623
	 * From now on, we don't use freelist
1624 1625 1626
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
1627
		kmem_cache_free(cachep->freelist_cache, freelist);
L
Linus Torvalds 已提交
1628 1629
}

1630 1631 1632 1633
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

1634 1635
	list_for_each_entry_safe(page, n, list, slab_list) {
		list_del(&page->slab_list);
1636 1637 1638 1639
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 *
 * Return: number of left-over bytes in a slab
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
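/*
 * Worked example (illustrative only, not from the original source): with
 * 4096-byte pages and 500-byte objects, cache_estimate() at gfporder 0
 * reports roughly num = 8 and remainder = 96 (ignoring the per-object
 * freelist-index overhead).  The fragmentation check accepts order 0
 * because 96 * 8 = 768 <= 4096, so the loop stops there instead of trying
 * costlier high-order allocations.
 */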

static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}
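/*
 * Note added for clarity (illustrative, not part of the original file):
 * during early boot the per-cpu array is deliberately tiny (one entry,
 * batchcount 1, limit BOOT_CPUCACHE_ENTRIES); once slab_state reaches FULL,
 * enable_cpucache() re-sizes it according to the object size.  A rough
 * sketch of the life cycle, values hypothetical:
 *
 *	setup_cpu_cache(cachep, GFP_KERNEL);	// boot: avail = 0, limit = 1
 *	...
 *	enable_cpucache(cachep, GFP_KERNEL);	// later: limit/batchcount tuned
 */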

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}
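/*
 * Illustrative example (not in the original source): two caches with
 * compatible size, alignment and flags and no constructor may be merged
 * through find_mergeable().  The cache names below are made up:
 *
 *	a = kmem_cache_create("example_a", 96, 0, 0, NULL);
 *	b = kmem_cache_create("example_b", 96, 0, 0, NULL);
 *
 * may yield a == b here; the refcount is bumped and object_size is widened
 * to the larger request so kzalloc-style clearing still covers the whole
 * object.
 */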

static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * If slab auto-initialization on free is enabled, store the freelist
	 * off-slab, so that its contents don't end up in one of the allocated
	 * objects.
	 */
	if (unlikely(slab_want_init_on_free(cachep)))
		return false;

	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}
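/*
 * Summary sketch of the three freelist layouts tried by
 * __kmem_cache_create() below (illustrative, numbers are examples only):
 *
 *  - OBJFREELIST_SLAB: the index array lives inside one free object, so a
 *    slab of N objects needs no extra space at all.
 *  - OFF_SLAB: the index array (N * sizeof(freelist_idx_t) bytes) comes
 *    from a kmalloc cache, leaving the whole page for objects.
 *  - on-slab: the index array is carved from the end of the slab page
 *    itself, trading a little space for self-containment.
 *
 * For example, with 4096-byte pages, 256-byte objects and 1-byte freelist
 * indexes, on-slab placement fits 15 objects plus 15 index bytes, while
 * off-slab placement fits 16 objects.
 */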

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: 0 on success, non-zero on failure.
 */
1912
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
L
Linus Torvalds 已提交
1913
{
1914
	size_t ralign = BYTES_PER_WORD;
1915
	gfp_t gfp;
1916
	int err;
1917
	unsigned int size = cachep->size;
L
Linus Torvalds 已提交
1918 1919 1920 1921 1922 1923 1924 1925 1926

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
1927 1928
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
1929
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1930
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
L
Linus Torvalds 已提交
1931 1932 1933 1934
		flags |= SLAB_POISON;
#endif
#endif

A
Andrew Morton 已提交
1935 1936
	/*
	 * Check that size is in terms of words.  This is needed to avoid
L
Linus Torvalds 已提交
1937 1938 1939
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
1940
	size = ALIGN(size, BYTES_PER_WORD);
L
Linus Torvalds 已提交
1941

D
David Woodhouse 已提交
1942 1943 1944 1945
	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
1946
		size = ALIGN(size, REDZONE_ALIGN);
D
David Woodhouse 已提交
1947
	}
1948

1949
	/* 3) caller mandated alignment */
1950 1951
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
1952
	}
1953 1954
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
1955
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
1956
	/*
1957
	 * 4) Store it.
L
Linus Torvalds 已提交
1958
	 */
1959
	cachep->align = ralign;
1960 1961 1962 1963
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
L
Linus Torvalds 已提交
1964

1965 1966 1967 1968 1969
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

L
Linus Torvalds 已提交
1970 1971
#if DEBUG

1972 1973 1974 1975
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
1976 1977
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
1978 1979
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
1980 1981
	}
	if (flags & SLAB_STORE_USER) {
1982
		/* user store requires one word storage behind the end of
D
David Woodhouse 已提交
1983 1984
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
L
Linus Torvalds 已提交
1985
		 */
D
David Woodhouse 已提交
1986 1987 1988 1989
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
1990
	}
1991 1992
#endif

A
Alexander Potapenko 已提交
1993 1994
	kasan_cache_create(cachep, &size, &flags);

1995 1996 1997 1998 1999 2000 2001 2002 2003
	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
2004 2005 2006 2007 2008 2009 2010
	/*
	 * To activate debug pagealloc, off-slab management is necessary
	 * requirement. In early phase of initialization, small sized slab
	 * doesn't get initialized so it would not be possible. So, we need
	 * to check size >= 256. It guarantees that all necessary small
	 * sized slab is initialized in current slab initialization sequence.
	 */
2011
	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
L
Linus Torvalds 已提交
2023 2024 2025
	}
#endif

2026 2027 2028 2029 2030
	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

2031
	if (set_off_slab_cache(cachep, size, flags)) {
L
Linus Torvalds 已提交
2032
		flags |= CFLGS_OFF_SLAB;
2033
		goto done;
2034
	}
L
Linus Torvalds 已提交
2035

2036 2037
	if (set_on_slab_cache(cachep, size, flags))
		goto done;
L
Linus Torvalds 已提交
2038

2039
	return -E2BIG;
L
Linus Torvalds 已提交
2040

2041 2042
done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
L
Linus Torvalds 已提交
2043
	cachep->flags = flags;
2044
	cachep->allocflags = __GFP_COMP;
Y
Yang Shi 已提交
2045
	if (flags & SLAB_CACHE_DMA)
2046
		cachep->allocflags |= GFP_DMA;
2047 2048
	if (flags & SLAB_CACHE_DMA32)
		cachep->allocflags |= GFP_DMA32;
2049 2050
	if (flags & SLAB_RECLAIM_ACCOUNT)
		cachep->allocflags |= __GFP_RECLAIMABLE;
2051
	cachep->size = size;
2052
	cachep->reciprocal_buffer_size = reciprocal_value(size);
L
Linus Torvalds 已提交
2053

2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066
#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
2067 2068
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
2069
	}
L
Linus Torvalds 已提交
2070

	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_release(cachep);
		return err;
	}

	return 0;
}
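/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): callers never invoke __kmem_cache_create() directly; they go
 * through kmem_cache_create() in mm/slab_common.c, which sets up the
 * struct kmem_cache and then calls down here, e.g.:
 *
 *	struct foo { int a; struct list_head link; };
 *	struct kmem_cache *foo_cache;
 *	void *p;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 *
 * "foo" and "foo_cache" are hypothetical names used only for illustration.
 */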

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2091 2092 2093 2094 2095
static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

2096
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2097 2098 2099
{
#ifdef CONFIG_SMP
	check_irq_off();
2100
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
L
Linus Torvalds 已提交
2101 2102
#endif
}
2103

2104
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2105 2106 2107
{
#ifdef CONFIG_SMP
	check_irq_off();
2108
	assert_spin_locked(&get_node(cachep, node)->list_lock);
2109 2110 2111
#endif
}

L
Linus Torvalds 已提交
2112 2113 2114
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
2115
#define check_mutex_acquired()	do { } while(0)
L
Linus Torvalds 已提交
2116
#define check_spinlock_acquired(x) do { } while(0)
2117
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2118 2119
#endif

2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}
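/*
 * Worked example (illustrative): for an array cache with limit = 120 and
 * avail = 60, a periodic (non-free_all) drain computes
 * tofree = (120 + 4) / 5 = 24, i.e. roughly 20% of the limit per pass;
 * if fewer than 24 objects were available, at most half of them,
 * (avail + 1) / 2, would be freed instead.
 */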

L
Linus Torvalds 已提交
2137 2138
static void do_drain(void *arg)
{
A
Andrew Morton 已提交
2139
	struct kmem_cache *cachep = arg;
L
Linus Torvalds 已提交
2140
	struct array_cache *ac;
2141
	int node = numa_mem_id();
2142
	struct kmem_cache_node *n;
2143
	LIST_HEAD(list);
L
Linus Torvalds 已提交
2144 2145

	check_irq_off();
2146
	ac = cpu_cache_get(cachep);
2147 2148
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
2149
	free_block(cachep, ac->entry, ac->avail, node, &list);
2150
	spin_unlock(&n->list_lock);
2151
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
2152 2153 2154
	ac->avail = 0;
}

2155
static void drain_cpu_caches(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2156
{
2157
	struct kmem_cache_node *n;
2158
	int node;
2159
	LIST_HEAD(list);
2160

2161
	on_each_cpu(do_drain, cachep, 1);
L
Linus Torvalds 已提交
2162
	check_irq_on();
2163 2164
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
2165
			drain_alien_cache(cachep, n->alien);
2166

2167 2168 2169 2170 2171 2172 2173
	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
L
Linus Torvalds 已提交
2174 2175
}

2176 2177 2178 2179 2180 2181 2182
/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
2183
			struct kmem_cache_node *n, int tofree)
L
Linus Torvalds 已提交
2184
{
2185 2186
	struct list_head *p;
	int nr_freed;
2187
	struct page *page;
L
Linus Torvalds 已提交
2188

2189
	nr_freed = 0;
2190
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
L
Linus Torvalds 已提交
2191

2192 2193 2194 2195
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
2196 2197
			goto out;
		}
L
Linus Torvalds 已提交
2198

2199 2200
		page = list_entry(p, struct page, slab_list);
		list_del(&page->slab_list);
2201
		n->free_slabs--;
2202
		n->total_slabs--;
2203 2204 2205 2206
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
2207 2208
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
2209
		slab_destroy(cache, page);
2210
		nr_freed++;
L
Linus Torvalds 已提交
2211
	}
2212 2213
out:
	return nr_freed;
L
Linus Torvalds 已提交
2214 2215
}

2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227
bool __kmem_cache_empty(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n)
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial))
			return false;
	return true;
}

2228
int __kmem_cache_shrink(struct kmem_cache *cachep)
2229
{
2230 2231
	int ret = 0;
	int node;
2232
	struct kmem_cache_node *n;
2233 2234 2235 2236

	drain_cpu_caches(cachep);

	check_irq_on();
2237
	for_each_kmem_cache_node(cachep, node, n) {
2238
		drain_freelist(cachep, n, INT_MAX);
2239

2240 2241
		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
2242 2243 2244 2245
	}
	return (ret ? 1 : 0);
}

2246 2247 2248 2249 2250
#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
	__kmem_cache_shrink(cachep);
}
2251 2252 2253 2254

void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
{
}
2255 2256
#endif

2257
int __kmem_cache_shutdown(struct kmem_cache *cachep)
2258
{
2259
	return __kmem_cache_shrink(cachep);
2260 2261 2262
}

void __kmem_cache_release(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2263
{
2264
	int i;
2265
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2266

T
Thomas Garnier 已提交
2267 2268
	cache_random_seq_destroy(cachep);

2269
	free_percpu(cachep->cpu_cache);
L
Linus Torvalds 已提交
2270

2271
	/* NUMA: free the node structures */
2272 2273 2274 2275 2276
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
2277
	}
L
Linus Torvalds 已提交
2278 2279
}

2280 2281
/*
 * Get the memory for a slab management obj.
2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it did, that would mean deferring the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
2293
 */
2294
static void *alloc_slabmgmt(struct kmem_cache *cachep,
2295 2296
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
L
Linus Torvalds 已提交
2297
{
2298
	void *freelist;
2299
	void *addr = page_address(page);
P
Pekka Enberg 已提交
2300

2301
	page->s_mem = addr + colour_off;
2302 2303
	page->active = 0;

2304 2305 2306
	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
L
Linus Torvalds 已提交
2307
		/* Slab management obj is off-slab. */
2308
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2309
					      local_flags, nodeid);
2310
		if (!freelist)
L
Linus Torvalds 已提交
2311 2312
			return NULL;
	} else {
2313 2314 2315
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
L
Linus Torvalds 已提交
2316
	}
2317

2318
	return freelist;
L
Linus Torvalds 已提交
2319 2320
}

2321
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
L
Linus Torvalds 已提交
2322
{
2323
	return ((freelist_idx_t *)page->freelist)[idx];
2324 2325 2326
}

static inline void set_free_obj(struct page *page,
2327
					unsigned int idx, freelist_idx_t val)
2328
{
2329
	((freelist_idx_t *)(page->freelist))[idx] = val;
L
Linus Torvalds 已提交
2330 2331
}

2332
static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
2333
{
2334
#if DEBUG
L
Linus Torvalds 已提交
2335 2336 2337
	int i;

	for (i = 0; i < cachep->num; i++) {
2338
		void *objp = index_to_obj(cachep, page, i);
2339

L
Linus Torvalds 已提交
2340 2341 2342 2343 2344 2345 2346 2347
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2348 2349 2350
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2351
		 */
A
Alexander Potapenko 已提交
2352 2353 2354
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
2355
			cachep->ctor(objp + obj_offset(cachep));
A
Alexander Potapenko 已提交
2356 2357 2358
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}
L
Linus Torvalds 已提交
2359 2360 2361

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2362
				slab_error(cachep, "constructor overwrote the end of an object");
L
Linus Torvalds 已提交
2363
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2364
				slab_error(cachep, "constructor overwrote the start of an object");
L
Linus Torvalds 已提交
2365
		}
2366 2367 2368
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
Q
Qian Cai 已提交
2369
			slab_kernel_map(cachep, objp, 0);
2370
		}
2371
	}
L
Linus Torvalds 已提交
2372
#endif
2373 2374
}

T
Thomas Garnier 已提交
2375 2376 2377 2378 2379
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
2380
		unsigned int *list;
T
Thomas Garnier 已提交
2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization methode available.
 * return true if the pre-computed list is available, false otherwize.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
2398
	rand = get_random_int();
T
Thomas Garnier 已提交
2399 2400 2401 2402 2403 2404 2405 2406

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
2407
		state->pos = rand % count;
T
Thomas Garnier 已提交
2408 2409 2410 2411 2412 2413 2414 2415
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
2416 2417 2418
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
T
Thomas Garnier 已提交
2419 2420
}

2421 2422 2423 2424 2425 2426 2427
/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

T
Thomas Garnier 已提交
2428 2429 2430 2431 2432 2433
/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
2434
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
T
Thomas Garnier 已提交
2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
2459 2460 2461 2462 2463 2464 2465 2466 2467
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
T
Thomas Garnier 已提交
2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
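/*
 * Illustrative walk-through of the Fisher-Yates shuffle used by
 * shuffle_freelist() above (values are made up): starting from the identity
 * freelist [0 1 2 3], i counts down from 3 and each step swaps slot i with
 * a random slot in [0, i]:
 *
 *	i = 3, rand % 4 == 1  ->  [0 3 2 1]
 *	i = 2, rand % 3 == 0  ->  [2 3 0 1]
 *	i = 1, rand % 2 == 1  ->  [2 3 0 1]
 *
 * Every permutation is equally likely, so the order in which objects are
 * handed out from a fresh slab is unpredictable.
 */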

static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
A
Alexander Potapenko 已提交
2490
	void *objp;
T
Thomas Garnier 已提交
2491
	bool shuffled;
2492 2493 2494

	cache_init_objs_debug(cachep, page);

T
Thomas Garnier 已提交
2495 2496 2497 2498
	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2499 2500 2501 2502
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

2503
	for (i = 0; i < cachep->num; i++) {
2504
		objp = index_to_obj(cachep, page, i);
2505
		objp = kasan_init_slab_obj(cachep, objp);
2506

2507
		/* constructor could break poison info */
A
Alexander Potapenko 已提交
2508 2509 2510 2511 2512
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}
2513

T
Thomas Garnier 已提交
2514 2515
		if (!shuffled)
			set_free_obj(page, i, i);
L
Linus Torvalds 已提交
2516 2517 2518
	}
}

2519
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2520
{
2521
	void *objp;
2522

2523
	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2524
	page->active++;
2525 2526 2527 2528

	return objp;
}
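/*
 * Illustrative example (not part of the original code): the freelist index
 * array plus page->active behave like a stack of free object indices.  With
 * cachep->num = 4 and a freelist of [2 0 3 1], active = 0:
 *
 *	slab_get_obj()  ->  returns object 2, active becomes 1
 *	slab_get_obj()  ->  returns object 0, active becomes 2
 *
 * slab_put_obj() below does the reverse: it decrements active and records
 * the freed object's index at that slot.
 */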

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
2531
{
2532
	unsigned int objnr = obj_to_index(cachep, page, objp);
2533
#if DEBUG
J
Joonsoo Kim 已提交
2534
	unsigned int i;
2535 2536

	/* Verify double free bug */
2537
	for (i = page->active; i < cachep->num; i++) {
2538
		if (get_free_obj(page, i) == objnr) {
2539
			pr_err("slab: double free detected in cache '%s', objp %px\n",
J
Joe Perches 已提交
2540
			       cachep->name, objp);
2541 2542
			BUG();
		}
2543 2544
	}
#endif
2545
	page->active--;
2546 2547 2548
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

2549
	set_free_obj(page, page->active, objnr);
2550 2551
}

2552 2553 2554
/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
2555
 * virtual address for kfree, ksize, and slab debugging.
2556
 */
2557
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2558
			   void *freelist)
L
Linus Torvalds 已提交
2559
{
2560
	page->slab_cache = cache;
2561
	page->freelist = freelist;
L
Linus Torvalds 已提交
2562 2563 2564 2565 2566 2567
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2568 2569
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
L
Linus Torvalds 已提交
2570
{
2571
	void *freelist;
P
Pekka Enberg 已提交
2572 2573
	size_t offset;
	gfp_t local_flags;
2574
	int page_node;
2575
	struct kmem_cache_node *n;
2576
	struct page *page;
L
Linus Torvalds 已提交
2577

A
Andrew Morton 已提交
2578 2579 2580
	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
L
Linus Torvalds 已提交
2581
	 */
2582
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2583
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2584 2585 2586 2587
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();
2588
	}
2589
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
C
Christoph Lameter 已提交
2590
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2591 2592

	check_irq_off();
2593
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2594 2595
		local_irq_enable();

A
Andrew Morton 已提交
2596 2597 2598
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2599
	 */
2600
	page = kmem_getpages(cachep, local_flags, nodeid);
2601
	if (!page)
L
Linus Torvalds 已提交
2602 2603
		goto failed;

2604 2605
	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);
2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617

	/* Get colour for the slab, and cal the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

2618 2619 2620 2621 2622 2623 2624
	/*
	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
	 * page_address() in the latter returns a non-tagged pointer,
	 * as it should be for slab pages.
	 */
	kasan_poison_slab(page);

L
Linus Torvalds 已提交
2625
	/* Get slab management. */
2626
	freelist = alloc_slabmgmt(cachep, page, offset,
2627
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2628
	if (OFF_SLAB(cachep) && !freelist)
L
Linus Torvalds 已提交
2629 2630
		goto opps1;

2631
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2632

2633
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2634

2635
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2636 2637
		local_irq_disable();

2638 2639
	return page;

A
Andrew Morton 已提交
2640
opps1:
2641
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2642
failed:
2643
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2644
		local_irq_disable();
2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657
	return NULL;
}

static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

2658
	INIT_LIST_HEAD(&page->slab_list);
2659 2660 2661
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
2662
	n->total_slabs++;
2663
	if (!page->active) {
2664
		list_add_tail(&page->slab_list, &n->slabs_free);
2665
		n->free_slabs++;
2666
	} else
2667
		fixup_slab_list(cachep, n, page, &list);
2668

2669 2670 2671 2672 2673
	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
L
Linus Torvalds 已提交
2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
2686
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
P
Pekka Enberg 已提交
2687 2688
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2689 2690 2691
	}
}

2692 2693
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2694
	unsigned long long redzone1, redzone2;
2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2710
	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2711
	       obj, redzone1, redzone2);
2712 2713
}

2714
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2715
				   unsigned long caller)
L
Linus Torvalds 已提交
2716 2717
{
	unsigned int objnr;
2718
	struct page *page;
L
Linus Torvalds 已提交
2719

2720 2721
	BUG_ON(virt_to_cache(objp) != cachep);

2722
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2723
	kfree_debugcheck(objp);
2724
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2725 2726

	if (cachep->flags & SLAB_RED_ZONE) {
2727
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2728 2729 2730
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
Q
Qian Cai 已提交
2731
	if (cachep->flags & SLAB_STORE_USER)
2732
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2733

2734
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2735 2736

	BUG_ON(objnr >= cachep->num);
2737
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2738 2739 2740

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
Q
Qian Cai 已提交
2741
		slab_kernel_map(cachep, objp, 0);
L
Linus Torvalds 已提交
2742 2743 2744 2745 2746 2747 2748 2749 2750
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765
static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}

2766
static inline void fixup_slab_list(struct kmem_cache *cachep,
2767 2768
				struct kmem_cache_node *n, struct page *page,
				void **list)
2769 2770
{
	/* move slabp to correct slabp list: */
2771
	list_del(&page->slab_list);
2772
	if (page->active == cachep->num) {
2773
		list_add(&page->slab_list, &n->slabs_full);
2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
2787
		list_add(&page->slab_list, &n->slabs_partial);
2788 2789
}

2790 2791
/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2792
					struct page *page, bool pfmemalloc)
2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
2810
	list_del(&page->slab_list);
2811
	if (!page->active) {
2812
		list_add_tail(&page->slab_list, &n->slabs_free);
2813
		n->free_slabs++;
2814
	} else
2815
		list_add_tail(&page->slab_list, &n->slabs_partial);
2816

2817
	list_for_each_entry(page, &n->slabs_partial, slab_list) {
2818 2819 2820 2821
		if (!PageSlabPfmemalloc(page))
			return page;
	}

2822
	n->free_touched = 1;
2823
	list_for_each_entry(page, &n->slabs_free, slab_list) {
2824
		if (!PageSlabPfmemalloc(page)) {
2825
			n->free_slabs--;
2826
			return page;
2827
		}
2828 2829 2830 2831 2832 2833
	}

	return NULL;
}

static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2834 2835 2836
{
	struct page *page;

2837
	assert_spin_locked(&n->list_lock);
2838 2839
	page = list_first_entry_or_null(&n->slabs_partial, struct page,
					slab_list);
2840 2841
	if (!page) {
		n->free_touched = 1;
2842
		page = list_first_entry_or_null(&n->slabs_free, struct page,
2843
						slab_list);
2844
		if (page)
2845
			n->free_slabs--;
2846 2847
	}

2848
	if (sk_memalloc_socks())
2849
		page = get_valid_first_slab(n, page, pfmemalloc);
2850

2851 2852 2853
	return page;
}

2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881
static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905
/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}

2906
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2907 2908
{
	int batchcount;
2909
	struct kmem_cache_node *n;
2910
	struct array_cache *ac, *shared;
P
Pekka Enberg 已提交
2911
	int node;
2912
	void *list = NULL;
2913
	struct page *page;
P
Pekka Enberg 已提交
2914

L
Linus Torvalds 已提交
2915
	check_irq_off();
2916
	node = numa_mem_id();
2917

2918
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2919 2920
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2921 2922 2923 2924
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2925 2926 2927
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2928
	n = get_node(cachep, node);
2929

2930
	BUG_ON(ac->avail > 0 || !n);
2931 2932 2933 2934
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

2935
	spin_lock(&n->list_lock);
2936
	shared = READ_ONCE(n->shared);
L
Linus Torvalds 已提交
2937

2938
	/* See if we can refill from the shared array */
2939 2940
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
2941
		goto alloc_done;
2942
	}
2943

L
Linus Torvalds 已提交
2944 2945
	while (batchcount > 0) {
		/* Get the slab that the allocation is to come from. */
2946
		page = get_first_slab(n, false);
2947 2948
		if (!page)
			goto must_grow;
L
Linus Torvalds 已提交
2949 2950

		check_spinlock_acquired(cachep);
2951

2952
		batchcount = alloc_block(cachep, ac, page, batchcount);
2953
		fixup_slab_list(cachep, n, page, &list);
L
Linus Torvalds 已提交
2954 2955
	}

A
Andrew Morton 已提交
2956
must_grow:
2957
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
2958
alloc_done:
2959
	spin_unlock(&n->list_lock);
2960
	fixup_objfreelist_debug(cachep, &list);
L
Linus Torvalds 已提交
2961

2962
direct_grow:
L
Linus Torvalds 已提交
2963
	if (unlikely(!ac->avail)) {
2964 2965 2966 2967 2968 2969 2970 2971
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

2972
		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2973

2974 2975 2976 2977
		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
2978
		ac = cpu_cache_get(cachep);
2979 2980 2981
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);
2982

2983
		if (!ac->avail)
L
Linus Torvalds 已提交
2984 2985 2986
			return NULL;
	}
	ac->touched = 1;
2987

2988
	return ac->entry[--ac->avail];
L
Linus Torvalds 已提交
2989 2990
}

A
Andrew Morton 已提交
2991 2992
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
2993
{
2994
	might_sleep_if(gfpflags_allow_blocking(flags));
L
Linus Torvalds 已提交
2995 2996 2997
}

#if DEBUG
A
Andrew Morton 已提交
2998
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2999
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
3000
{
3001
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
P
Pekka Enberg 已提交
3002
	if (!objp)
L
Linus Torvalds 已提交
3003
		return objp;
P
Pekka Enberg 已提交
3004
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
3005
		check_poison_obj(cachep, objp);
Q
Qian Cai 已提交
3006
		slab_kernel_map(cachep, objp, 1);
L
Linus Torvalds 已提交
3007 3008 3009
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
3010
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
3011 3012

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
3013 3014
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
J
Joe Perches 已提交
3015
			slab_error(cachep, "double free, or memory outside object was overwritten");
3016
			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3017 3018
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
3019 3020 3021 3022
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
3023

3024
	objp += obj_offset(cachep);
3025
	if (cachep->ctor && cachep->flags & SLAB_POISON)
3026
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
3027 3028
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3029
		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
3030
		       objp, (int)ARCH_SLAB_MINALIGN);
3031
	}
L
Linus Torvalds 已提交
3032 3033 3034 3035 3036 3037
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

3038
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
3039
{
P
Pekka Enberg 已提交
3040
	void *objp;
L
Linus Torvalds 已提交
3041 3042
	struct array_cache *ac;

3043
	check_irq_off();
3044

3045
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
3046 3047
	if (likely(ac->avail)) {
		ac->touched = 1;
3048
		objp = ac->entry[--ac->avail];
3049

3050 3051
		STATS_INC_ALLOCHIT(cachep);
		goto out;
L
Linus Torvalds 已提交
3052
	}
3053 3054

	STATS_INC_ALLOCMISS(cachep);
3055
	objp = cache_alloc_refill(cachep, flags);
3056 3057 3058 3059 3060 3061 3062
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
3063 3064 3065 3066 3067
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
3068 3069
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
3070 3071 3072
	return objp;
}
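/*
 * Illustrative fast-path example (hypothetical numbers): with ac->avail = 3
 * and ac->entry = { A, B, C }, three back-to-back allocations return C, B,
 * then A without touching any list_lock; the fourth finds avail == 0 and
 * falls into cache_alloc_refill(), which pulls up to ac->batchcount objects
 * from the shared array or the per-node lists before retrying.
 */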

#ifdef CONFIG_NUMA
3074
/*
3075
 * Try allocating on another node if PFA_SPREAD_SLAB is set or a mempolicy is in effect.
3076 3077 3078 3079 3080 3081 3082 3083
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

3084
	if (in_interrupt() || (flags & __GFP_THISNODE))
3085
		return NULL;
3086
	nid_alloc = nid_here = numa_mem_id();
3087
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3088
		nid_alloc = cpuset_slab_spread_node();
3089
	else if (current->mempolicy)
3090
		nid_alloc = mempolicy_slab_node();
3091
	if (nid_alloc != nid_here)
3092
		return ____cache_alloc_node(cachep, flags, nid_alloc);
3093 3094 3095
	return NULL;
}

3096 3097
/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fallback is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
3100 3101 3102
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
3103
 */
3104
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3105
{
3106
	struct zonelist *zonelist;
3107
	struct zoneref *z;
3108 3109
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3110
	void *obj = NULL;
3111
	struct page *page;
3112
	int nid;
3113
	unsigned int cpuset_mems_cookie;
3114 3115 3116 3117

	if (flags & __GFP_THISNODE)
		return NULL;

3118
retry_cpuset:
3119
	cpuset_mems_cookie = read_mems_allowed_begin();
3120
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3121

3122 3123 3124 3125 3126
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3127 3128
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3129

3130
		if (cpuset_zone_allowed(zone, flags) &&
3131 3132
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
3133
				obj = ____cache_alloc_node(cache,
D
David Rientjes 已提交
3134
					gfp_exact_node(flags), nid);
3135 3136 3137
				if (obj)
					break;
		}
3138 3139
	}

3140
	if (!obj) {
3141 3142 3143 3144 3145 3146
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3147 3148 3149 3150
		page = cache_grow_begin(cache, flags, numa_mem_id());
		cache_grow_end(cache, page);
		if (page) {
			nid = page_to_nid(page);
3151 3152
			obj = ____cache_alloc_node(cache,
				gfp_exact_node(flags), nid);
3153

3154
			/*
3155 3156
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
3157
			 */
3158 3159
			if (!obj)
				goto retry;
3160
		}
3161
	}
3162

3163
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3164
		goto retry_cpuset;
3165 3166 3167
	return obj;
}

3168 3169
/*
 * An interface to enable slab creation on nodeid
L
Linus Torvalds 已提交
3170
 */
3171
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3172
				int nodeid)
3173
{
3174
	struct page *page;
3175
	struct kmem_cache_node *n;
3176
	void *obj = NULL;
3177
	void *list = NULL;
P
Pekka Enberg 已提交
3178

3179
	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3180
	n = get_node(cachep, nodeid);
3181
	BUG_ON(!n);
P
Pekka Enberg 已提交
3182

3183
	check_irq_off();
3184
	spin_lock(&n->list_lock);
3185
	page = get_first_slab(n, false);
3186 3187
	if (!page)
		goto must_grow;
P
Pekka Enberg 已提交
3188 3189 3190 3191 3192 3193 3194

	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3195
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3196

3197
	obj = slab_get_obj(cachep, page);
3198
	n->free_objects--;
P
Pekka Enberg 已提交
3199

3200
	fixup_slab_list(cachep, n, page, &list);
3201

3202
	spin_unlock(&n->list_lock);
3203
	fixup_objfreelist_debug(cachep, &list);
3204
	return obj;
3205

A
Andrew Morton 已提交
3206
must_grow:
3207
	spin_unlock(&n->list_lock);
3208
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3209 3210 3211 3212
	if (page) {
		/* This slab isn't counted yet so don't update free_objects */
		obj = slab_get_obj(cachep, page);
	}
3213
	cache_grow_end(cachep, page);
L
Linus Torvalds 已提交
3214

3215
	return obj ? obj : fallback_alloc(cachep, flags);
3216
}
3217 3218

static __always_inline void *
3219
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3220
		   unsigned long caller)
3221 3222 3223
{
	unsigned long save_flags;
	void *ptr;
3224
	int slab_node = numa_mem_id();
3225

3226
	flags &= gfp_allowed_mask;
3227 3228
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3229 3230
		return NULL;

3231 3232 3233
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3234
	if (nodeid == NUMA_NO_NODE)
3235
		nodeid = slab_node;
3236

3237
	if (unlikely(!get_node(cachep, nodeid))) {
3238 3239 3240 3241 3242
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3243
	if (nodeid == slab_node) {
3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

3260
	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3261
		memset(ptr, 0, cachep->object_size);
3262

3263
	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3264 3265 3266 3267 3268 3269 3270 3271
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3272
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3273 3274 3275 3276 3277 3278 3279 3280 3281 3282
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3283 3284
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3300
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3301 3302 3303 3304
{
	unsigned long save_flags;
	void *objp;

3305
	flags &= gfp_allowed_mask;
3306 3307
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3308 3309
		return NULL;

3310 3311 3312 3313 3314 3315 3316
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

3317
	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3318
		memset(objp, 0, cachep->object_size);
3319

3320
	slab_post_alloc_hook(cachep, flags, 1, &objp);
3321 3322
	return objp;
}
3323 3324

/*
3325
 * Caller needs to acquire correct kmem_cache_node's list_lock
3326
 * @list: List of detached free slabs should be freed by caller
3327
 */
3328 3329
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
L
Linus Torvalds 已提交
3330 3331
{
	int i;
3332
	struct kmem_cache_node *n = get_node(cachep, node);
3333 3334 3335
	struct page *page;

	n->free_objects += nr_objects;
L
Linus Torvalds 已提交
3336 3337

	for (i = 0; i < nr_objects; i++) {
3338
		void *objp;
3339
		struct page *page;
L
Linus Torvalds 已提交
3340

3341 3342
		objp = objpp[i];

3343
		page = virt_to_head_page(objp);
3344
		list_del(&page->slab_list);
3345
		check_spinlock_acquired_node(cachep, node);
3346
		slab_put_obj(cachep, page, objp);
L
Linus Torvalds 已提交
3347 3348 3349
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
3350
		if (page->active == 0) {
3351
			list_add(&page->slab_list, &n->slabs_free);
3352 3353
			n->free_slabs++;
		} else {
L
Linus Torvalds 已提交
3354 3355 3356 3357
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3358
			list_add_tail(&page->slab_list, &n->slabs_partial);
L
Linus Torvalds 已提交
3359 3360
		}
	}
3361 3362 3363 3364

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

3365 3366
		page = list_last_entry(&n->slabs_free, struct page, slab_list);
		list_move(&page->slab_list, list);
3367
		n->free_slabs--;
3368
		n->total_slabs--;
3369
	}
L
Linus Torvalds 已提交
3370 3371
}

3372
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
L
Linus Torvalds 已提交
3373 3374
{
	int batchcount;
3375
	struct kmem_cache_node *n;
3376
	int node = numa_mem_id();
3377
	LIST_HEAD(list);
L
Linus Torvalds 已提交
3378 3379

	batchcount = ac->batchcount;
3380

L
Linus Torvalds 已提交
3381
	check_irq_off();
3382
	n = get_node(cachep, node);
3383 3384 3385
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
P
Pekka Enberg 已提交
3386
		int max = shared_array->limit - shared_array->avail;
L
Linus Torvalds 已提交
3387 3388 3389
		if (max) {
			if (batchcount > max)
				batchcount = max;
3390
			memcpy(&(shared_array->entry[shared_array->avail]),
P
Pekka Enberg 已提交
3391
			       ac->entry, sizeof(void *) * batchcount);
L
Linus Torvalds 已提交
3392 3393 3394 3395 3396
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

3397
	free_block(cachep, ac->entry, batchcount, node, &list);
A
Andrew Morton 已提交
3398
free_done:
L
Linus Torvalds 已提交
3399 3400 3401
#if STATS
	{
		int i = 0;
3402
		struct page *page;
L
Linus Torvalds 已提交
3403

3404
		list_for_each_entry(page, &n->slabs_free, slab_list) {
3405
			BUG_ON(page->active);
L
Linus Torvalds 已提交
3406 3407 3408 3409 3410 3411

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
3412
	spin_unlock(&n->list_lock);
3413
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
3414
	ac->avail -= batchcount;
A
Andrew Morton 已提交
3415
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
L
Linus Torvalds 已提交
3416 3417 3418
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	if (unlikely(slab_want_init_on_free(cachep)))
		memset(objp, 0, cachep->object_size);
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses that happen while accessing slabp
	 * (which is a per-page memory reference) to get the nodeid. Instead
	 * use a global variable to skip the call, which is most likely to be
	 * present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	ac->entry[ac->avail++] = objp;
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
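
/*
 * Illustrative usage sketch (not part of this file): a typical client pairs
 * kmem_cache_alloc() with kmem_cache_free() on a cache it created itself.
 * "foo" and "foo_cache" are hypothetical names used only for this example.
 *
 *	struct foo { int refcount; struct list_head link; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (foo_cache) {
 *		struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *		if (f)
 *			kmem_cache_free(foo_cache, f);
 *	}
 */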

static __always_inline void
cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
{
	size_t i;

	for (i = 0; i < size; i++)
		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
}

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	size_t i;

	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return 0;

	cache_alloc_debugcheck_before(s, flags);

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = __do_cache_alloc(s, flags);

		if (unlikely(!objp))
			goto error;
		p[i] = objp;
	}
	local_irq_enable();

	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);

	/* Clear memory outside IRQ disabled section */
	if (unlikely(slab_want_init_on_alloc(flags, s)))
		for (i = 0; i < size; i++)
			memset(p[i], 0, s->object_size);

	slab_post_alloc_hook(s, flags, size, p);
	/* FIXME: Trace call missing. Christoph would like a bulk variant */
	return size;
error:
	local_irq_enable();
	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
	slab_post_alloc_hook(s, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
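
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * allocate and release a small batch of objects in one call each way. The
 * return value is the number of objects actually allocated (0 on failure).
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *					ARRAY_SIZE(objs), objs);
 *	if (got)
 *		kmem_cache_free_bulk(foo_cache, got, objs);
 */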

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
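
/*
 * Illustrative usage sketch (hypothetical caller): place a per-node object
 * on the node that will access it most; fallback to other nodes remains
 * possible unless __GFP_THISNODE is passed.
 *
 *	int nid = cpu_to_node(cpu);
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 */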

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
	ret = kasan_kmalloc(cachep, ret, size, flags);

	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
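
/*
 * Illustrative usage sketch (hypothetical caller): kmalloc()/kfree() pairs
 * such as the one below end up in __do_kmalloc() above for sizes that fit
 * the kmalloc caches (see the KMALLOC_MAX_CACHE_SIZE check).
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *	if (buf)
 *		kfree(buf);
 */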

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);
		if (!s)
			continue;

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	if (!c) {
		local_irq_restore(flags);
		return;
	}
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}
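
/*
 * Worked example of the defaults above (illustrative only, DEBUG off): a
 * cache with 192-byte objects falls through to the final "else" bucket, so
 * limit = 120 and batchcount = (120 + 1) / 2 = 60; since the object size is
 * no larger than PAGE_SIZE, shared = 8 on an SMP machine.
 */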

/*
 * Drain an array if it contains any elements taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work_on(smp_processor_id(), work,
				round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: %0 on success, negative error code otherwise.
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
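
/*
 * Illustrative tuning sketch (run from userspace, not kernel code): the line
 * written is "<cache name> <limit> <batchcount> <shared>", with batchcount
 * no larger than limit, e.g.
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * The cache name and numbers above are only an example.
 */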

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Returns if the check passes; otherwise the copy is either warned about
 * (when usercopy_fallback allows a copy that stays within the object) or
 * rejected via usercopy_abort().
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	ptr = kasan_reset_tag(ptr);

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */
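
/*
 * Illustrative sketch (hypothetical cache, not part of this file): the
 * usercopy region checked above is whitelisted when the cache is created,
 * so only the "name" member of this example may be copied to/from
 * userspace.
 *
 *	struct foo { u32 flags; char name[32]; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
 *					       SLAB_HWCACHE_ALIGN,
 *					       offsetof(struct foo, name),
 *					       sizeof_field(struct foo, name),
 *					       NULL);
 */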

/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t __ksize(const void *objp)
{
	struct kmem_cache *c;
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	c = virt_to_cache(objp);
	size = c ? c->object_size : 0;

	return size;
}
EXPORT_SYMBOL(__ksize);
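
/*
 * Illustrative usage sketch (hypothetical caller): ksize(), the instrumented
 * counterpart of __ksize() above, reports the usable size of an allocation,
 * which may exceed what was requested.
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	size_t usable = buf ? ksize(buf) : 0;
 *
 * Here "usable" is at least 100 when the allocation succeeded.
 */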