// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif
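
/*
 * Editorial note: PAGE_SIZE >> BITS_PER_BYTE is PAGE_SIZE / 256, so the
 * byte-sized index is chosen exactly when at most 256 objects of
 * SLAB_OBJ_MIN_SIZE fit into one page; slabs that could exceed the index
 * range are rejected later via the SLAB_OBJ_MAX_NUM check in
 * calculate_slab_order().
 */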

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
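
/*
 * Illustrative behaviour (editorial note, not from the original source):
 * a free pushes its object at entry[avail++] and the next allocation pops
 * entry[--avail], so the most recently freed -- and therefore most likely
 * cache-hot -- object is handed out first.
 */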

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
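
/*
 * Concrete example (illustrative, 64-bit, SLAB_RED_ZONE | SLAB_STORE_USER):
 * a 64-byte object gains an 8-byte redzone on each side plus one word for
 * the last caller, so obj_offset becomes 8 and the per-object footprint
 * grows to at least 88 bytes before alignment padding.
 */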
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}
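
/*
 * Example (illustrative): with cache->size = 256, object 3 of a slab sits
 * at page->s_mem + 768; s_mem already includes the slab's colour offset,
 * so every object keeps the cache's alignment.
 */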

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
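
/*
 * Worked example (illustrative numbers, not from the original source):
 * with 4K pages, gfporder = 0, buffer_size = 256 and a 1-byte on-slab
 * freelist index, num = 4096 / (256 + 1) = 15 objects and
 * *left_over = 4096 - 15 * 257 = 241 bytes, later usable for colouring.
 */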

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
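
/*
 * Usage example (editorial note): booting with "slab_max_order=3" allows
 * slab pages of up to 2^3 = 8 contiguous pages; negative values are
 * clamped to 0 and anything above MAX_ORDER - 1 is clamped down by the
 * min() above.
 */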

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif
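
/*
 * Example (illustrative): with nodes {0, 1, 3} online, successive
 * next_reap_node() calls walk 0 -> 1 -> 3 -> 0 ..., so each cache_reap()
 * pass drains the alien cache of just one remote node rather than all of
 * them at once.
 */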

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
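
/*
 * Example (illustrative): from->avail = 30, max = 16 and
 * to->limit - to->avail = 10 give nr = 10; the ten most recently freed
 * pointers move from 'from' to 'to' in a single memcpy.
 */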

/* &alien->lock must be held by alien callers. */
static __always_inline void __free_one(struct array_cache *ac, void *objp)
{
	/* Avoid trivial double-free. */
	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
		return;
	ac->entry[ac->avail++] = objp;
}
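
/*
 * Editorial note: the hardened check above only catches the trivial
 * pattern of the same object being freed twice in direct succession;
 * a double free with another free in between is not detected here.
 */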

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		kmemleak_no_scan(alc);
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		__free_one(ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
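
/*
 * Illustrative expansion (editorial note): for GFP_KERNEL this yields
 * (GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN) with __GFP_RECLAIM and
 * __GFP_NOFAIL cleared, i.e. a silent, node-exact attempt that neither
 * reclaims nor falls back to other nodes and may simply fail.
 */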
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}
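
/*
 * Worked example (illustrative): on a node with 4 CPUs, batchcount = 16
 * and cachep->num = 15, free_limit = (1 + 4) * 16 + 15 = 95, i.e. up to
 * 95 free objects may be kept on the node before free_block() starts
 * returning whole slabs to the page allocator.
 */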

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes the kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing nodes are
 * not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_rcu().
	 */
	if (old_shared && force_change)
		synchronize_rcu();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}
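
/*
 * Editorial note: on success control also reaches the "fail:" label; at
 * that point old_shared, new_shared and new_alien only hold whatever was
 * not installed into the node, so the cleanup below the label is shared
 * by both the success and the error path.
 */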

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		free_block(cachep, nc->entry, nc->avail, node, &list);
		nc->avail = 0;

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The kmem_cache_node structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_nodes for a cache whose buffer_size is
 * the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN, 0, 0);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
				kmalloc_info[INDEX_NODE].size,
				ARCH_KMALLOC_FLAGS, 0,
				kmalloc_info[INDEX_NODE].size);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;

	flags |= cachep->allocflags;

	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	account_slab_page(page, cachep->gfporder, cachep);
	__SetPageSlab(page);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	/* In union with page->mapping where page allocator expects NULL */
	page->slab_cache = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	unaccount_slab_page(page, order, cachep);
	__free_pages(page, order);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map) {}

#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

/*
 * Update the size of the caches before calling slabs_destroy as it may
 * recursively call kfree.
 */
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, slab_list) {
		list_del(&page->slab_list);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 *
 * Return: number of left-over bytes in a slab
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
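
/*
 * Worked example (illustrative only, ignoring per-object freelist and
 * colour overhead): with PAGE_SIZE == 4096, 1024-byte objects fit
 * num = 4 per order-0 slab with left_over = 0.  For 1500-byte objects,
 * order 0 gives num = 2 and left_over = 1096; since 1096 * 8 > 4096 the
 * loop tries order 1, where num = 5 and left_over = 692 passes the
 * fragmentation check (692 * 8 <= 8192).
 */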

static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}

static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * If slab auto-initialization on free is enabled, store the freelist
	 * off-slab, so that its contents don't end up in one of the allocated
	 * objects.
	 */
	if (unlikely(slab_want_init_on_free(cachep)))
		return false;

	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns zero on success, nonzero on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: 0 on success, nonzero in case of error
 */
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
	size_t ralign = BYTES_PER_WORD;
	gfp_t gfp;
	int err;
	unsigned int size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	size = ALIGN(size, BYTES_PER_WORD);

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size = ALIGN(size, REDZONE_ALIGN);
	}

	/* 3) caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	cachep->align = ralign;
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

#if DEBUG

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#endif

	kasan_cache_create(cachep, &size, &flags);

	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
	/*
	 * To activate debug pagealloc, off-slab management is necessary
	 * requirement. In early phase of initialization, small sized slab
	 * doesn't get initialized so it would not be possible. So, we need
	 * to check size >= 256. It guarantees that all necessary small
	 * sized slab is initialized in current slab initialization sequence.
	 */
	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
	}
#endif

	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

	if (set_off_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OFF_SLAB;
		goto done;
	}

	if (set_on_slab_cache(cachep, size, flags))
		goto done;

	return -E2BIG;

done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
	cachep->flags = flags;
	cachep->allocflags = __GFP_COMP;
	if (flags & SLAB_CACHE_DMA)
		cachep->allocflags |= GFP_DMA;
	if (flags & SLAB_CACHE_DMA32)
		cachep->allocflags |= GFP_DMA32;
	if (flags & SLAB_RECLAIM_ACCOUNT)
		cachep->allocflags |= __GFP_RECLAIMABLE;
	cachep->size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
	}

	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_release(cachep);
		return err;
	}

	return 0;
}
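
/*
 * Illustrative usage sketch (not part of this file): callers reach
 * __kmem_cache_create() through kmem_cache_create(); "foo" and
 * struct foo are hypothetical names.
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */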

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}

static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}

#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_mutex_acquired()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif

static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}
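
/*
 * Illustrative arithmetic (not part of this file): with free_all == false
 * and ac->limit == 120, tofree is (120 + 4) / 5 == 24 entries per call;
 * if fewer than 24 objects are cached, the (avail + 1) / 2 clamp frees
 * roughly half of what is there instead.
 */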

static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();
	struct kmem_cache_node *n;
	LIST_HEAD(list);

	check_irq_off();
	ac = cpu_cache_get(cachep);
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	spin_unlock(&n->list_lock);
	ac->avail = 0;
	slabs_destroy(cachep, &list);
}

static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_cache_node *n;
	int node;
	LIST_HEAD(list);

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
			drain_alien_cache(cachep, n->alien);

	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct page *page;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
			goto out;
		}

		page = list_entry(p, struct page, slab_list);
		list_del(&page->slab_list);
		n->free_slabs--;
		n->total_slabs--;
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);
		nr_freed++;
	}
out:
	return nr_freed;
}

bool __kmem_cache_empty(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n)
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial))
			return false;
	return true;
}

int __kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0;
	int node;
	struct kmem_cache_node *n;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n) {
		drain_freelist(cachep, n, INT_MAX);

		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
	}
	return (ret ? 1 : 0);
}

int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	return __kmem_cache_shrink(cachep);
}

void __kmem_cache_release(struct kmem_cache *cachep)
{
	int i;
	struct kmem_cache_node *n;

	cache_random_seq_destroy(cachep);

	free_percpu(cachep->cpu_cache);

	/* NUMA: free the node structures */
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
	}
}

/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it were, we would defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
static void *alloc_slabmgmt(struct kmem_cache *cachep,
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
{
	void *freelist;
	void *addr = page_address(page);

	page->s_mem = addr + colour_off;
	page->active = 0;

	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
					      local_flags, nodeid);
	} else {
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
	}

	return freelist;
}

static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
	return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
					unsigned int idx, freelist_idx_t val)
{
	((freelist_idx_t *)(page->freelist))[idx] = val;
}
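
/*
 * Illustrative note (not part of this file): the freelist is simply an
 * array of object indices.  For a slab with four objects and
 * page->active == 1, entries 1..3 name the free objects; allocation
 * reads the entry at 'active' and increments it, freeing decrements
 * 'active' and writes the index back, so both paths are O(1).
 */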

static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
{
#if DEBUG
	int i;

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
			cachep->ctor(objp + obj_offset(cachep));
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the end of an object");
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the start of an object");
		}
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
			slab_kernel_map(cachep, objp, 0);
		}
	}
#endif
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
		unsigned int *list;
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization method available.
 * Return true if the pre-computed list is available, false otherwise.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
	rand = get_random_int();

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
		state->pos = rand % count;
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
}

/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * Return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
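
/*
 * Illustrative example (not part of this file): for a five-object slab
 * without a pre-computed list, the freelist starts as 0 1 2 3 4 and the
 * Fisher-Yates pass above swaps each slot i with a random slot in
 * [0, i], so every permutation of the indices is equally likely.
 */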

static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
	void *objp;
	bool shuffled;

	cache_init_objs_debug(cachep, page);

	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

	for (i = 0; i < cachep->num; i++) {
		objp = index_to_obj(cachep, page, i);
		objp = kasan_init_slab_obj(cachep, objp);

		/* constructor could break poison info */
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}

		if (!shuffled)
			set_free_obj(page, i, i);
	}
}

static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
	void *objp;

	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
	page->active++;

	return objp;
}

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %px\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
{
	page->slab_cache = cache;
	page->freelist = freelist;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
{
	void *freelist;
	size_t offset;
	gfp_t local_flags;
	int page_node;
	struct kmem_cache_node *n;
	struct page *page;

	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	check_irq_off();
	if (gfpflags_allow_blocking(local_flags))
		local_irq_enable();

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
		goto failed;

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	/* Get colour for the slab, and calculate the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

	/*
	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
	 * page_address() in the latter returns a non-tagged pointer,
	 * as it should be for slab pages.
	 */
	kasan_poison_slab(page);

	/* Get slab management. */
	freelist = alloc_slabmgmt(cachep, page, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
	if (OFF_SLAB(cachep) && !freelist)
		goto opps1;

	slab_map_pages(cachep, page, freelist);

	cache_init_objs(cachep, page);

	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();

	return page;

opps1:
	kmem_freepages(cachep, page);
failed:
	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();
	return NULL;
}

static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->slab_list);
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
	n->total_slabs++;
	if (!page->active) {
		list_add_tail(&page->slab_list, &n->slabs_free);
		n->free_slabs++;
	} else
		fixup_slab_list(cachep, n, page, &list);

	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
}
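
/*
 * Illustrative flow (not part of this file): callers pair the two halves
 * the way cache_alloc_refill() does, re-reading the per-cpu array cache
 * because cache_grow_begin() may have re-enabled interrupts:
 *
 *	page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
 *	ac = cpu_cache_get(cachep);
 *	if (!ac->avail && page)
 *		alloc_block(cachep, ac, page, batchcount);
 *	cache_grow_end(cachep, page);
 */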

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
		       (unsigned long)objp);
		BUG();
	}
}

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
	       obj, redzone1, redzone2);
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
				   unsigned long caller)
{
	unsigned int objnr;
	struct page *page;

	BUG_ON(virt_to_cache(objp) != cachep);

	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	page = virt_to_head_page(objp);

	if (cachep->flags & SLAB_RED_ZONE) {
		verify_redzone_free(cachep, objp);
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	objnr = obj_to_index(cachep, page, objp);

	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != index_to_obj(cachep, page, objnr));

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
		slab_kernel_map(cachep, objp, 0);
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}

static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list)
{
	/* move slabp to correct slabp list: */
	list_del(&page->slab_list);
	if (page->active == cachep->num) {
		list_add(&page->slab_list, &n->slabs_full);
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
		list_add(&page->slab_list, &n->slabs_partial);
}

/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
					struct page *page, bool pfmemalloc)
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
	list_del(&page->slab_list);
	if (!page->active) {
		list_add_tail(&page->slab_list, &n->slabs_free);
		n->free_slabs++;
	} else
		list_add_tail(&page->slab_list, &n->slabs_partial);

	list_for_each_entry(page, &n->slabs_partial, slab_list) {
		if (!PageSlabPfmemalloc(page))
			return page;
	}

	n->free_touched = 1;
	list_for_each_entry(page, &n->slabs_free, slab_list) {
		if (!PageSlabPfmemalloc(page)) {
			n->free_slabs--;
			return page;
		}
	}

	return NULL;
}

static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
	struct page *page;

	assert_spin_locked(&n->list_lock);
	page = list_first_entry_or_null(&n->slabs_partial, struct page,
					slab_list);
	if (!page) {
		n->free_touched = 1;
		page = list_first_entry_or_null(&n->slabs_free, struct page,
						slab_list);
		if (page)
			n->free_slabs--;
	}

	if (sk_memalloc_socks())
		page = get_valid_first_slab(n, page, pfmemalloc);

	return page;
}

static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
	int batchcount;
	struct kmem_cache_node *n;
	struct array_cache *ac, *shared;
	int node;
	void *list = NULL;
	struct page *page;

	check_irq_off();
	node = numa_mem_id();

	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	n = get_node(cachep, node);

	BUG_ON(ac->avail > 0 || !n);
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

	spin_lock(&n->list_lock);
	shared = READ_ONCE(n->shared);

	/* See if we can refill from the shared array */
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		/* Get the slab the allocation is to come from. */
		page = get_first_slab(n, false);
		if (!page)
			goto must_grow;

		check_spinlock_acquired(cachep);

		batchcount = alloc_block(cachep, ac, page, batchcount);
		fixup_slab_list(cachep, n, page, &list);
	}

must_grow:
	n->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

direct_grow:
	if (unlikely(!ac->avail)) {
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);

		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
		ac = cpu_cache_get(cachep);
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);

		if (!ac->avail)
			return NULL;
	}
	ac->touched = 1;

	return ac->entry[--ac->avail];
}

static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
{
	might_sleep_if(gfpflags_allow_blocking(flags));
}

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
				gfp_t flags, void *objp, unsigned long caller)
{
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
	if (!objp)
		return objp;
	if (cachep->flags & SLAB_POISON) {
		check_poison_obj(cachep, objp);
		slab_kernel_map(cachep, objp, 1);
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside object was overwritten");
			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}

	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp);
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
		       objp, (int)ARCH_SLAB_MINALIGN);
	}
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];

		STATS_INC_ALLOCHIT(cachep);
		goto out;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
	nid_alloc = nid_here = numa_mem_id();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = mempolicy_slab_node();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
}

/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type highest_zoneidx = gfp_zone(flags);
	void *obj = NULL;
	struct page *page;
	int nid;
	unsigned int cpuset_mems_cookie;

	if (flags & __GFP_THISNODE)
		return NULL;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = node_zonelist(mempolicy_slab_node(), flags);

retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
		nid = zone_to_nid(zone);

		if (cpuset_zone_allowed(zone, flags) &&
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
				obj = ____cache_alloc_node(cache,
					gfp_exact_node(flags), nid);
				if (obj)
					break;
		}
	}

	if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
		page = cache_grow_begin(cache, flags, numa_mem_id());
		cache_grow_end(cache, page);
		if (page) {
			nid = page_to_nid(page);
			obj = ____cache_alloc_node(cache,
				gfp_exact_node(flags), nid);

			/*
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
			 */
			if (!obj)
				goto retry;
		}
	}

	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return obj;
}

/*
 * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				int nodeid)
{
	struct page *page;
	struct kmem_cache_node *n;
	void *obj = NULL;
	void *list = NULL;

	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
	n = get_node(cachep, nodeid);
	BUG_ON(!n);

	check_irq_off();
	spin_lock(&n->list_lock);
	page = get_first_slab(n, false);
	if (!page)
		goto must_grow;

	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	BUG_ON(page->active == cachep->num);

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);
	return obj;

must_grow:
	spin_unlock(&n->list_lock);
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
	if (page) {
		/* This slab isn't counted yet so don't update free_objects */
		obj = slab_get_obj(cachep, page);
	}
	cache_grow_end(cachep, page);

	return obj ? obj : fallback_alloc(cachep, flags);
}

static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		   unsigned long caller)
{
	unsigned long save_flags;
	void *ptr;
	int slab_node = numa_mem_id();
	struct obj_cgroup *objcg = NULL;

	flags &= gfp_allowed_mask;
	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
	if (unlikely(!cachep))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

	if (nodeid == NUMA_NO_NODE)
		nodeid = slab_node;

	if (unlikely(!get_node(cachep, nodeid))) {
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

	if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ____cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
		memset(ptr, 0, cachep->object_size);

	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());

out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;
	struct obj_cgroup *objcg = NULL;

	flags &= gfp_allowed_mask;
	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
	if (unlikely(!cachep))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
		memset(objp, 0, cachep->object_size);

	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
	return objp;
}

/*
 * Caller needs to acquire the correct kmem_cache_node's list_lock.
 * @list: list of detached free slabs, to be freed by the caller
 */
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
{
	int i;
	struct kmem_cache_node *n = get_node(cachep, node);
	struct page *page;

	n->free_objects += nr_objects;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct page *page;

		objp = objpp[i];

		page = virt_to_head_page(objp);
		list_del(&page->slab_list);
		check_spinlock_acquired_node(cachep, node);
		slab_put_obj(cachep, page, objp);
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
		if (page->active == 0) {
			list_add(&page->slab_list, &n->slabs_free);
			n->free_slabs++;
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&page->slab_list, &n->slabs_partial);
		}
	}

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, slab_list);
		list_move(&page->slab_list, list);
		n->free_slabs--;
		n->total_slabs--;
	}
}
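
/*
 * Illustrative arithmetic (not part of this file): if n->free_limit is
 * 120, n->free_objects is 130 and cachep->num is 8, the trimming loop
 * above detaches two completely free slabs (130 -> 122 -> 114), handing
 * 16 objects' worth of pages back to the caller's destroy list.
 */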

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	LIST_HEAD(list);

	batchcount = ac->batchcount;

	check_irq_off();
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;

		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node, &list);
free_done:
#if STATS
	{
		int i = 0;
		struct page *page;

		list_for_each_entry(page, &n->slabs_free, slab_list) {
			BUG_ON(page->active);

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
	slabs_destroy(cachep, &list);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	if (unlikely(slab_want_init_on_free(cachep)))
		memset(objp, 0, cachep->object_size);

	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(objp, cachep->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);
	memcg_slab_free_hook(cachep, &objp, 1);

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is a per-page memory reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is most likely to be present in
	 * the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	__free_one(ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
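
/*
 * Illustrative usage (not part of this file; foo_cache and struct foo are
 * hypothetical): a typical caller creates a dedicated cache once and then
 * pairs kmem_cache_alloc() with kmem_cache_free():
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */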

static __always_inline void
cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
{
	size_t i;

	for (i = 0; i < size; i++)
		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
}

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	size_t i;
	struct obj_cgroup *objcg = NULL;

	s = slab_pre_alloc_hook(s, &objcg, size, flags);
	if (!s)
		return 0;

	cache_alloc_debugcheck_before(s, flags);

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = __do_cache_alloc(s, flags);

		if (unlikely(!objp))
			goto error;
		p[i] = objp;
	}
	local_irq_enable();

	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);

	/* Clear memory outside IRQ disabled section */
	if (unlikely(slab_want_init_on_alloc(flags, s)))
		for (i = 0; i < size; i++)
			memset(p[i], 0, s->object_size);

	slab_post_alloc_hook(s, objcg, flags, size, p);
	/* FIXME: Trace call missing. Christoph would like a bulk variant */
	return size;
error:
	local_irq_enable();
	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
	slab_post_alloc_hook(s, objcg, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
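
/*
 * Illustrative usage (hypothetical caller): on success the whole array is
 * populated and the return value equals the requested size; on failure the
 * partial allocation has already been unwound and 0 is returned, so callers
 * only need to test for zero:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */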

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
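
/*
 * Illustrative usage (hypothetical): keep a per-node structure on the node
 * that will access it most. Without __GFP_THISNODE the allocator may fall
 * back to other nodes on its own, so an explicit fallback is only needed
 * when the node is pinned:
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache,
 *					      GFP_KERNEL | __GFP_THISNODE,
 *					      nid);
 *	if (!f)
 *		f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 */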

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
	ret = kasan_kmalloc(cachep, ret, size, flags);

	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
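
/*
 * Illustrative usage: kmalloc() in <linux/slab.h> compiles down to
 * __kmalloc() for non-constant sizes, so the common pattern pairs it
 * with kfree():
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */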

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);
		if (!s)
			continue;

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
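
/*
 * Note on the !orig_s branch above: kfree_bulk() in <linux/slab.h> is a
 * thin wrapper that calls kmem_cache_free_bulk(NULL, size, p), which is
 * why each object's cache must then be looked up via virt_to_cache().
 */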

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	if (!c) {
		local_irq_restore(flags);
		return;
	}
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
			    int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, large batchcounts lead to excessively long
	 * periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}
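
/*
 * Worked example of the heuristic above: a cache of 512-byte objects
 * falls into the "> 256" bucket, so limit = 54 and batchcount =
 * (54 + 1) / 2 = 27; on SMP it also gets a shared array of 8 entries
 * because 512 <= PAGE_SIZE.
 */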

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work_on(smp_processor_id(), work,
				round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: @count on success, negative error code otherwise.
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
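
/*
 * Illustrative tuning write (values are only an example): the parser above
 * expects "<cache name> <limit> <batchcount> <shared>" on a single line,
 * e.g. from a root shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * A parameter set that fails the sanity checks leaves the cache untouched
 * but still reports success.
 */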

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Returns if the check passes; otherwise the copy is rejected via
 * usercopy_warn() or usercopy_abort().
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	ptr = kasan_reset_tag(ptr);

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */
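
/*
 * Worked example of the usercopy window check above (numbers are
 * illustrative): with useroffset = 16 and usersize = 32, a copy of n = 8
 * bytes at offset = 24 is allowed because 24 >= 16, 24 - 16 = 8 <= 32,
 * and 8 <= 16 - 24 + 32 = 24.
 */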

/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t __ksize(const void *objp)
{
	struct kmem_cache *c;
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	c = virt_to_cache(objp);
	size = c ? c->object_size : 0;

	return size;
}
EXPORT_SYMBOL(__ksize);
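
/*
 * Illustrative note: __ksize() reports the usable size of the whole object,
 * which may exceed the requested size; e.g. kmalloc(100) is served from the
 * kmalloc-128 cache, so __ksize() on the result returns 128.
 */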